Examples of DataDescriptorGenerator


Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

      CardinalityCounter cCount = (CardinalityCounter)rowSources[index];
      long numRows;
      if ((numRows = cCount.getRowCount()) > 0)
      {
        long[] c = cCount.getCardinality();
        DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

        for (int i= 0; i < c.length; i++)
        {
          StatisticsDescriptor statDesc =
            new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(),
View Full Code Here
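
All of the snippets on this page follow the same basic pattern: obtain the DataDescriptorGenerator from the DataDictionary, use it to build a descriptor object, and register that descriptor in the appropriate system catalog. A minimal sketch of that pattern, assuming a DataDictionary dd, SchemaDescriptor sd, TableDescriptor td, TransactionController tc, and index metadata (conglomId, indexName, indexRowGenerator, isConstraint, conglomerateUUID) are already in scope, using only methods that appear in the examples below:

      // Sketch only: all referenced variables are assumed to be in scope,
      // as they are in the surrounding snippets on this page.
      DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

      // Build a descriptor for an index conglomerate ...
      ConglomerateDescriptor cgd =
        ddg.newConglomerateDescriptor(conglomId, indexName, true,
                      indexRowGenerator, isConstraint,
                      conglomerateUUID, td.getUUID(), sd.getUUID());

      // ... and register it in SYSCONGLOMERATES.
      dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);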

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    /* If an essentially identical index already exists, we share its
     * conglomerate and simply add a descriptor entry to SYSCONGLOMERATES.
     */
    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
    if (duplicate)
    {
      ConglomerateDescriptor cgd =
        ddg.newConglomerateDescriptor(conglomId, indexName, true,
                      indexRowGenerator, isConstraint,
                      conglomerateUUID, td.getUUID(), sd.getUUID() );
      dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
      // add newly added conglomerate to the list of conglomerate
      // descriptors in the td.
      ConglomerateDescriptorList cdl =
        td.getConglomerateDescriptorList();
      cdl.add(cgd);

      // can't just return yet, need to get member "indexTemplateRow"
      // because create constraint may use it
    }

    // Describe the properties of the index to the store using Properties
    // RESOLVE: The following properties assume a BTREE index.
    Properties  indexProperties;
   
    if (properties != null)
    {
      indexProperties = properties;
    }
    else
    {
      indexProperties = new Properties();
    }

    // Tell it the conglomerate id of the base table
    indexProperties.put("baseConglomerateId",
              Long.toString(td.getHeapConglomerateId()));

    // All indexes are unique because they contain the RowLocation.
    // The number of uniqueness columns must include the RowLocation
    // if the user did not specify a unique index.
    indexProperties.put("nUniqueColumns",
          Integer.toString(unique ? baseColumnPositions.length :
                        baseColumnPositions.length + 1)
              );

    // By convention, the row location column is the last column
    indexProperties.put("rowLocationColumn",
              Integer.toString(baseColumnPositions.length));

    // For now, all columns are key fields, including the RowLocation
    indexProperties.put("nKeyFields",
              Integer.toString(baseColumnPositions.length + 1));

    // For now, assume that all index columns are ordered columns
    if (! duplicate)
    {
      indexRowGenerator = new IndexRowGenerator(indexType, unique,
                          baseColumnPositions,
                          isAscending,
                          baseColumnPositions.length);
    }

    /* Now add the rows from the base table to the conglomerate.
     * We do this by scanning the base table and inserting the
     * rows into a sorter before inserting from the sorter
     * into the index.  This gives us better performance
     * and a more compact index.
     */

    rowSource = null;
    sortId = 0;
    boolean needToDropSort = false; // set to true once the sorter is created

    /* bulkFetchSize will be 16 (for now) unless
     * we are creating the table in which case it
     * will be 1.  Too hard to remove scan when
     * creating index on new table, so minimize
     * work where we can.
     */
    int bulkFetchSize = (forCreateTable) ? 1 : 16;
    int numColumns = td.getNumberOfColumns();
    int approximateRowSize = 0;

    // Create the FormatableBitSet for mapping the partial to full base row
    FormatableBitSet bitSet = new FormatableBitSet(numColumns+1);
    for (int index = 0; index < baseColumnPositions.length; index++)
    {
      bitSet.set(baseColumnPositions[index]);
    }
    FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);

    // Start by opening a full scan on the base table.
    scan = tc.openGroupFetchScan(
                            td.getHeapConglomerateId(),
              false,  // hold
              0,      // open base table read only
                            TransactionController.MODE_TABLE,
                            TransactionController.ISOLATION_SERIALIZABLE,
              zeroBasedBitSet,    // all fields as objects
              (DataValueDescriptor[]) null,  // startKeyValue
              0,    // not used when giving null start posn.
              null,  // qualifier
              (DataValueDescriptor[]) null,  // stopKeyValue
              0);    // not used when giving null stop posn.

    // Create arrays to hold the base row, index row and compact base row templates
    baseRows = new ExecRow[bulkFetchSize];
    indexRows = new ExecIndexRow[bulkFetchSize];
    compactBaseRows = new ExecRow[bulkFetchSize];

    try
    {
      // Create the array of base row template
      for (int i = 0; i < bulkFetchSize; i++)
      {
        // create a base row template
        baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);

        // create an index row template
        indexRows[i] = indexRowGenerator.getIndexRowTemplate();

        // create a compact base row template
        compactBaseRows[i] = activation.getExecutionFactory().getValueRow(
                          baseColumnPositions.length);
      }

      indexTemplateRow = indexRows[0];

      // Fill the partial row with nulls of the correct type
      ColumnDescriptorList cdl = td.getColumnDescriptorList();
      int           cdlSize = cdl.size();
      for (int index = 0, numSet = 0; index < cdlSize; index++)
      {
        if (! zeroBasedBitSet.get(index))
        {
          continue;
        }
        numSet++;
        ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
        DataTypeDescriptor dts = cd.getType();


        for (int i = 0; i < bulkFetchSize; i++)
        {
          // Put the column in both the compact and sparse base rows
          baseRows[i].setColumn(index + 1,
                  dts.getNull());
          compactBaseRows[i].setColumn(numSet,
                  baseRows[i].getColumn(index + 1));
        }

        // Calculate the approximate row size for the index row
        approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
      }

      // Get an array of RowLocation templates
      RowLocation rl[] = new RowLocation[bulkFetchSize];
      for (int i = 0; i < bulkFetchSize; i++)
      {
        rl[i] = scan.newRowLocationTemplate();

        // Get an index row based on the base row
        indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
      }

      /* Now that we have indexTemplateRow, we are done for a duplicate index.
       */
      if (duplicate)
        return;

      /* For non-unique indexes, we order by all columns + the RID.
       * For unique indexes, we just order by the columns.
       * We create a unique index observer for unique indexes
       * so that we can catch duplicate keys.
       * We create a basic sort observer for non-unique indexes
       * so that we can reuse the wrappers during an external
       * sort.
       */
      int numColumnOrderings;
      SortObserver sortObserver = null;
      if (unique)
      {
        numColumnOrderings = baseColumnPositions.length;
        // if the index is a constraint, use the constraint name in a possible error message
        String indexOrConstraintName = indexName;
        if  (conglomerateUUID != null)
        {
          ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
          if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null))
          {
            ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td,
                                                                      cd.getUUID());
            indexOrConstraintName = conDesc.getConstraintName();
          }
        }
        sortObserver = new UniqueIndexSortObserver(true, isConstraint,
                               indexOrConstraintName,
                               indexTemplateRow,
                               true,
                               td.getName());
      }
      else
      {
        numColumnOrderings = baseColumnPositions.length + 1;
        sortObserver = new BasicSortObserver(true, false,
                           indexTemplateRow,
                           true);
      }

      ColumnOrdering[]  order = new ColumnOrdering[numColumnOrderings];
      for (int i=0; i < numColumnOrderings; i++)
      {
        order[i] =
                    new IndexColumnOrder(
                        i,
                        unique || i < numColumnOrderings - 1 ?
                            isAscending[i] : true);
      }

      // create the sorter
      sortId = tc.createSort((Properties)null,
          indexTemplateRow.getRowArrayClone(),
          order,
          sortObserver,
          false,      // not in order
          scan.getEstimatedRowCount(),
          approximateRowSize // est row size, -1 means no idea
          );

      needToDropSort = true;

      // Populate sorter and get the output of the sorter into a row
      // source.  The sorter has the indexed columns only and the columns
      // are in the correct order.
      rowSource = loadSorter(baseRows, indexRows, tc,
                   scan, sortId, rl);

      conglomId =
                tc.createAndLoadConglomerate(
          indexType,
          indexTemplateRow.getRowArray(),  // index row template
          order, // columns sort order
                    indexRowGenerator.getColumnCollationIds(
                        td.getColumnDescriptorList()),
          indexProperties,
          TransactionController.IS_DEFAULT, // not temporary
          rowSource,
          (long[]) null);
     
    }
    finally
    {

      /* close the table scan */
      if (scan != null)
        scan.close();

      /* close the sorter row source before throwing exception */
      if (rowSource != null)
        rowSource.closeRowSource();

      /*
      ** drop the sort so that intermediate external sort runs can be
      ** removed from disk
      */
      if (needToDropSort)
         tc.dropSort(sortId);
    }

    ConglomerateController indexController =
      tc.openConglomerate(
                conglomId, false, 0, TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE);

    // Check to make sure that the conglomerate can be used as an index
    if ( ! indexController.isKeyed())
    {
      indexController.close();
      throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName,
                               indexType);
    }
    indexController.close();

    //
    // Create a conglomerate descriptor with the conglomId filled in and
    // add it.
    //

    ConglomerateDescriptor cgd =
      ddg.newConglomerateDescriptor(conglomId, indexName, true,
                      indexRowGenerator, isConstraint,
                      conglomerateUUID, td.getUUID(), sd.getUUID() );

    dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);

View Full Code Here

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    boolean          isEnabled;
    boolean          referencingOld;
    boolean          referencingNew;
    ReferencedColumns rcd;
    TriggerDescriptor    descriptor;
    DataDescriptorGenerator  ddg = dd.getDataDescriptorGenerator();

    if (SanityManager.DEBUG)
    {
      SanityManager.ASSERT(row.nColumns() == SYSTRIGGERS_COLUMN_COUNT,
                 "Wrong number of columns for a SYSTRIGGERS row");
View Full Code Here

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    SchemaDescriptor sd = td.getSchemaDescriptor();
   
    // Check that the current user has permission to grant the privileges.
    checkOwnership( currentUser, td, sd, dd, lcc, grant);
   
    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

    TablePermsDescriptor tablePermsDesc =
      ddg.newTablePermsDescriptor( td,
                     getPermString( SELECT_ACTION, false),
                     getPermString( DELETE_ACTION, false),
                     getPermString( INSERT_ACTION, false),
                     getPermString( UPDATE_ACTION, false),
                     getPermString( REFERENCES_ACTION, false),
                     getPermString( TRIGGER_ACTION, false),
                     currentUser);
     
    ColPermsDescriptor[] colPermsDescs = new ColPermsDescriptor[ columnBitSets.length];
    for( int i = 0; i < columnBitSets.length; i++)
    {
      if( columnBitSets[i] != null ||
        // If it is a revoke and no column list is specified then revoke all column permissions.
        // A null column bitSet in a ColPermsDescriptor indicates that all the column permissions
        // should be removed.
        (!grant) && hasColumnPermissions(i) && actionAllowed[i]
        )
      {
        colPermsDescs[i] = ddg.newColPermsDescriptor( td,
                                getActionString(i, false),
                                columnBitSets[i],
                                currentUser);
      }
    }
View Full Code Here
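
The snippet above builds its permission strings through helper methods (getPermString, getActionString). A minimal sketch of the same DataDescriptorGenerator calls, assuming dd, td, a FormatableBitSet of granted columns (selectedColumns) and the grantor's authorization id (currentUser) are in scope; the literal permission and action strings below are illustrative placeholders, not values taken from the snippet:

      DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

      // Table-level permissions: one flag per action (SELECT, DELETE, INSERT,
      // UPDATE, REFERENCES, TRIGGER), recorded for currentUser.
      TablePermsDescriptor tablePerms =
        ddg.newTablePermsDescriptor(td,
                      "y",   // SELECT (placeholder value)
                      "N",   // DELETE
                      "N",   // INSERT
                      "N",   // UPDATE
                      "N",   // REFERENCES
                      "N",   // TRIGGER
                      currentUser);

      // Column-level permissions: an action string plus the set of columns.
      ColPermsDescriptor colPerms =
        ddg.newColPermsDescriptor(td,
                      "s",   // action string (placeholder value)
                      selectedColumns,
                      currentUser);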

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    }

    UUID constraintId = uuidFactory.createUUID();

    /* Now, lets create the constraint descriptor */
    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
    switch (constraintType)
    {
      case DataDictionary.PRIMARYKEY_CONSTRAINT:
        conDesc = ddg.newPrimaryKeyConstraintDescriptor(
                td, constraintName,
                false, //deferable,
                false, //initiallyDeferred,
                genColumnPositions(td, false), //int[],
                constraintId,
                indexId,
                sd,
                enabled,
                0        // referenceCount
                );
        dd.addConstraintDescriptor(conDesc, tc);
        break;

      case DataDictionary.UNIQUE_CONSTRAINT:
        conDesc = ddg.newUniqueConstraintDescriptor(
                td, constraintName,
                false, //deferable,
                false, //initiallyDeferred,
                genColumnPositions(td, false), //int[],
                constraintId,
                indexId,
                sd,
                enabled,
                0        // referenceCount
                );
        dd.addConstraintDescriptor(conDesc, tc);
        break;

      case DataDictionary.CHECK_CONSTRAINT:
        conDesc = ddg.newCheckConstraintDescriptor(
                td, constraintName,
                false, //deferable,
                false, //initiallyDeferred,
                constraintId,
                constraintText,
                new ReferencedColumnsDescriptorImpl(genColumnPositions(td, false)), //int[],
                sd,
                enabled
                );
        dd.addConstraintDescriptor(conDesc, tc);
        break;

      case DataDictionary.FOREIGNKEY_CONSTRAINT:
        ReferencedKeyConstraintDescriptor referencedConstraint = DDUtils.locateReferencedConstraint
          ( dd, td, constraintName, columnNames, otherConstraintInfo );
        DDUtils.validateReferentialActions(dd, td, constraintName, otherConstraintInfo,columnNames);
       
        conDesc = ddg.newForeignKeyConstraintDescriptor(
                td, constraintName,
                false, //deferable,
                false, //initiallyDeferred,
                genColumnPositions(td, false), //int[],
                constraintId,
View Full Code Here
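
A minimal sketch of the primary-key branch above, assuming dd, td, sd, tc, indexId and constraintName are in scope; the column position array is an illustrative placeholder for what genColumnPositions(td, false) computes in the snippet:

      DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
      UUID constraintId = dd.getUUIDFactory().createUUID();

      ConstraintDescriptor pk = ddg.newPrimaryKeyConstraintDescriptor(
              td, constraintName,
              false,            // deferable
              false,            // initiallyDeferred
              new int[] { 1 },  // constrained column positions (placeholder)
              constraintId,
              indexId,
              sd,
              true,             // enabled
              0);               // referenceCount

      // Register the new constraint in the data dictionary.
      dd.addConstraintDescriptor(pk, tc);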

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

      sd = DDLConstantAction.getSchemaDescriptorForCreate(dd, activation, schemaName);

    //
    // Create a new table descriptor.
    //
    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

    if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE )
    {
      td = ddg.newTableDescriptor(tableName, sd, tableType, lockGranularity);
      dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc);
    } else
    {
      td = ddg.newTableDescriptor(tableName, sd, tableType, onCommitDeleteRows, onRollbackDeleteRows);
      td.setUUID(dd.getUUIDFactory().createUUID());
    }
    toid = td.getUUID();

    // Save the TableDescriptor off in the Activation
    activation.setDDLTableDescriptor(td);

    /* NOTE: We must write the columns out to the system
     * tables before any of the conglomerates, including
     * the heap, since we read the columns before the
     * conglomerates when building a TableDescriptor.
     * This will hopefully reduce the probability of
     * a deadlock involving those system tables.
     */
   
    // for each column, build a ColumnDescriptor to be stored in SYSCOLUMNS
    int index = 1;

    ColumnDescriptor[] cdlArray = new ColumnDescriptor[columnInfo.length];
    for (int ix = 0; ix < columnInfo.length; ix++)
    {
      UUID defaultUUID = columnInfo[ix].newDefaultUUID;

      /* Generate a UUID for the default, if one exists
       * and there is no default id yet.
       */
      if (columnInfo[ix].defaultInfo != null &&
        defaultUUID == null)
      {
        defaultUUID = dd.getUUIDFactory().createUUID();
      }

      if (columnInfo[ix].autoincInc != 0) // dealing with autoinc column
        columnDescriptor = new ColumnDescriptor(
                           columnInfo[ix].name,
                   index++,
                   columnInfo[ix].dataType,
                   columnInfo[ix].defaultValue,
                   columnInfo[ix].defaultInfo,
                   td,
                   defaultUUID,
                   columnInfo[ix].autoincStart,
                   columnInfo[ix].autoincInc,
                   columnInfo[ix].autoinc_create_or_modify_Start_Increment
                 );
      else
        columnDescriptor = new ColumnDescriptor(
                       columnInfo[ix].name,
               index++,
               columnInfo[ix].dataType,
               columnInfo[ix].defaultValue,
               columnInfo[ix].defaultInfo,
               td,
               defaultUUID,
               columnInfo[ix].autoincStart,
               columnInfo[ix].autoincInc
             );

      cdlArray[ix] = columnDescriptor;
    }

    if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE )
    {
      dd.addDescriptorArray(cdlArray, td,
                DataDictionary.SYSCOLUMNS_CATALOG_NUM,
                false, tc);
    }

    // now add the column descriptors to the table.
    ColumnDescriptorList cdl = td.getColumnDescriptorList();
    for (int i = 0; i < cdlArray.length; i++)
      cdl.add(cdlArray[i]);
        
    //
    // Create a conglomerate descriptor with the conglomId filled in and
    // add it.
    //
    // RESOLVE: Get information from the conglomerate descriptor which
    //          was provided.
    //
    ConglomerateDescriptor cgd =
      ddg.newConglomerateDescriptor(conglomId, null, false, null, false, null, toid,
                      sd.getUUID());
    if ( tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE )
    {
      dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM,
             false, tc);
View Full Code Here
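
A minimal sketch of the non-temporary path above, assuming dd, sd, tc, tableName, lockGranularity and the heap conglomerate id (conglomId) are in scope; TableDescriptor.BASE_TABLE_TYPE stands in for the tableType variable of the snippet:

      DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

      // Register the table itself in SYSTABLES ...
      TableDescriptor td = ddg.newTableDescriptor(
          tableName, sd, TableDescriptor.BASE_TABLE_TYPE, lockGranularity);
      dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc);

      // ... and record its heap conglomerate in SYSCONGLOMERATES.
      ConglomerateDescriptor heapCgd =
        ddg.newConglomerateDescriptor(conglomId, null, false, null, false, null,
                      td.getUUID(), sd.getUUID());
      dd.addDescriptor(heapCgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM,
             false, tc);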

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    char            type;
    boolean            valid;
    Timestamp          time = null;
    ExecPreparedStatement    preparedStatement = null;
    boolean            initiallyCompilable;
    DataDescriptorGenerator    ddg = dd.getDataDescriptorGenerator();

    if (SanityManager.DEBUG)
    {
      SanityManager.ASSERT(row.nColumns() == SYSSTATEMENTS_COLUMN_COUNT,
                 "Wrong number of columns for a SYSSTATEMENTS row");
View Full Code Here

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    // indicate that we are in the process of booting
    booting = true;

    // set only if the child class hasn't overridden this already
    if ( dataDescriptorGenerator == null )
    { dataDescriptorGenerator = new DataDescriptorGenerator( this ); }

    if (!create) {


      // SYSTABLES

      coreInfo[SYSTABLES_CORE_NUM].setHeapConglomerate(
          getBootParameter(startParams, CFG_SYSTABLES_ID, true));

      coreInfo[SYSTABLES_CORE_NUM].setIndexConglomerate(SYSTABLESRowFactory.SYSTABLES_INDEX1_ID,
          getBootParameter(startParams, CFG_SYSTABLES_INDEX1_ID, true));


      coreInfo[SYSTABLES_CORE_NUM].setIndexConglomerate(
          SYSTABLESRowFactory.SYSTABLES_INDEX2_ID,
          getBootParameter(startParams, CFG_SYSTABLES_INDEX2_ID, true));

      // SYSCOLUMNS

      coreInfo[SYSCOLUMNS_CORE_NUM].setHeapConglomerate(
          getBootParameter(startParams, CFG_SYSCOLUMNS_ID, true));


      coreInfo[SYSCOLUMNS_CORE_NUM].setIndexConglomerate(
          SYSCOLUMNSRowFactory.SYSCOLUMNS_INDEX1_ID,
          getBootParameter(startParams, CFG_SYSCOLUMNS_INDEX1_ID, true));
      // 2nd syscolumns index added in Xena, hence may not be there
      coreInfo[SYSCOLUMNS_CORE_NUM].setIndexConglomerate(
          SYSCOLUMNSRowFactory.SYSCOLUMNS_INDEX2_ID,
          getBootParameter(startParams, CFG_SYSCOLUMNS_INDEX2_ID, false));

      // SYSCONGLOMERATES

      coreInfo[SYSCONGLOMERATES_CORE_NUM].setHeapConglomerate(
          getBootParameter(startParams, CFG_SYSCONGLOMERATES_ID, true));


      coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(
          SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX1_ID,
          getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX1_ID, true));


      coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(
          SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX2_ID,
          getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX2_ID, true));

      coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(
          SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX3_ID,
          getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX3_ID, true));


      // SYSSCHEMAS
      coreInfo[SYSSCHEMAS_CORE_NUM].setHeapConglomerate(
          getBootParameter(startParams, CFG_SYSSCHEMAS_ID, true));


      coreInfo[SYSSCHEMAS_CORE_NUM].setIndexConglomerate(
          SYSSCHEMASRowFactory.SYSSCHEMAS_INDEX1_ID,
          getBootParameter(startParams, CFG_SYSSCHEMAS_INDEX1_ID, true));

      coreInfo[SYSSCHEMAS_CORE_NUM].setIndexConglomerate(
          SYSSCHEMASRowFactory.SYSSCHEMAS_INDEX2_ID,
          getBootParameter(startParams, CFG_SYSSCHEMAS_INDEX2_ID, true));

    }



    String value = startParams.getProperty(Property.LANG_TD_CACHE_SIZE);
    tdCacheSize = PropertyUtil.intPropertyValue(Property.LANG_TD_CACHE_SIZE, value,
                     0, Integer.MAX_VALUE, Property.LANG_TD_CACHE_SIZE_DEFAULT);

   
    value = startParams.getProperty(Property.LANG_SPS_CACHE_SIZE);
    stmtCacheSize = PropertyUtil.intPropertyValue(Property.LANG_SPS_CACHE_SIZE, value,
                     0, Integer.MAX_VALUE, Property.LANG_SPS_CACHE_SIZE_DEFAULT);

    value = startParams.getProperty(Property.LANG_SEQGEN_CACHE_SIZE);
    seqgenCacheSize = PropertyUtil.intPropertyValue(Property.LANG_SEQGEN_CACHE_SIZE, value,
                     0, Integer.MAX_VALUE, Property.LANG_SEQGEN_CACHE_SIZE_DEFAULT);

    value = startParams.getProperty(Property.LANG_PERMISSIONS_CACHE_SIZE);
    permissionsCacheSize = PropertyUtil.intPropertyValue(Property.LANG_PERMISSIONS_CACHE_SIZE, value,
                     0, Integer.MAX_VALUE, Property.LANG_PERMISSIONS_CACHE_SIZE_DEFAULT);

        // See if automatic index statistics update is disabled through a
        // system wide property. May be overridden by a database specific
        // property later on.
        // The default is that automatic index statistics update is enabled.
        indexStatsUpdateDisabled = !PropertyUtil.getSystemBoolean(
                Property.STORAGE_AUTO_INDEX_STATS, true);

        // See if we should enable logging of index stats activities.
        indexStatsUpdateLogging = PropertyUtil.getSystemBoolean(
                Property.STORAGE_AUTO_INDEX_STATS_LOGGING);

        // See if we should enable tracing of index stats activities.
        indexStatsUpdateTracing = PropertyUtil.getSystemProperty(
                Property.STORAGE_AUTO_INDEX_STATS_TRACING, "off");

    /*
     * data dictionary contexts are only associated with connections.
     * we have to look for the basic data dictionary, as there is
     * no connection, and thus no context stack yet.
     */

    /*
     * Get the table descriptor cache.
     */
    CacheFactory cf =
        (CacheFactory) Monitor.startSystemModule(org.apache.derby.iapi.reference.Module.CacheFactory);
    OIDTdCache =
      cf.newCacheManager(this,
        "TableDescriptorOIDCache",
        tdCacheSize,
        tdCacheSize);
    nameTdCache =
      cf.newCacheManager(this,
        "TableDescriptorNameCache",
        tdCacheSize,
        tdCacheSize);

    if (stmtCacheSize > 0)
    {
      spsNameCache =
        cf.newCacheManager(this,
          "SPSNameDescriptorCache",
          stmtCacheSize,
          stmtCacheSize);
      spsIdHash = new Hashtable(stmtCacheSize);
      // spsTextHash = new Hashtable(stmtCacheSize);
    }

    sequenceGeneratorCache = cf.newCacheManager
            ( this, "SequenceGeneratorCache", seqgenCacheSize, seqgenCacheSize );

    /* Get the object to coordinate cache transitions */
    cacheCoordinator = new ShExLockable();

    /* Get the AccessFactory in order to do transaction stuff */
    af = (AccessFactory) Monitor.findServiceModule(this, AccessFactory.MODULE);

    /* Get the lock factory */
    lockFactory = af.getLockFactory();

    /*
     * Now we need to set up a context stack for the database creation work.
     * We assume the system boot process has already created a context
     * manager, but not that the contexts we need are there.
     */
    ContextService csf = ContextService.getFactory();

    ContextManager cm = csf.getCurrentContextManager();
    if (SanityManager.DEBUG)
      SanityManager.ASSERT((cm != null), "Failed to get current ContextManager");

    // RESOLVE other non-StandardException errors.
    bootingTC = null;
    try
    {
      // Get a transaction controller. This has the side effect of
      // creating a transaction context if there isn't one already.
      bootingTC = af.getTransaction(cm);

      /*
        We need an execution context so that we can generate rows
        REMIND: maybe only for create case?
       */
      exFactory.newExecutionContext(cm);

      DataDescriptorGenerator ddg = getDataDescriptorGenerator();

      //We should set the user schema collation type here now because
      //later on, we are going to create user schema APP. By the time any
      //user schema gets created, we should have the correct collation
      //type set for such schemas to use. For this reason, don't remove
View Full Code Here

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

  public  void  makeCatalog( TabInfoImpl          ti,
                 SchemaDescriptor      sd,
                 TransactionController     tc )
          throws StandardException
  {
    DataDescriptorGenerator ddg = getDataDescriptorGenerator();

    Properties  heapProperties = ti.getCreateHeapProperties();
    ti.setHeapConglomerate(
      createConglomerate(
        ti.getTableName(),
View Full Code Here

Examples of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator

    long          heapConglomerateNumber
    )
    throws StandardException
  {
    SchemaDescriptor    sd = getSystemSchemaDescriptor( );
    DataDescriptorGenerator ddg = getDataDescriptorGenerator();
    long          indexConglomerateNumber;

    ConglomerateDescriptor  conglomerateDescriptor = bootstrapOneIndex
      ( sd, tc, ddg, ti, indexNumber, heapConglomerateNumber );
View Full Code Here