Package org.apache.derby.iapi.store.access

Examples of org.apache.derby.iapi.store.access.GroupFetchScanController


            FKInfo fkInfo, long fkConglom, long pkConglom,
            String fkConstraintName)
    throws StandardException
  {
    ExecRow                 template;
    GroupFetchScanController   refScan = null;
    GroupFetchScanController   fkScan  = null;

    try
    {

        template = makeIndexTemplate(fkInfo, fullTemplate, cm);

        /*
        ** The indexes have been dropped and recreated, so
        ** we need to get the new index conglomerate number.
        */
        fkScan =
                    tc.openGroupFetchScan(
                        fkConglom,
                        false,                       // hold
                        0,                // read only
                        tc.MODE_TABLE,         // doesn't matter,
                                                     //   already locked
                        tc.ISOLATION_READ_COMMITTED, // doesn't matter,
                                                     //   already locked
                        (FormatableBitSet)null,          // retrieve all fields
                        (DataValueDescriptor[])null, // startKeyValue
                        ScanController.GE,           // startSearchOp
                        null,                        // qualifier
                        (DataValueDescriptor[])null, // stopKeyValue
                        ScanController.GT            // stopSearchOp
                        );

        if (SanityManager.DEBUG)
        { 
          /*
          ** Bulk insert replace calls this method regardless
          ** of whether or not any rows were inserted because
          ** it has to check any referencing foreign keys
          ** after the replace.  Otherwise, we
          ** make sure that we actually have a row in the fk.
          ** If not, we have an error because we thought that
          ** since indexRows != null, we must have gotten some
          ** rows.
          */
          if (! bulkInsertReplace)
          {
            SanityManager.ASSERT(fkScan.next(),
              "No rows in fk index, even though indexRows != null");
     
            /*
            ** Crank up the scan again.
            */ 
            fkScan.reopenScan(
              (DataValueDescriptor[])null,    // startKeyValue
              ScanController.GE,              // startSearchOp
              null,                           // qualifier
              (DataValueDescriptor[])null,    // stopKeyValue
              ScanController.GT               // stopSearchOp
                          );
          }
        }

        /*
        ** Open the referenced key scan.  Use row locking on
        ** the referenced table unless it is self-referencing
         ** (in which case we don't need locks)
        */ 
        refScan =
                    tc.openGroupFetchScan(
            pkConglom,
            false,                         // hold
            0,                 // read only
                        (fkConglom == pkConglom) ?
                tc.MODE_TABLE :
                tc.MODE_RECORD,
            tc.ISOLATION_READ_COMMITTED,  // read committed is
                                                        //    good enough
            (FormatableBitSet)null,           // retrieve all fields
            (DataValueDescriptor[])null,    // startKeyValue
            ScanController.GE,              // startSearchOp
            null,                           // qualifier
            (DataValueDescriptor[])null,    // stopKeyValue
            ScanController.GT               // stopSearchOp
            );

        /*
        ** Give the scans to the bulk checker to do its
        ** magic.  It will do a merge on the two indexes.
        */ 
        ExecRow firstFailedRow = template.getClone();
        RIBulkChecker riChecker = new RIBulkChecker(refScan,
                      fkScan,
                      template,  
                      true,         // fail on 1st failure
                      (ConglomerateController)null,
                      firstFailedRow);
 
        int numFailures = riChecker.doCheck();
        if (numFailures > 0)
        {
          StandardException se = StandardException.newException(SQLState.LANG_FK_VIOLATION, fkConstraintName,
                  fkInfo.tableName,
                  StatementUtil.typeName(fkInfo.stmtType),
                  RowUtil.toString(firstFailedRow, 0, fkInfo.colArray.length - 1));
          throw se;
        }
    }
    finally
    {
      if (fkScan != null)
      {
        fkScan.close();
        fkScan = null;
      }
      if (refScan != null)
      {
        refScan.close();
View Full Code Here


    TransactionController tc = lcc.getTransactionExecute();
    ConglomerateDescriptor[] cds;
    long[] conglomerateNumber;
    ExecIndexRow[] indexRow;
    UUID[] objectUUID;
    GroupFetchScanController gsc;
    DependencyManager dm = dd.getDependencyManager();
    //initialize numRows to -1 so we can tell if we scanned an index. 
    long numRows = -1;   
   
    td = dd.getTableDescriptor(tableId);
    if (updateStatisticsAll)
    {
      cds = td.getConglomerateDescriptors();
    }
    else
    {
      cds = new ConglomerateDescriptor[1];
      cds[0] = dd.getConglomerateDescriptor(indexNameForUpdateStatistics, sd, false);
    }

    conglomerateNumber = new long[cds.length];
    indexRow = new ExecIndexRow[cds.length];
    objectUUID = new UUID[cds.length];
    ConglomerateController heapCC =
      tc.openConglomerate(td.getHeapConglomerateId(), false, 0,
          TransactionController.MODE_RECORD,
          TransactionController.ISOLATION_REPEATABLE_READ);

    try
    {
      for (int i = 0; i < cds.length; i++)
      {
        if (!cds[i].isIndex())
        {
          conglomerateNumber[i] = -1;
          continue;
        }

        conglomerateNumber[i] = cds[i].getConglomerateNumber();

        objectUUID[i] = cds[i].getUUID();

        indexRow[i] =
          cds[i].getIndexDescriptor().getNullIndexRow(
            td.getColumnDescriptorList(),
            heapCC.newRowLocationTemplate());
      }
    }
    finally
    {
      heapCC.close();
    }

    dd.startWriting(lcc);

    dm.invalidateFor(td, DependencyManager.UPDATE_STATISTICS, lcc);

    for (int indexNumber = 0; indexNumber < conglomerateNumber.length;
       indexNumber++)
    {
      if (conglomerateNumber[indexNumber] == -1)
        continue;

      int numCols = indexRow[indexNumber].nColumns() - 1;
      long[] cardinality = new long[numCols];
      numRows = 0;
      initializeRowBuffers(indexRow[indexNumber]);

      /* Read uncommitted, with record locking. Actually CS store may
         not hold record locks */
      gsc =
        tc.openGroupFetchScan(
            conglomerateNumber[indexNumber],
            false,  // hold
            0,      // openMode: for read
            TransactionController.MODE_RECORD, // locking
            TransactionController.ISOLATION_READ_UNCOMMITTED, //isolation level
            null,   // scancolumnlist-- want everything.
            null,   // startkeyvalue-- start from the beginning.
            0,
            null,   // qualifiers, none!
            null,   // stopkeyvalue,
            0);

      try
      {
        boolean firstRow = true;
        int rowsFetched = 0;
        while ((rowsFetched = gsc.fetchNextGroup(rowBufferArray, null)) > 0)
        {
          for (int i = 0; i < rowsFetched; i++)
          {
            int whichPositionChanged = compareWithPrevKey(i, firstRow);
            firstRow = false;
            if (whichPositionChanged >= 0)
            {
              for (int j = whichPositionChanged; j < cardinality.length; j++)
                cardinality[j]++;
            }
            numRows++;
          }

          DataValueDescriptor[] tmp;
          tmp = rowBufferArray[GROUP_FETCH_SIZE - 1];
          rowBufferArray[GROUP_FETCH_SIZE - 1] = lastUniqueKey;
          lastUniqueKey = tmp;
        } // while
        gsc.setEstimatedRowCount(numRows);
      } // try
      finally
      {
        gsc.close();
        gsc = null;
      }

      if (numRows == 0)
      {
View Full Code Here

  private void defragmentRows(
      TransactionController tc,
      LanguageConnectionContext lcc)
        throws StandardException
  {
        GroupFetchScanController base_group_fetch_cc = null;
        int                      num_indexes         = 0;

        int[][]                  index_col_map       =  null;
        ScanController[]         index_scan          =  null;
        ConglomerateController[] index_cc            =  null;
        DataValueDescriptor[][]  index_row           =  null;

    TransactionController     nested_tc = null;

    try {

            nested_tc =
                tc.startNestedUserTransaction(false);

            switch (td.getTableType())
            {
            /* Skip views and vti tables */
            case TableDescriptor.VIEW_TYPE:
            case TableDescriptor.VTI_TYPE:
              return;
            // other types give various errors here
            // DERBY-719,DERBY-720
            default:
              break;
            }


      ConglomerateDescriptor heapCD =
                td.getConglomerateDescriptor(td.getHeapConglomerateId());

      /* Get a row template for the base table */
      ExecRow baseRow =
                lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(
                    td.getNumberOfColumns());


      /* Fill the row with nulls of the correct type */
      ColumnDescriptorList cdl = td.getColumnDescriptorList();
      int           cdlSize = cdl.size();

      for (int index = 0; index < cdlSize; index++)
      {
        ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
        baseRow.setColumn(cd.getPosition(), cd.getType().getNull());
      }

            DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
            row_array[0] = baseRow.getRowArray();
            RowLocation[] old_row_location_array = new RowLocation[100];
            RowLocation[] new_row_location_array = new RowLocation[100];

            // Create the following 3 arrays which will be used to update
            // each index as the scan moves rows about the heap as part of
            // the compress:
            //     index_col_map - map location of index cols in the base row,
            //                     ie. index_col_map[0] is column offset of 1st
            //                     key column in base row.  All offsets are 0
            //                     based.
            //     index_scan - open ScanController used to delete old index row
            //     index_cc   - open ConglomerateController used to insert new
            //                  row

            ConglomerateDescriptor[] conglom_descriptors =
                td.getConglomerateDescriptors();

            // conglom_descriptors has an entry for the conglomerate and each
            // one of its indexes.
            num_indexes = conglom_descriptors.length - 1;

            // if indexes exist, set up data structures to update them
            if (num_indexes > 0)
            {
                // allocate arrays
                index_col_map   = new int[num_indexes][];
                index_scan      = new ScanController[num_indexes];
                index_cc        = new ConglomerateController[num_indexes];
                index_row       = new DataValueDescriptor[num_indexes][];

                setup_indexes(
                    nested_tc,
                    td,
                    index_col_map,
                    index_scan,
                    index_cc,
                    index_row);

            }

      /* Open the heap for reading */
      base_group_fetch_cc =
                nested_tc.defragmentConglomerate(
                    td.getHeapConglomerateId(),
                    false,
                    true,
                    TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_TABLE,
          TransactionController.ISOLATION_SERIALIZABLE);

            int num_rows_fetched = 0;
            while ((num_rows_fetched =
                        base_group_fetch_cc.fetchNextGroup(
                            row_array,
                            old_row_location_array,
                            new_row_location_array)) != 0)
            {
                if (num_indexes > 0)
                {
                    for (int row = 0; row < num_rows_fetched; row++)
                    {
                        for (int index = 0; index < num_indexes; index++)
                        {
                            fixIndex(
                                row_array[row],
                                index_row[index],
                                old_row_location_array[row],
                                new_row_location_array[row],
                                index_cc[index],
                                index_scan[index],
                                index_col_map[index]);
                        }
                    }
                }
            }

            // TODO - It would be better if commits happened more frequently
            // in the nested transaction, but to do that there has to be more
            // logic to catch a ddl that might jump in the middle of the
            // above loop and invalidate the various table control structures
            // which are needed to properly update the indexes.  For example
            // the above loop would corrupt an index added midway through
            // the loop if not properly handled.  See DERBY-1188. 
            nested_tc.commit();
     
    }
    finally
    {
                /* Clean up before we leave */
                if (base_group_fetch_cc != null)
                {
                    base_group_fetch_cc.close();
                    base_group_fetch_cc = null;
                }

                if (num_indexes > 0)
                {
View Full Code Here

    String                  tableName,
    DataDictionary          data_dictionary,
    TransactionController   tc)
        throws SQLException
  {
        GroupFetchScanController base_group_fetch_cc = null;
        int                      num_indexes         = 0;

        int[][]                  index_col_map       =  null;
        ScanController[]         index_scan          =  null;
        ConglomerateController[] index_cc            =  null;
        DataValueDescriptor[][]  index_row           =  null;

    LanguageConnectionContext lcc       = ConnectionUtil.getCurrentLCC();
    TransactionController     nested_tc = null;

    try {

            SchemaDescriptor sd =
                data_dictionary.getSchemaDescriptor(
                    schemaName, nested_tc, true);
            TableDescriptor td =
                data_dictionary.getTableDescriptor(tableName, sd);
            nested_tc =
                tc.startNestedUserTransaction(false);

            if (td == null)
            {
                throw StandardException.newException(
                    SQLState.LANG_TABLE_NOT_FOUND,
                    schemaName + "." + tableName);
            }

            switch (td.getTableType())
            {
            /* Skip views and vti tables */
            case TableDescriptor.VIEW_TYPE:
            case TableDescriptor.VTI_TYPE:
              return;
            // other types give various errors here
            // DERBY-719,DERBY-720
            default:
              break;
            }


      ConglomerateDescriptor heapCD =
                td.getConglomerateDescriptor(td.getHeapConglomerateId());

      /* Get a row template for the base table */
      ExecRow baseRow =
                lcc.getExecutionContext().getExecutionFactory().getValueRow(
                    td.getNumberOfColumns());


      /* Fill the row with nulls of the correct type */
      ColumnDescriptorList cdl = td.getColumnDescriptorList();
      int           cdlSize = cdl.size();

      for (int index = 0; index < cdlSize; index++)
      {
        ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
        baseRow.setColumn(cd.getPosition(), cd.getType().getNull());
      }

            DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
            row_array[0] = baseRow.getRowArray();
            RowLocation[] old_row_location_array = new RowLocation[100];
            RowLocation[] new_row_location_array = new RowLocation[100];

            // Create the following 3 arrays which will be used to update
            // each index as the scan moves rows about the heap as part of
            // the compress:
            //     index_col_map - map location of index cols in the base row,
            //                     ie. index_col_map[0] is column offset of 1st
            //                     key column in base row.  All offsets are 0
            //                     based.
            //     index_scan - open ScanController used to delete old index row
            //     index_cc   - open ConglomerateController used to insert new
            //                  row

            ConglomerateDescriptor[] conglom_descriptors =
                td.getConglomerateDescriptors();

            // conglom_descriptors has an entry for the conglomerate and each
            // one of its indexes.
            num_indexes = conglom_descriptors.length - 1;

            // if indexes exist, set up data structures to update them
            if (num_indexes > 0)
            {
                // allocate arrays
                index_col_map   = new int[num_indexes][];
                index_scan      = new ScanController[num_indexes];
                index_cc        = new ConglomerateController[num_indexes];
                index_row       = new DataValueDescriptor[num_indexes][];

                setup_indexes(
                    nested_tc,
                    td,
                    index_col_map,
                    index_scan,
                    index_cc,
                    index_row);

            }

      /* Open the heap for reading */
      base_group_fetch_cc =
                nested_tc.defragmentConglomerate(
                    td.getHeapConglomerateId(),
                    false,
                    true,
                    TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_TABLE,
          TransactionController.ISOLATION_SERIALIZABLE);

            int num_rows_fetched = 0;
            while ((num_rows_fetched =
                        base_group_fetch_cc.fetchNextGroup(
                            row_array,
                            old_row_location_array,
                            new_row_location_array)) != 0)
            {
                if (num_indexes > 0)
                {
                    for (int row = 0; row < num_rows_fetched; row++)
                    {
                        for (int index = 0; index < num_indexes; index++)
                        {
                            fixIndex(
                                row_array[row],
                                index_row[index],
                                old_row_location_array[row],
                                new_row_location_array[row],
                                index_cc[index],
                                index_scan[index],
                                index_col_map[index]);
                        }
                    }
                }
            }

            // TODO - It would be better if commits happened more frequently
            // in the nested transaction, but to do that there has to be more
            // logic to catch a ddl that might jump in the middle of the
            // above loop and invalidate the various table control structures
            // which are needed to properly update the indexes.  For example
            // the above loop would corrupt an index added midway through
            // the loop if not properly handled.  See DERBY-1188. 
            nested_tc.commit();
     
    }
    catch (StandardException se)
    {
      throw PublicAPI.wrapStandardException(se);
    }
    finally
    {
            try
            {
                /* Clean up before we leave */
                if (base_group_fetch_cc != null)
                {
                    base_group_fetch_cc.close();
                    base_group_fetch_cc = null;
                }

                if (num_indexes > 0)
                {
View Full Code Here

            FKInfo fkInfo, long fkConglom, long pkConglom,
            String fkConstraintName)
    throws StandardException
  {
    ExecRow                 template;
    GroupFetchScanController   refScan = null;
    GroupFetchScanController   fkScan  = null;

    try
    {

        template = makeIndexTemplate(fkInfo, fullTemplate, cm);

        /*
        ** The indexes have been dropped and recreated, so
        ** we need to get the new index conglomerate number.
        */
        fkScan =
                    tc.openGroupFetchScan(
                        fkConglom,
                        false,                       // hold
                        0,                // read only
                        tc.MODE_TABLE,         // doesn't matter,
                                                     //   already locked
                        tc.ISOLATION_READ_COMMITTED, // doesn't matter,
                                                     //   already locked
                        (FormatableBitSet)null,          // retrieve all fields
                        (DataValueDescriptor[])null, // startKeyValue
                        ScanController.GE,           // startSearchOp
                        null,                        // qualifier
                        (DataValueDescriptor[])null, // stopKeyValue
                        ScanController.GT            // stopSearchOp
                        );

        if (SanityManager.DEBUG)
        { 
          /*
          ** Bulk insert replace calls this method regardless
          ** of whether or not any rows were inserted because
          ** it has to check any referencing foreign keys
          ** after the replace.  Otherwise, we
          ** make sure that we actually have a row in the fk.
          ** If not, we have an error because we thought that
          ** since indexRows != null, we must have gotten some
          ** rows.
          */
          if (! bulkInsertReplace)
          {
            SanityManager.ASSERT(fkScan.next(),
              "No rows in fk index, even though indexRows != null");
     
            /*
            ** Crank up the scan again.
            */ 
            fkScan.reopenScan(
              (DataValueDescriptor[])null,    // startKeyValue
              ScanController.GE,              // startSearchOp
              null,                           // qualifier
              (DataValueDescriptor[])null,    // stopKeyValue
              ScanController.GT               // stopSearchOp
                          );
          }
        }

        /*
        ** Open the referenced key scan.  Use row locking on
        ** the referenced table unless it is self-referencing
         ** (in which case we don't need locks)
        */ 
        refScan =
                    tc.openGroupFetchScan(
            pkConglom,
            false,                         // hold
            0,                 // read only
                        (fkConglom == pkConglom) ?
                tc.MODE_TABLE :
                tc.MODE_RECORD,
            tc.ISOLATION_READ_COMMITTED,  // read committed is
                                                        //    good enough
            (FormatableBitSet)null,           // retrieve all fields
            (DataValueDescriptor[])null,    // startKeyValue
            ScanController.GE,              // startSearchOp
            null,                           // qualifier
            (DataValueDescriptor[])null,    // stopKeyValue
            ScanController.GT               // stopSearchOp
            );

        /*
        ** Give the scans to the bulk checker to do its
        ** magic.  It will do a merge on the two indexes.
        */ 
        ExecRow firstFailedRow = template.getClone();
        RIBulkChecker riChecker = new RIBulkChecker(refScan,
                      fkScan,
                      template,  
                      true,         // fail on 1st failure
                      (ConglomerateController)null,
                      firstFailedRow);
 
        int numFailures = riChecker.doCheck();
        if (numFailures > 0)
        {
          StandardException se = StandardException.newException(SQLState.LANG_FK_VIOLATION, fkConstraintName,
                  fkInfo.tableName,
                  StatementUtil.typeName(fkInfo.stmtType),
                  RowUtil.toString(firstFailedRow, 0, fkInfo.colArray.length - 1));
          throw se;
        }
    }
    finally
    {
      if (fkScan != null)
      {
        fkScan.close();
        fkScan = null;
      }
      if (refScan != null)
      {
        refScan.close();
View Full Code Here

    TransactionController tc = lcc.getTransactionExecute();
    ConglomerateDescriptor[] cds;
    long[] conglomerateNumber;
    ExecIndexRow[] indexRow;
    UUID[] objectUUID;
    GroupFetchScanController gsc;
    DependencyManager dm = dd.getDependencyManager();
    //initialize numRows to -1 so we can tell if we scanned an index. 
    long numRows = -1;   
   
    td = dd.getTableDescriptor(tableId);
    if (updateStatisticsAll)
    {
      cds = td.getConglomerateDescriptors();
    }
    else
    {
      cds = new ConglomerateDescriptor[1];
      cds[0] = dd.getConglomerateDescriptor(indexNameForUpdateStatistics, sd, false);
    }

    conglomerateNumber = new long[cds.length];
    indexRow = new ExecIndexRow[cds.length];
    objectUUID = new UUID[cds.length];
    ConglomerateController heapCC =
      tc.openConglomerate(td.getHeapConglomerateId(), false, 0,
          TransactionController.MODE_RECORD,
          TransactionController.ISOLATION_REPEATABLE_READ);

    try
    {
      for (int i = 0; i < cds.length; i++)
      {
        if (!cds[i].isIndex())
        {
          conglomerateNumber[i] = -1;
          continue;
        }

        conglomerateNumber[i] = cds[i].getConglomerateNumber();

        objectUUID[i] = cds[i].getUUID();

        indexRow[i] =
          cds[i].getIndexDescriptor().getNullIndexRow(
            td.getColumnDescriptorList(),
            heapCC.newRowLocationTemplate());
      }
    }
    finally
    {
      heapCC.close();
    }

    dd.startWriting(lcc);

    dm.invalidateFor(td, DependencyManager.UPDATE_STATISTICS, lcc);

    for (int indexNumber = 0; indexNumber < conglomerateNumber.length;
       indexNumber++)
    {
      if (conglomerateNumber[indexNumber] == -1)
        continue;

      int numCols = indexRow[indexNumber].nColumns() - 1;
      long[] cardinality = new long[numCols];
      numRows = 0;
      initializeRowBuffers(indexRow[indexNumber]);

      /* Read uncommitted, with record locking. Actually CS store may
         not hold record locks */
      gsc =
        tc.openGroupFetchScan(
            conglomerateNumber[indexNumber],
            false,  // hold
            0,      // openMode: for read
            TransactionController.MODE_RECORD, // locking
            TransactionController.ISOLATION_READ_UNCOMMITTED, //isolation level
            null,   // scancolumnlist-- want everything.
            null,   // startkeyvalue-- start from the beginning.
            0,
            null,   // qualifiers, none!
            null,   // stopkeyvalue,
            0);

      try
      {
        boolean firstRow = true;
        int rowsFetched = 0;
        while ((rowsFetched = gsc.fetchNextGroup(rowBufferArray, null)) > 0)
        {
          for (int i = 0; i < rowsFetched; i++)
          {
            int whichPositionChanged = compareWithPrevKey(i, firstRow);
            firstRow = false;
            if (whichPositionChanged >= 0)
            {
              for (int j = whichPositionChanged; j < cardinality.length; j++)
                cardinality[j]++;
            }
            numRows++;
          }

          DataValueDescriptor[] tmp;
          tmp = rowBufferArray[GROUP_FETCH_SIZE - 1];
          rowBufferArray[GROUP_FETCH_SIZE - 1] = lastUniqueKey;
          lastUniqueKey = tmp;
        } // while
        gsc.setEstimatedRowCount(numRows);
      } // try
      finally
      {
        gsc.close();
        gsc = null;
      }

      if (numRows == 0)
      {
View Full Code Here

  private void defragmentRows(
      TransactionController tc,
      LanguageConnectionContext lcc)
        throws StandardException
  {
        GroupFetchScanController base_group_fetch_cc = null;
        int                      num_indexes         = 0;

        int[][]                  index_col_map       =  null;
        ScanController[]         index_scan          =  null;
        ConglomerateController[] index_cc            =  null;
        DataValueDescriptor[][]  index_row           =  null;

    TransactionController     nested_tc = null;

    try {

            nested_tc =
                tc.startNestedUserTransaction(false);

            switch (td.getTableType())
            {
            /* Skip views and vti tables */
            case TableDescriptor.VIEW_TYPE:
            case TableDescriptor.VTI_TYPE:
              return;
            // other types give various errors here
            // DERBY-719,DERBY-720
            default:
              break;
            }


      ConglomerateDescriptor heapCD =
                td.getConglomerateDescriptor(td.getHeapConglomerateId());

      /* Get a row template for the base table */
      ExecRow baseRow =
                lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(
                    td.getNumberOfColumns());


      /* Fill the row with nulls of the correct type */
      ColumnDescriptorList cdl = td.getColumnDescriptorList();
      int           cdlSize = cdl.size();

      for (int index = 0; index < cdlSize; index++)
      {
        ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
        baseRow.setColumn(cd.getPosition(), cd.getType().getNull());
      }

            DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
            row_array[0] = baseRow.getRowArray();
            RowLocation[] old_row_location_array = new RowLocation[100];
            RowLocation[] new_row_location_array = new RowLocation[100];

            // Create the following 3 arrays which will be used to update
            // each index as the scan moves rows about the heap as part of
            // the compress:
            //     index_col_map - map location of index cols in the base row,
            //                     ie. index_col_map[0] is column offset of 1st
            //                     key column in base row.  All offsets are 0
            //                     based.
            //     index_scan - open ScanController used to delete old index row
            //     index_cc   - open ConglomerateController used to insert new
            //                  row

            ConglomerateDescriptor[] conglom_descriptors =
                td.getConglomerateDescriptors();

            // conglom_descriptors has an entry for the conglomerate and each
            // one of its indexes.
            num_indexes = conglom_descriptors.length - 1;

            // if indexes exist, set up data structures to update them
            if (num_indexes > 0)
            {
                // allocate arrays
                index_col_map   = new int[num_indexes][];
                index_scan      = new ScanController[num_indexes];
                index_cc        = new ConglomerateController[num_indexes];
                index_row       = new DataValueDescriptor[num_indexes][];

                setup_indexes(
                    nested_tc,
                    td,
                    index_col_map,
                    index_scan,
                    index_cc,
                    index_row);

            }

      /* Open the heap for reading */
      base_group_fetch_cc =
                nested_tc.defragmentConglomerate(
                    td.getHeapConglomerateId(),
                    false,
                    true,
                    TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_TABLE,
          TransactionController.ISOLATION_SERIALIZABLE);

            int num_rows_fetched = 0;
            while ((num_rows_fetched =
                        base_group_fetch_cc.fetchNextGroup(
                            row_array,
                            old_row_location_array,
                            new_row_location_array)) != 0)
            {
                if (num_indexes > 0)
                {
                    for (int row = 0; row < num_rows_fetched; row++)
                    {
                        for (int index = 0; index < num_indexes; index++)
                        {
                            fixIndex(
                                row_array[row],
                                index_row[index],
                                old_row_location_array[row],
                                new_row_location_array[row],
                                index_cc[index],
                                index_scan[index],
                                index_col_map[index]);
                        }
                    }
                }
            }

            // TODO - It would be better if commits happened more frequently
            // in the nested transaction, but to do that there has to be more
            // logic to catch a ddl that might jump in the middle of the
            // above loop and invalidate the various table control structures
            // which are needed to properly update the indexes.  For example
            // the above loop would corrupt an index added midway through
            // the loop if not properly handled.  See DERBY-1188. 
            nested_tc.commit();
     
    }
    finally
    {
                /* Clean up before we leave */
                if (base_group_fetch_cc != null)
                {
                    base_group_fetch_cc.close();
                    base_group_fetch_cc = null;
                }

                if (num_indexes > 0)
                {
View Full Code Here

            FKInfo fkInfo, long fkConglom, long pkConglom,
                        String fkConstraintName, ExecRow fullTemplate)
    throws StandardException
  {
    ExecRow                 template;
    GroupFetchScanController   refScan = null;
    GroupFetchScanController   fkScan  = null;

    try
    {

        template = makeIndexTemplate(fkInfo, fullTemplate, cm);

        /*
        ** The indexes have been dropped and recreated, so
        ** we need to get the new index conglomerate number.
        */
        fkScan =
                    tc.openGroupFetchScan(
                        fkConglom,
                        false,                       // hold
                        0,                // read only
                        // doesn't matter, already locked
                        TransactionController.MODE_TABLE,
                        // doesn't matter, already locked
                        TransactionController.ISOLATION_READ_COMMITTED,
                        (FormatableBitSet)null,          // retrieve all fields
                        (DataValueDescriptor[])null, // startKeyValue
                        ScanController.GE,           // startSearchOp
                        null,                        // qualifier
                        (DataValueDescriptor[])null, // stopKeyValue
                        ScanController.GT            // stopSearchOp
                        );

        if (SanityManager.DEBUG)
        { 
          /*
          ** Bulk insert replace calls this method regardless
          ** of whether or not any rows were inserted because
          ** it has to check any referencing foreign keys
          ** after the replace.  Otherwise, we
          ** make sure that we actually have a row in the fk.
          ** If not, we have an error because we thought that
          ** since indexRows != null, we must have gotten some
          ** rows.
          */
          if (! bulkInsertReplace)
          {
            SanityManager.ASSERT(fkScan.next(),
              "No rows in fk index, even though indexRows != null");
     
            /*
            ** Crank up the scan again.
            */ 
            fkScan.reopenScan(
              (DataValueDescriptor[])null,    // startKeyValue
              ScanController.GE,              // startSearchOp
              null,                           // qualifier
              (DataValueDescriptor[])null,    // stopKeyValue
              ScanController.GT               // stopSearchOp
                          );
          }
        }

        /*
        ** Open the referenced key scan.  Use row locking on
        ** the referenced table unless it is self-referencing
         ** (in which case we don't need locks)
        */ 
        refScan =
                    tc.openGroupFetchScan(
            pkConglom,
            false,                         // hold
            0,                 // read only
                        (fkConglom == pkConglom) ?
                                TransactionController.MODE_TABLE :
                                TransactionController.MODE_RECORD,
                        // read committed is good enough
                        TransactionController.ISOLATION_READ_COMMITTED,
            (FormatableBitSet)null,           // retrieve all fields
            (DataValueDescriptor[])null,    // startKeyValue
            ScanController.GE,              // startSearchOp
            null,                           // qualifier
            (DataValueDescriptor[])null,    // stopKeyValue
            ScanController.GT               // stopSearchOp
            );

        /*
        ** Give the scans to the bulk checker to do its
        ** magic.  It will do a merge on the two indexes.
        */ 
        ExecRow firstFailedRow = template.getClone();
        RIBulkChecker riChecker = new RIBulkChecker(refScan,
                      fkScan,
                      template,  
                      true,         // fail on 1st failure
                      (ConglomerateController)null,
                      firstFailedRow);
 
        int numFailures = riChecker.doCheck();
        if (numFailures > 0)
        {
          StandardException se = StandardException.newException(SQLState.LANG_FK_VIOLATION, fkConstraintName,
                  fkInfo.tableName,
                  StatementUtil.typeName(fkInfo.stmtType),
                  RowUtil.toString(firstFailedRow, 0, fkInfo.colArray.length - 1));
          throw se;
        }
    }
    finally
    {
      if (fkScan != null)
      {
        fkScan.close();
        fkScan = null;
      }
      if (refScan != null)
      {
        refScan.close();
View Full Code Here

     * @param tc                transaction controller to use to do updates.
     *
     **/
    private void defragmentRows(TransactionController tc)
            throws StandardException {
        GroupFetchScanController base_group_fetch_cc = null;
        int                      num_indexes         = 0;

        int[][]                  index_col_map       =  null;
        ScanController[]         index_scan          =  null;
        ConglomerateController[] index_cc            =  null;
        DataValueDescriptor[][]  index_row           =  null;

    TransactionController     nested_tc = null;

    try {

            nested_tc =
                tc.startNestedUserTransaction(false, true);

            switch (td.getTableType())
            {
            /* Skip views and vti tables */
            case TableDescriptor.VIEW_TYPE:
            case TableDescriptor.VTI_TYPE:
              return;
            // other types give various errors here
            // DERBY-719,DERBY-720
            default:
              break;
            }

      /* Get a row template for the base table */
      ExecRow baseRow =
                lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(
                    td.getNumberOfColumns());


      /* Fill the row with nulls of the correct type */
      ColumnDescriptorList cdl = td.getColumnDescriptorList();
      int           cdlSize = cdl.size();

      for (int index = 0; index < cdlSize; index++)
      {
        ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
        baseRow.setColumn(cd.getPosition(), cd.getType().getNull());
      }

            DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
            row_array[0] = baseRow.getRowArray();
            RowLocation[] old_row_location_array = new RowLocation[100];
            RowLocation[] new_row_location_array = new RowLocation[100];

            // Create the following 3 arrays which will be used to update
            // each index as the scan moves rows about the heap as part of
            // the compress:
            //     index_col_map - map location of index cols in the base row,
            //                     ie. index_col_map[0] is column offset of 1st
            //                     key column in base row.  All offsets are 0
            //                     based.
            //     index_scan - open ScanController used to delete old index row
            //     index_cc   - open ConglomerateController used to insert new
            //                  row

            ConglomerateDescriptor[] conglom_descriptors =
                td.getConglomerateDescriptors();

            // conglom_descriptors has an entry for the conglomerate and each
            // one of it's indexes.
            num_indexes = conglom_descriptors.length - 1;

            // if indexes exist, set up data structures to update them
            if (num_indexes > 0)
            {
                // allocate arrays
                index_col_map   = new int[num_indexes][];
                index_scan      = new ScanController[num_indexes];
                index_cc        = new ConglomerateController[num_indexes];
                index_row       = new DataValueDescriptor[num_indexes][];

                setup_indexes(
                    nested_tc,
                    td,
                    index_col_map,
                    index_scan,
                    index_cc,
                    index_row);

            }

      /* Open the heap for reading */
      base_group_fetch_cc =
                nested_tc.defragmentConglomerate(
                    td.getHeapConglomerateId(),
                    false,
                    true,
                    TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_TABLE,
          TransactionController.ISOLATION_SERIALIZABLE);

            int num_rows_fetched = 0;
            while ((num_rows_fetched =
                        base_group_fetch_cc.fetchNextGroup(
                            row_array,
                            old_row_location_array,
                            new_row_location_array)) != 0)
            {
                if (num_indexes > 0)
                {
                    for (int row = 0; row < num_rows_fetched; row++)
                    {
                        for (int index = 0; index < num_indexes; index++)
                        {
                            fixIndex(
                                row_array[row],
                                index_row[index],
                                old_row_location_array[row],
                                new_row_location_array[row],
                                index_cc[index],
                                index_scan[index],
                                index_col_map[index]);
                        }
                    }
                }
            }

            // TODO - It would be better if commits happened more frequently
            // in the nested transaction, but to do that there has to be more
            // logic to catch a ddl that might jump in the middle of the
            // above loop and invalidate the various table control structures
            // which are needed to properly update the indexes.  For example
            // the above loop would corrupt an index added midway through
            // the loop if not properly handled.  See DERBY-1188. 
            nested_tc.commit();
     
    }
    finally
    {
                /* Clean up before we leave */
                if (base_group_fetch_cc != null)
                {
                    base_group_fetch_cc.close();
                    base_group_fetch_cc = null;
                }

                if (num_indexes > 0)
                {
View Full Code Here

            long[]        cardinality = new long[numCols];
            KeyComparator cmp         = new KeyComparator(indexRow[indexNumber]);

            /* Read uncommitted, with record locking. Actually CS store may
               not hold record locks */
            GroupFetchScanController gsc =
                tc.openGroupFetchScan(
                        conglomerateNumber[indexNumber],
                        false,  // hold
                        0,
                        TransactionController.MODE_RECORD, // locking
                        TransactionController.ISOLATION_READ_UNCOMMITTED,
                        null,   // scancolumnlist-- want everything.
                        null,   // startkeyvalue-- start from the beginning.
                        0,
                        null,   // qualifiers, none!
                        null,   // stopkeyvalue,
                        0);

            try
            {
                int     rowsFetched           = 0;
                boolean giving_up_on_shutdown = false;

                while ((rowsFetched = cmp.fetchRows(gsc)) > 0)
                {
                    // DERBY-5108
                    // Check if daemon has been disabled, and if so stop
                    // scan and exit asap.  On shutdown the system will
                    // send interrupts, but the system currently will
                    // recover from these during the scan and allow the
                    // scan to finish. Checking here after each group
                    // I/O that is processed as a convenient point.
                    if (asBackgroundTask) {
                        if (isShuttingDown()) {
                            giving_up_on_shutdown = true;
                            break;
                        }
                    }

                    for (int i = 0; i < rowsFetched; i++)
                    {
                        int whichPositionChanged = cmp.compareWithPrevKey(i);
                        if (whichPositionChanged >= 0) {
                            for (int j = whichPositionChanged; j < numCols; j++)
                                cardinality[j]++;
                        }
                    }

                } // while

                if (giving_up_on_shutdown)
                    break;

                gsc.setEstimatedRowCount(cmp.getRowCount());
            } // try
            finally
            {
                gsc.close();
                gsc = null;
            }
            scanTimes[sci++][2] = System.currentTimeMillis();

            // We have scanned the indexes, so let's give this a few attempts
View Full Code Here

TOP

Related Classes of org.apache.derby.iapi.store.access.GroupFetchScanController

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.