Package org.apache.derby.iapi.sql.dictionary

Examples of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor
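
The examples below share a common pattern: obtain ConglomerateDescriptors from a TableDescriptor, skip the heap conglomerate, and read the index metadata off each descriptor. The following is a minimal sketch of that pattern, assuming a TableDescriptor has already been looked up through the data dictionary; the class and method names are illustrative only and are not part of Derby.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
import org.apache.derby.iapi.sql.dictionary.TableDescriptor;

public class ConglomerateDescriptorExample
{
  // Illustrative helper: enumerate the index conglomerates of a table.
  public static void listIndexConglomerates(TableDescriptor td)
    throws StandardException
  {
    ConglomerateDescriptor[] cds = td.getConglomerateDescriptors();

    for (int i = 0; i < cds.length; i++)
    {
      ConglomerateDescriptor cd = cds[i];

      // Skip the heap; only index conglomerates carry an IndexRowGenerator.
      if (!cd.isIndex()) { continue; }

      // The conglomerate number identifies the backing storage conglomerate;
      // the conglomerate name may be null (the examples below check for that).
      long   conglomNum  = cd.getConglomerateNumber();
      String conglomName = cd.getConglomerateName();

      // The IndexRowGenerator describes the index shape: base column
      // positions and per-column sort direction.
      IndexRowGenerator irg = cd.getIndexDescriptor();
      int[]     baseCols  = irg.baseColumnPositions();
      boolean[] ascending = irg.isAscending();
      // ... use conglomNum, conglomName, baseCols, ascending ...
    }
  }
}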


        {
          numIndexes--;
          /* Get the first conglomerate with this conglom number each time;
           * each duplicate will eventually be dropped.
           */
          ConglomerateDescriptor cd = td.getConglomerateDescriptor
                        (indexConglomerateNumbers[i]);

          dropConglomerate(cd, td, true, newCongloms, activation,
            activation.getLanguageConnectionContext());

          compressIRGs[i] = null;    // mark it
          continue;
        }
        // Give an error for a unique index on multiple columns that includes
        // the column we are dropping (RESTRICT); such an index does not back
        // a constraint, because constraints have already been handled.
        if (compressIRGs[i].isUnique())
        {
          ConglomerateDescriptor cd = td.getConglomerateDescriptor
                        (indexConglomerateNumbers[i]);
          throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
                    dm.getActionString(DependencyManager.DROP_COLUMN),
                    columnInfo[0].name, "UNIQUE INDEX",
                    cd.getConglomerateName() );
        }
      }

      /* If there are new backing conglomerates which must be
       * created to replace a dropped shared conglomerate
View Full Code Here


        continue;
      }

      TableDescriptor  pktd = refcd.getTableDescriptor();
      UUID pkuuid = refcd.getIndexId();
      ConglomerateDescriptor pkIndexConglom = pktd.getConglomerateDescriptor(pkuuid);

      TableDescriptor refTd = cd.getTableDescriptor();
      fkVector.addElement(new FKInfo(
                  fkNames,              // foreign key names
                  refTd.getName(),        // table being modified
                  statementType,            // INSERT|UPDATE|DELETE
                  type,                // FOREIGN_KEY|REFERENCED_KEY
                  pkuuid,                // referenced backing index uuid
                  pkIndexConglom.getConglomerateNumber(), // referenced backing index conglom
                  uuids,                // fk backing index uuids
                  conglomNumbers,            // fk backing index congloms
                  isSelfReferencingFK,        // is self ref array of bool
                  remapReferencedColumns(cd, rowMap),  // columns referenced by key
                  dd.getRowLocationTemplate(getLanguageConnectionContext(), refTd),
View Full Code Here

    long[] distinctConglomNums = new long[cds.length - 1];
    int distinctCount = 0;

    for (int index = 0; index < cds.length; index++)
    {
      ConglomerateDescriptor cd = cds[index];

      if (!cd.isIndex()) { continue; }

      /*
      ** If this index doesn't contain any updated
      ** columns, then we can skip it.
      */
      if ((updatedColumns != null) &&
        (!updatedColumns.updateOverlaps(
          cd.getIndexDescriptor().baseColumnPositions())))
      { continue; }

      if ( conglomVector != null )
      {
        int i;
        for (i = 0; i < distinctCount; i++)
        {
          if (distinctConglomNums[i] == cd.getConglomerateNumber())
            break;
        }
        if (i == distinctCount)    // first appearance
        {
          distinctConglomNums[distinctCount++] = cd.getConglomerateNumber();
          conglomVector.addElement( cd );
        }
      }

      IndexRowGenerator ixd = cd.getIndexDescriptor();
      int[] cols = ixd.baseColumnPositions();

      if (colBitSet != null)
      {
        for (int i = 0; i < cols.length; i++)
View Full Code Here
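
The loop above deduplicates with a distinctConglomNums array because more than one ConglomerateDescriptor can report the same conglomerate number (a backing index shared by several constraints; the first example's handling of duplicates comes from the same situation). As a sketch only, the same deduplication could be written with a java.util.HashSet, assuming the surrounding declarations of cds and conglomVector and an added import of java.util.HashSet; this is an illustration, not the Derby implementation.

      // Illustrative alternative to the distinctConglomNums[] scan:
      // HashSet.add() returns false for a conglomerate number already seen.
      HashSet seenConglomNums = new HashSet();

      for (int index = 0; index < cds.length; index++)
      {
        ConglomerateDescriptor cd = cds[index];

        if (!cd.isIndex()) { continue; }

        if ((conglomVector != null) &&
            seenConglomNums.add(Long.valueOf(cd.getConglomerateNumber())))
        {
          conglomVector.addElement(cd);    // first descriptor for this number
        }
      }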

  (
    Vector  affectedConglomerates
    )
    throws StandardException
  {
    ConglomerateDescriptor  cd;
    int            indexCount = affectedConglomerates.size();
    CompilerContext      cc = getCompilerContext();

    indicesToMaintain = new IndexRowGenerator[ indexCount ];
    indexConglomerateNumbers = new long[ indexCount ];
    indexNames = new String[indexCount];

    for ( int ictr = 0; ictr < indexCount; ictr++ )
    {
      cd = (ConglomerateDescriptor) affectedConglomerates.elementAt( ictr );

      indicesToMaintain[ ictr ] = cd.getIndexDescriptor();
      indexConglomerateNumbers[ ictr ] = cd.getConglomerateNumber();
      indexNames[ictr] =
        ((cd.isConstraint()) ? null : cd.getConglomerateName());

      cc.createDependency(cd);
    }

  }
View Full Code Here

                  RowOrdering rowOrdering)
          throws StandardException
  {
    String userSpecifiedIndexName = getUserSpecifiedIndexName();
    AccessPath ap = getCurrentAccessPath();
    ConglomerateDescriptor currentConglomerateDescriptor =
                        ap.getConglomerateDescriptor();

    optimizer.trace(Optimizer.CALLING_NEXT_ACCESS_PATH,
             ((predList == null) ? 0 : predList.size()),
             0, 0.0, getExposedName());

    /*
    ** Remove the ordering of the current conglomerate descriptor,
    ** if any.
    */
    rowOrdering.removeOptimizable(getTableNumber());

    // RESOLVE: This will have to be modified to step through the
    // join strategies as well as the conglomerates.

    if (userSpecifiedIndexName != null)
    {
      /*
      ** User specified an index name, so we should look at only one
      ** index.  If there is a current conglomerate descriptor, and there
      ** are no more join strategies, we've already looked at the index,
      ** so go back to null.
      */
      if (currentConglomerateDescriptor != null)
      {
        if ( ! super.nextAccessPath(optimizer,
                      predList,
                      rowOrdering) )
        {
          currentConglomerateDescriptor = null;
        }
      }
      else
      {
        optimizer.trace(Optimizer.LOOKING_FOR_SPECIFIED_INDEX,
                tableNumber, 0, 0.0, userSpecifiedIndexName);

        if (StringUtil.SQLToUpperCase(userSpecifiedIndexName).equals("NULL"))
        {
          /* Special case - user-specified table scan */
          currentConglomerateDescriptor =
            tableDescriptor.getConglomerateDescriptor(
                    tableDescriptor.getHeapConglomerateId()
                  );
        }
        else
        {
          /* User-specified index name */
          getConglomDescs();
       
          for (int index = 0; index < conglomDescs.length; index++)
          {
            currentConglomerateDescriptor = conglomDescs[index];
            String conglomerateName =
              currentConglomerateDescriptor.getConglomerateName();
            if (conglomerateName != null)
            {
              /* Have we found the desired index? */
              if (conglomerateName.equals(userSpecifiedIndexName))
              {
                break;
              }
            }
          }

          /* We should always find a match */
          if (SanityManager.DEBUG)
          {
            if (currentConglomerateDescriptor == null)
            {
              SanityManager.THROWASSERT(
                "Expected to find match for forced index " +
                userSpecifiedIndexName);
            }
          }
        }

        if ( ! super.nextAccessPath(optimizer,
                      predList,
                      rowOrdering))
        {
          if (SanityManager.DEBUG)
          {
            SanityManager.THROWASSERT("No join strategy found");
          }
        }
      }
    }
    else
    {
      if (currentConglomerateDescriptor != null)
      {
        /*
        ** Once we have a conglomerate descriptor, cycle through
        ** the join strategies (done in parent).
        */
        if ( ! super.nextAccessPath(optimizer,
                      predList,
                      rowOrdering))
        {
          /*
          ** When we're out of join strategies, go to the next
          ** conglomerate descriptor.
          */
          currentConglomerateDescriptor = getNextConglom(currentConglomerateDescriptor);

          /*
          ** New conglomerate, so step through join strategies
          ** again.
          */
          resetJoinStrategies(optimizer);

          if ( ! super.nextAccessPath(optimizer,
                        predList,
                        rowOrdering))
          {
            if (SanityManager.DEBUG)
            {
              SanityManager.THROWASSERT("No join strategy found");
            }
          }
        }
      }
      else
      {
        /* Get the first conglomerate descriptor */
        currentConglomerateDescriptor = getFirstConglom();

        if ( ! super.nextAccessPath(optimizer,
                      predList,
                      rowOrdering))
        {
          if (SanityManager.DEBUG)
          {
            SanityManager.THROWASSERT("No join strategy found");
          }
        }
      }
    }

    if (currentConglomerateDescriptor == null)
    {
      optimizer.trace(Optimizer.NO_MORE_CONGLOMERATES, tableNumber, 0, 0.0, null);
    }
    else
    {
      currentConglomerateDescriptor.setColumnNames(columnNames);
      optimizer.trace(Optimizer.CONSIDERING_CONGLOMERATE, tableNumber, 0, 0.0,
              currentConglomerateDescriptor);
    }

    /*
    ** Tell the rowOrdering what the ordering of this conglomerate is
    */
    if (currentConglomerateDescriptor != null)
    {
      if ( ! currentConglomerateDescriptor.isIndex())
      {
        /* If we are scanning the heap, but there
         * is a full match on a unique key, then
         * we can say that the table IS NOT unordered.
         * (We can't currently say what the ordering is
         * though.)
         */
        if (! isOneRowResultSet(predList))
        {
          optimizer.trace(Optimizer.ADDING_UNORDERED_OPTIMIZABLE,
                   ((predList == null) ? 0 : predList.size()),
                   0, 0.0, null);

          rowOrdering.addUnorderedOptimizable(this);
        }
        else
        {
          optimizer.trace(Optimizer.SCANNING_HEAP_FULL_MATCH_ON_UNIQUE_KEY,
                   0, 0, 0.0, null);
        }
      }
      else
      {
        IndexRowGenerator irg =
              currentConglomerateDescriptor.getIndexDescriptor();

        int[] baseColumnPositions = irg.baseColumnPositions();
        boolean[] isAscending = irg.isAscending();

        for (int i = 0; i < baseColumnPositions.length; i++)
View Full Code Here

        indexSpecified = true;

        /* Validate index name - NULL means table scan */
        if (! StringUtil.SQLToUpperCase(value).equals("NULL"))
        {
          ConglomerateDescriptor cd = null;
          ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();

          for (int index = 0; index < cds.length; index++)
          {
            cd = cds[index];
            String conglomerateName = cd.getConglomerateName();
            if (conglomerateName != null)
            {
              if (conglomerateName.equals(value))
              {
                break;
              }
            }
            // Not a match, clear cd
            cd = null;
          }

          // Throw exception if user specified index not found
          if (cd == null)
          {
            throw StandardException.newException(SQLState.LANG_INVALID_FORCED_INDEX1,
                    value, getBaseTableName());
          }
          /* Query is dependent on the ConglomerateDescriptor */
          getCompilerContext().createDependency(cd);
        }
      }
      else if (key.equals("constraint"))
      {
        // The user may specify only one of index and constraint, not both
        if (indexSpecified)
        {
          throw StandardException.newException(SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED,
                getBaseTableName());
        }
        constraintSpecified = true;

        if (! StringUtil.SQLToUpperCase(value).equals("NULL"))
        {
          consDesc =
            dDictionary.getConstraintDescriptorByName(
                  tableDescriptor, (SchemaDescriptor)null, value,
                  false);

          /* Throw exception if user specified constraint not found
           * or if it does not have a backing index.
           */
          if ((consDesc == null) || ! consDesc.hasBackingIndex())
          {
            throw StandardException.newException(SQLState.LANG_INVALID_FORCED_INDEX2,
                    value, getBaseTableName());
          }

          /* Query is dependent on the ConstraintDescriptor */
          getCompilerContext().createDependency(consDesc);
        }
      }
      else if (key.equals("joinStrategy"))
      {
        userSpecifiedJoinStrategy = StringUtil.SQLToUpperCase(value);
      }
      else if (key.equals("hashInitialCapacity"))
      {
        initialCapacity = getIntProperty(value, key);

        // verify that the specified value is valid
        if (initialCapacity <= 0)
        {
          throw StandardException.newException(SQLState.LANG_INVALID_HASH_INITIAL_CAPACITY,
              String.valueOf(initialCapacity));
        }
      }
      else if (key.equals("hashLoadFactor"))
      {
        try
        {
          loadFactor = Float.valueOf(value).floatValue();
        }
        catch (NumberFormatException nfe)
        {
          throw StandardException.newException(SQLState.LANG_INVALID_NUMBER_FORMAT_FOR_OVERRIDE,
              value, key);
        }

        // verify that the specified value is valid
        if (loadFactor <= 0.0 || loadFactor > 1.0)
        {
          throw StandardException.newException(SQLState.LANG_INVALID_HASH_LOAD_FACTOR,
              value);
        }
      }
      else if (key.equals("hashMaxCapacity"))
      {
        maxCapacity = getIntProperty(value, key);

        // verify that the specified value is valid
        if (maxCapacity <= 0)
        {
          throw StandardException.newException(SQLState.LANG_INVALID_HASH_MAX_CAPACITY,
              String.valueOf(maxCapacity));
        }
      }
      else if (key.equals("bulkFetch"))
      {
        bulkFetch = getIntProperty(value, key);

        // verify that the specified value is valid
        if (bulkFetch <= 0)
        {
          throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_VALUE,
              String.valueOf(bulkFetch));
        }
     
        // no bulk fetch on updatable scans
        if (forUpdate())
        {
          throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_UPDATEABLE);
        }
      }
      else
      {
        // No other "legal" values at this time
        throw StandardException.newException(SQLState.LANG_INVALID_FROM_TABLE_PROPERTY, key,
          "index, constraint, joinStrategy");
      }
    }

    /* If the user specified a non-null constraint name (DERBY-1707), then
     * replace it in the properties list with the underlying index name to
     * simplify the code in the optimizer.
     * NOTE: The code to get from the constraint name, for a constraint
     * with a backing index, to the index name is convoluted.  Given
     * the constraint name, we can get the conglomerate id from the
     * ConstraintDescriptor.  We then use the conglomerate id to get
     * the ConglomerateDescriptor from the DataDictionary and, finally,
     * we get the index name (conglomerate name) from the ConglomerateDescriptor.
     */
    if (constraintSpecified && consDesc != null)
    {
      ConglomerateDescriptor cd =
        dDictionary.getConglomerateDescriptor(
          consDesc.getConglomerateId());
      String indexName = cd.getConglomerateName();

      tableProperties.remove("constraint");
      tableProperties.put("index", indexName);
    }
  }
View Full Code Here
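
The block comment near the end of the example above spells out the constraint-name-to-index-name mapping in prose. As a minimal sketch, those steps can be collected into a helper that uses only the calls already shown in this example (getConstraintDescriptorByName, hasBackingIndex, getConglomerateId, getConglomerateDescriptor, getConglomerateName); the helper name backingIndexName is illustrative, not Derby API.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
import org.apache.derby.iapi.sql.dictionary.TableDescriptor;

  // Illustrative helper: map a constraint name to the name of its backing
  // index (constraint -> conglomerate id -> ConglomerateDescriptor -> name).
  // Returns null if the constraint is not found or has no backing index.
  static String backingIndexName(DataDictionary dd,
                                 TableDescriptor td,
                                 String constraintName)
    throws StandardException
  {
    ConstraintDescriptor consDesc =
      dd.getConstraintDescriptorByName(
          td, (SchemaDescriptor) null, constraintName, false);

    if ((consDesc == null) || ! consDesc.hasBackingIndex())
    {
      return null;
    }

    ConglomerateDescriptor cd =
      dd.getConglomerateDescriptor(consDesc.getConglomerateId());

    return cd.getConglomerateName();
  }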

   */
  public ResultSetNode changeAccessPath() throws StandardException
  {
    ResultSetNode  retval;
    AccessPath ap = getTrulyTheBestAccessPath();
    ConglomerateDescriptor trulyTheBestConglomerateDescriptor =
                         ap.getConglomerateDescriptor();
    JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
    Optimizer optimizer = ap.getOptimizer();

    optimizer.trace(Optimizer.CHANGING_ACCESS_PATH_FOR_TABLE,
            tableNumber, 0, 0.0, null);

    if (SanityManager.DEBUG)
    {
      SanityManager.ASSERT(
        trulyTheBestConglomerateDescriptor != null,
        "Should only modify access path after conglomerate has been chosen.");
    }

    /*
    ** Make sure user-specified bulk fetch is OK with the chosen join
    ** strategy.
    */
    if (bulkFetch != UNSET)
    {
      if ( ! trulyTheBestJoinStrategy.bulkFetchOK())
      {
        throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_WITH_JOIN_TYPE,
                      trulyTheBestJoinStrategy.getName());
      }
      // bulkFetch has no meaning for hash join, just ignore it
      else if (trulyTheBestJoinStrategy.ignoreBulkFetch())
      {
        disableBulkFetch();
      }
      // bug 4431 - ignore bulkfetch property if it's 1 row resultset
      else if (isOneRowResultSet())
      {
        disableBulkFetch();
      }
    }

    // bulkFetch = 1 is the same as no bulk fetch
    if (bulkFetch == 1)
    {
      disableBulkFetch();
    }

    /* Remove any redundant join clauses.  A redundant join clause is one
     * where there are other join clauses in the same equivalence class
     * after it in the PredicateList.
     */
    restrictionList.removeRedundantPredicates();

    /*
    ** Divide up the predicates for different processing phases of the
    ** best join strategy.
    */
    storeRestrictionList = (PredicateList) getNodeFactory().getNode(
                          C_NodeTypes.PREDICATE_LIST,
                          getContextManager());
    nonStoreRestrictionList = (PredicateList) getNodeFactory().getNode(
                          C_NodeTypes.PREDICATE_LIST,
                          getContextManager());
    requalificationRestrictionList =
                  (PredicateList) getNodeFactory().getNode(
                          C_NodeTypes.PREDICATE_LIST,
                          getContextManager());
    trulyTheBestJoinStrategy.divideUpPredicateLists(
                      this,
                      restrictionList,
                      storeRestrictionList,
                      nonStoreRestrictionList,
                      requalificationRestrictionList,
                      getDataDictionary());

    /* Check to see if we are going to do execution-time probing
     * of an index using IN-list values.  We can tell by looking
     * at the restriction list: if there is an IN-list probe
     * predicate that is also a start/stop key then we know that
     * we're going to do execution-time probing.  In that case
     * we disable bulk fetching to minimize the number of non-
     * matching rows that we read from disk.  RESOLVE: Do we
     * really need to completely disable bulk fetching here,
     * or can we do something else?
     */
    for (int i = 0; i < restrictionList.size(); i++)
    {
      Predicate pred = (Predicate)restrictionList.elementAt(i);
      if (pred.isInListProbePredicate() && pred.isStartKey())
      {
        disableBulkFetch();
        multiProbing = true;
        break;
      }
    }

    /*
    ** Consider turning on bulkFetch if it is turned
    ** off.  Only turn it on if it is not an updatable
    ** scan and if it isn't a oneRowResultSet, and
    ** not a subquery, and it is OK to use bulk fetch
    ** with the chosen join strategy.  NOTE: the subquery logic
    ** could be more sophisticated -- we are taking
    ** the safe route in avoiding reading extra
    ** data for something like:
    **
    **  select x from t where x in (select y from t)
    **
    ** In this case we want to stop the subquery
    ** evaluation as soon as something matches.
    */
    if (trulyTheBestJoinStrategy.bulkFetchOK() &&
      !(trulyTheBestJoinStrategy.ignoreBulkFetch()) &&
      ! bulkFetchTurnedOff &&
      (bulkFetch == UNSET) &&
      !forUpdate() &&
      !isOneRowResultSet() &&
      getLevel() == 0)
    {
      bulkFetch = getDefaultBulkFetch();
    }

    /* Statement is dependent on the chosen conglomerate. */
    getCompilerContext().createDependency(
        trulyTheBestConglomerateDescriptor);

    /* No need to modify access path if conglomerate is the heap */
    if ( ! trulyTheBestConglomerateDescriptor.isIndex())
    {
      /*
      ** We need a little special logic for SYSSTATEMENTS
      ** here.  SYSSTATEMENTS has a hidden column at the
      ** end.  When someone does a select * we don't want
View Full Code Here
