// NOTE(review): mid-method fragment — presumably Apache Derby's index-creation
// constant action (executeConstantAction); the enclosing method header and the
// start of this newConglomerateDescriptor(...) call are above this view.
indexRowGenerator, isConstraint,
conglomerateUUID, td.getUUID(), sd.getUUID() );
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl =
td.getConglomerateDescriptorList();
cdl.add(cgd);
// can't just return yet, need to get member "indexTemplateRow"
// because create constraint may use it
}
// Describe the properties of the index to the store using Properties
// RESOLVE: The following properties assume a BTREE index.
Properties indexProperties;
// Reuse caller-supplied properties when present; otherwise start empty.
if (properties != null)
{
indexProperties = properties;
}
else
{
indexProperties = new Properties();
}
// Tell it the conglomerate id of the base table
indexProperties.put("baseConglomerateId",
Long.toString(td.getHeapConglomerateId()));
// "Unique with duplicate nulls" semantics are only supported by the store
// from DD version 10.4 onward; older dictionaries fall back to a plain
// unique index instead.
if (uniqueWithDuplicateNulls && !hasDeferrableChecking)
{
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null))
{
indexProperties.put(
"uniqueWithDuplicateNulls", Boolean.toString(true));
}
else
{
// for lower version of DD there is no unique with nulls
// index creating a unique index instead.
// NOTE(review): this inner check is redundant — the outer guard
// already guarantees uniqueWithDuplicateNulls is true here.
if (uniqueWithDuplicateNulls)
{
unique = true;
}
}
}
// All indexes are unique because they contain the RowLocation.
// The number of uniqueness columns must include the RowLocation
// if the user did not specify a unique index.
indexProperties.put("nUniqueColumns",
Integer.toString(unique ? baseColumnPositions.length :
baseColumnPositions.length + 1)
);
// By convention, the row location column is the last column
indexProperties.put("rowLocationColumn",
Integer.toString(baseColumnPositions.length));
// For now, all columns are key fields, including the RowLocation
indexProperties.put("nKeyFields",
Integer.toString(baseColumnPositions.length + 1));
// For now, assume that all index columns are ordered columns
// Only build a fresh IndexRowGenerator when not sharing an existing
// conglomerate; pre-10.4 dictionaries cannot carry the deferrable /
// duplicate-null flags, so those are forced to false in the else branch.
if (! shareExisting)
{
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null))
{
indexRowGenerator = new IndexRowGenerator(
indexType,
unique,
uniqueWithDuplicateNulls,
uniqueDeferrable,
(hasDeferrableChecking &&
constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT),
baseColumnPositions,
isAscending,
baseColumnPositions.length);
}
else
{
indexRowGenerator = new IndexRowGenerator(
indexType,
unique,
false,
false,
false,
baseColumnPositions,
isAscending,
baseColumnPositions.length);
}
}
/* Now add the rows from the base table to the conglomerate.
* We do this by scanning the base table and inserting the
* rows into a sorter before inserting from the sorter
* into the index. This gives us better performance
* and a more compact index.
*/
rowSource = null;
sortId = 0;
boolean needToDropSort = false; // set to true once the sorter is created
/* bulkFetchSIze will be 16 (for now) unless
* we are creating the table in which case it
* will be 1. Too hard to remove scan when
* creating index on new table, so minimize
* work where we can.
*/
int bulkFetchSize = (forCreateTable) ? 1 : 16;
int numColumns = td.getNumberOfColumns();
int approximateRowSize = 0;
// Create the FormatableBitSet for mapping the partial to full base row
// (bit positions here are the 1-based base-table column positions).
FormatableBitSet bitSet = new FormatableBitSet(numColumns+1);
for (int index = 0; index < baseColumnPositions.length; index++)
{
bitSet.set(baseColumnPositions[index]);
}
// Shifted copy: 0-based column positions, as required by the store scan.
FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
// Start by opening a full scan on the base table.
scan = tc.openGroupFetchScan(
td.getHeapConglomerateId(),
false, // hold
0, // open base table read only
TransactionController.MODE_TABLE,
TransactionController.ISOLATION_SERIALIZABLE,
zeroBasedBitSet, // all fields as objects
(DataValueDescriptor[]) null, // startKeyValue
0, // not used when giving null start posn.
null, // qualifier
(DataValueDescriptor[]) null, // stopKeyValue
0); // not used when giving null stop posn.
// Create an array to put base row template
// (one template per slot of the group-fetch buffer).
baseRows = new ExecRow[bulkFetchSize];
indexRows = new ExecIndexRow[bulkFetchSize];
compactBaseRows = new ExecRow[bulkFetchSize];
// try/finally guarantees scan, rowSource and the sorter are released
// even if loading the index fails part-way through.
try
{
// Create the array of base row template
for (int i = 0; i < bulkFetchSize; i++)
{
// create a base row template
baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
// create an index row template
indexRows[i] = indexRowGenerator.getIndexRowTemplate();
// create a compact base row template
compactBaseRows[i] = activation.getExecutionFactory().getValueRow(
baseColumnPositions.length);
}
// Remember the template; the shared-index path below returns once this
// (and the RowLocation wiring) is done.
indexTemplateRow = indexRows[0];
// Fill the partial row with nulls of the correct type
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
// numSet counts only the indexed columns seen so far, so it doubles as
// the 1-based column position within the compact row.
for (int index = 0, numSet = 0; index < cdlSize; index++)
{
if (! zeroBasedBitSet.get(index))
{
continue;
}
numSet++;
ColumnDescriptor cd = cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
for (int i = 0; i < bulkFetchSize; i++)
{
// Put the column in both the compact and sparse base rows
// (same DataValueDescriptor object shared by both rows).
baseRows[i].setColumn(index + 1,
dts.getNull());
compactBaseRows[i].setColumn(numSet,
baseRows[i].getColumn(index + 1));
}
// Calculate the approximate row size for the index row
approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
}
// Get an array of RowLocation template
RowLocation rl[] = new RowLocation[bulkFetchSize];
for (int i = 0; i < bulkFetchSize; i++)
{
rl[i] = scan.newRowLocationTemplate();
// Get an index row based on the base row
indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
}
/* now that we got indexTemplateRow, done for sharing index
*/
// Early return is safe: the finally block below still closes the scan.
if (shareExisting)
return;
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate key.
* We create a basic sort observer for non-unique indexes
* so that we can reuse the wrappers during an external
* sort.
*/
int numColumnOrderings;
SortObserver sortObserver;
Properties sortProperties = null;
if (unique || uniqueWithDuplicateNulls || uniqueDeferrable)
{
// if the index is a constraint, use constraintname in
// possible error message
String indexOrConstraintName = indexName;
if (conglomerateUUID != null)
{
ConglomerateDescriptor cd =
dd.getConglomerateDescriptor(conglomerateUUID);
if ((isConstraint) &&
(cd != null && cd.getUUID() != null && td != null))
{
ConstraintDescriptor conDesc =
dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
}
}
if (unique || uniqueDeferrable)
{
// Fully unique: order by key columns only; deferrable-but-not-
// unique still includes the RID in the ordering.
numColumnOrderings = unique ? baseColumnPositions.length :
baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(
lcc,
constraintID,
true,
uniqueDeferrable,
initiallyDeferred,
indexOrConstraintName,
indexTemplateRow,
true,
td.getName());
}
else
{
// unique with duplicate nulls allowed.
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(
AccessFactoryGlobals.IMPL_TYPE,
AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
//use sort operator which treats nulls unequal
sortObserver =
new UniqueWithDuplicateNullsIndexSortObserver(
lcc,
constraintID,
true,
(hasDeferrableChecking &&
constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT),
initiallyDeferred,
indexOrConstraintName,
indexTemplateRow,
true,
td.getName());
}
}
else
{
// Non-unique index: the RID column makes every sort key distinct.
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(true, false,
indexTemplateRow,
true);
}
ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
for (int i=0; i < numColumnOrderings; i++)
{
// For a non-unique index the last ordering slot is the appended RID,
// which has no user-specified direction and is always ascending.
order[i] =
new IndexColumnOrder(
i,
unique || i < numColumnOrderings - 1 ?
isAscending[i] : true);
}
// create the sorter
sortId = tc.createSort(sortProperties,
indexTemplateRow.getRowArrayClone(),
order,
sortObserver,
false, // not in order
scan.getEstimatedRowCount(),
approximateRowSize // est row size, -1 means no idea
);
// From here on the finally block must drop the sort on any exit.
needToDropSort = true;
// Populate sorter and get the output of the sorter into a row
// source. The sorter has the indexed columns only and the columns
// are in the correct order.
rowSource = loadSorter(baseRows, indexRows, tc,
scan, sortId, rl);
conglomId =
tc.createAndLoadConglomerate(
indexType,
indexTemplateRow.getRowArray(), // index row template
order, //colums sort order
indexRowGenerator.getColumnCollationIds(
td.getColumnDescriptorList()),
indexProperties,
TransactionController.IS_DEFAULT, // not temporary
rowSource,
(long[]) null);
}
finally
{
/* close the table scan */
if (scan != null)
scan.close();
/* close the sorter row source before throwing exception */
if (rowSource != null)
rowSource.closeRowSource();
/*
** drop the sort so that intermediate external sort run can be
** removed from disk
*/
if (needToDropSort)
tc.dropSort(sortId);
}
// Re-open the freshly created conglomerate purely to sanity-check it.
ConglomerateController indexController =
tc.openConglomerate(
conglomId, false, 0, TransactionController.MODE_TABLE,
TransactionController.ISOLATION_SERIALIZABLE);
// Check to make sure that the conglomerate can be used as an index
if ( ! indexController.isKeyed())
{
// Close before throwing so the controller is not leaked.
indexController.close();
throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName,
indexType);
}
indexController.close();
//
// Create a conglomerate descriptor with the conglomId filled
// in and add it--if we don't have one already.
//
if (!alreadyHaveConglomDescriptor)
{
ConglomerateDescriptor cgd =
ddg.newConglomerateDescriptor(
conglomId, indexName, true,
indexRowGenerator, isConstraint,
conglomerateUUID, td.getUUID(), sd.getUUID() );
dd.addDescriptor(cgd, sd,
DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
/* Since we created a new conglomerate descriptor, load
* its UUID into the corresponding field, to ensure that
* it is properly set in the StatisticsDescriptor created
* below.