TaskBatch<Void> tasks = new TaskBatch<Void>(entries.size());
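// create one task per target index table; each task below writes that table's index updates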
for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
// get the mutations for each table. We leak the implementation here a little bit to avoid
// making a complete copy of all the index updates for each table.
final List<Mutation> mutations = kvBuilder.cloneIfNecessary((List<Mutation>)entry.getValue());
final HTableInterfaceReference tableReference = entry.getKey();
final RegionCoprocessorEnvironment env = this.env;
/*
* Write a batch of index updates to an index table. This operation stops (is cancelable) via two
* mechanisms: (1) setting aborted or stopped on the IndexWriter, or (2) interrupting the running thread.
* The former will only work if we are not in the midst of writing the current batch to the table, though we
* do check these status variables before starting and before writing the batch. The latter usage,
* interrupting the thread, will work in the previous situations as well as at some points while writing the
* batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't
* elaborate when it supports an interrupt).
*/
tasks.add(new Task<Void>() {
/**
* Do the actual write to the index table. We don't need to worry about closing the table because that
* is handled by the {@link CachingHTableFactory}.
*
* @return always {@code null}; failures are signaled by throwing an exception
*/
@SuppressWarnings("deprecation")
@Override
public Void call() throws Exception {
// this task may have been queued, so another task in front of us may have already failed;
// if so, we should exit early
throwFailureIfDone();
if (LOG.isDebugEnabled()) {
LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
}
try {
// TODO: Once HBASE-11766 is fixed, reexamine whether this is necessary.
// Also, checking the prefix of the table name to determine if this is a local
// index is pretty hacky. If we're going to keep this, we should revisit that
// as well.
try {
if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
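// for a local index, look up the index region co-hosted with this data region so we can write to it
// directly (an in-process call), rather than going through the HTable RPC path below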
HRegion indexRegion = IndexUtil.getIndexRegion(env);
if (indexRegion != null) {
throwFailureIfDone();
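// apply all of this table's index updates to the local index region in a single batch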
indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
return null;
}
}
} catch (IOException ignored) {
// if the direct region write fails, fall back to the standard (slower) HTable path
if (LOG.isDebugEnabled()) {
LOG.debug("indexRegion.batchMutate failed; falling back to HTable.batch(). Got error="
+ ignored);
}
}
}
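// standard path: write through the HTable client API. The factory may hand back a cached table
// instance, which is why we don't close the table here (see the javadoc on call()).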
HTableInterface table = factory.getTable(tableReference.get());
throwFailureIfDone();
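// blocking call; it may throw InterruptedException, which is handled below by restoring the
// thread's interrupt status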
table.batch(mutations);
} catch (SingleIndexWriteFailureException e) {
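// already the failure type we want to propagate (e.g. from throwFailureIfDone()); rethrow as-is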
throw e;
} catch (IOException e) {
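// wrap the failure so the caller knows which index table and which mutations failed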
throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
} catch (InterruptedException e) {
// reset the interrupt status on the thread
Thread.currentThread().interrupt();
throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
}
return null;
}
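/**
* Early-exit check: if the cancellation conditions described above (stopped/aborted writer or an
* earlier failure in this batch) have been met, throw rather than attempting further writes.
*/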
private void throwFailureIfDone() throws SingleIndexWriteFailureException {