// If using secure bulk load, get source delegation token, and
// prepare staging directory and token
// fs is the source filesystem
fsDelegationToken.acquireDelegationToken(fs);
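// On a secure cluster, have the SecureBulkLoadEndpoint coprocessor prepare
// a staging directory; the returned bulkToken identifies it for later cleanup.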
if (isSecureBulkLoadEndpointAvailable()) {
bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
}
// Assumes that region splits can happen while this occurs.
while (!queue.isEmpty()) {
// need to reload split keys each iteration.
final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
if (count != 0) {
LOG.info("Split occurred while grouping HFiles, retry attempt " +
  count + " with " + queue.size() + " files remaining to group or split");
}
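// hbase.bulkload.retries.number defaults to 0, which means retry indefinitely.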
int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 0);
if (maxRetries != 0 && count >= maxRetries) {
LOG.error("Retried " + count + " times without completing, bailing out");
return;
}
count++;
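// Group the HFiles by the region that will receive them, splitting any
// file that spans a region boundary.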
// Using ByteBuffer for byte[] equality semantics
Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table,
pool, queue, startEndKeys);
if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
// Error is logged inside checkHFilesCountPerRegionPerFamily.
throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
+ " hfiles to one family of one region");
}
bulkLoadPhase(table, conn, pool, queue, regionGroups);
// NOTE: The next iteration's split / group could happen in parallel with
// atomic bulkloads, assuming that there are splits but no merges, and
// that we can atomically pull out the groups we want to retry.
}
} finally {
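// Always release the delegation token and clean up the secure staging
// directory, even if the load failed part way through.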
fsDelegationToken.releaseDelegationToken();
if (bulkToken != null) {
new SecureBulkLoadClient(table).cleanupBulkLoad(bulkToken);
}
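// Shut down the worker pool used for the group/split and load phases.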
pool.shutdown();
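// Anything still on the queue at this point failed to load; report it.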
if (queue != null && !queue.isEmpty()) {
StringBuilder err = new StringBuilder();
err.append("-------------------------------------------------\n");