    return fixSplit(table, metadataEntry, metadataPrevEndRow, oper, splitRatio, tserver, credentials, time.toString(), initFlushID, initCompactID, lock);
  }
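
  /**
   * Remove every !METADATA entry for the given table. When {@code insertDeletes} is true, delete
   * markers for the table's data files and tablet directories are written and flushed before any
   * metadata entries are removed, which keeps the operation fault tolerant: the garbage collector
   * can still reclaim the table's files if this method dies part way through.
   */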
  public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException {
    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    Text tableIdText = new Text(tableId);
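    // buffer up to 1 MB of mutations, flush at least every two minutes, and use two write threads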
    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, new BatchWriterConfig().setMaxMemory(1000000)
        .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));

    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

    // insert deletes before deleting data from !METADATA; this makes the code fault tolerant
    if (insertDeletes) {
      ms.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
      Constants.METADATA_DIRECTORY_COLUMN.fetch(ms);
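
      // first pass: queue a delete marker for each data file and each tablet directory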
      for (Entry<Key,Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
          String relPath = key.getColumnQualifier().toString();
          // only insert deletes for files owned by this table
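          // (a path starting with "../" references a file under another table's directory, e.g. one shared through a clone)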
          if (!relPath.startsWith("../"))
            bw.addMutation(createDeleteMutation(tableId, relPath));
        }

        if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }
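
      // make the delete markers durable before any metadata entries are removed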
      bw.flush();
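
      // reset the scanner so the second pass sees every column, not just the two fetched above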
      ms.clearColumns();
    }
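
    // second pass: delete the table's metadata entries themselves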
    for (Entry<Key,Value> cell : ms) {
      Key key = cell.getKey();