Package org.apache.accumulo.core.client.impl

Examples of org.apache.accumulo.core.client.impl.ScannerImpl

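Every example on this page follows the same basic pattern: construct a ScannerImpl against the !METADATA table, narrow it with fetchColumnFamily and setRange, then iterate over the returned entries. The sketch below distills that pattern into a single method. It is illustrative only: it assumes the 1.4/1.5-era internal API used throughout these examples (ScannerImpl, TCredentials, HdfsZooInstance), elides imports just as the snippets do, and the helper name printDataFileEntries is hypothetical.

  // Minimal sketch of the common pattern (hypothetical helper, not taken from any one example below).
  static void printDataFileEntries(TCredentials credentials, Text metadataRow) {
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials,
        Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    // restrict the scan to the data file column family of a single metadata row
    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    scanner.setRange(new Range(metadataRow));
    for (Entry<Key,Value> entry : scanner) {
      System.out.println(entry.getKey() + " -> " + entry.getValue()); // one metadata cell
    }
  }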

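Computing the size of each of a tablet's data files by scanning the data file column family of its !METADATA row: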
  public static SortedMap<String,DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials) {
    TreeMap<String,DataFileValue> sizes = new TreeMap<String,DataFileValue>();

    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    Text row = extent.getMetadataEntry();

    Key endKey = new Key(row, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);

    mdScanner.setRange(new Range(new Key(row), endKey));
    for (Entry<Key,Value> entry : mdScanner) {

      if (!entry.getKey().getRow().equals(row))
        break;
      DataFileValue dfv = new DataFileValue(entry.getValue().get());
      sizes.put(entry.getKey().getColumnQualifier().toString(), dfv);
    }

    return sizes;
  }


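From the split-recovery path: after rejecting a split tablet that lacks a prev end row, a second scanner probes the !METADATA table for the prev tablet's row to decide whether to roll the split back or finish it: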
      throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);

    // check to see if the prev tablet exists in the metadata table
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));

    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));

    if (!scanner2.iterator().hasNext()) {
      log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
      rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), credentials, lock);
      return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
    } else {
      log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);

      List<String> highDatafilesToRemove = new ArrayList<String>();

      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
      Key rowKey = new Key(metadataEntry);
     
      SortedMap<String,DataFileValue> origDatafileSizes = new TreeMap<String,DataFileValue>();
      SortedMap<String,DataFileValue> highDatafileSizes = new TreeMap<String,DataFileValue>();
      SortedMap<String,DataFileValue> lowDatafileSizes = new TreeMap<String,DataFileValue>();
      scanner3.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
      scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
     
      for (Entry<Key,Value> entry : scanner3) {
        if (entry.getKey().compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
          origDatafileSizes.put(entry.getKey().getColumnQualifier().toString(), new DataFileValue(entry.getValue().get()));
        }
        // ... (remainder of loop and method elided)

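Deleting all of a table's !METADATA entries. File deletion markers are inserted and flushed before any rows are removed, which keeps the operation fault tolerant if it is interrupted: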
  public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException {
    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    Text tableIdText = new Text(tableId);
    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, new BatchWriterConfig().setMaxMemory(1000000)
        .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
   
    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

    // insert deletes before deleting data from !METADATA... this makes the code fault tolerant
    if (insertDeletes) {

      ms.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
      Constants.METADATA_DIRECTORY_COLUMN.fetch(ms);
     
      for (Entry<Key,Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
          String relPath = key.getColumnQualifier().toString();
          // only insert deletes for files owned by this table
          if (!relPath.startsWith("../"))
            bw.addMutation(createDeleteMutation(tableId, relPath));
        }

        if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }

      bw.flush();

      ms.clearColumns();
    }

    for (Entry<Key,Value> cell : ms) {
      Key key = cell.getKey();
      // ... (remainder of method elided)

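From a method that gathers a tablet's files and write-ahead logs: the root tablet's files are listed straight from the filesystem (first branch, entered midway here), while any other tablet's log and data file entries are read with a metadata scan: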
        DataFileValue dfv = new DataFileValue(0, 0);
        sizes.put(Constants.ZROOT_TABLET + "/" + fileStatus.getPath().getName(), dfv);
      }

    } else {
      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
      scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
      scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
      scanner.setRange(extent.toMetadataRange());

      for (Entry<Key,Value> entry : scanner) {
        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
        }
        // ... (remainder of loop elided)

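A small helper that builds a scanner over just the log column family of a single tablet's !METADATA row: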
  private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
    Text start = extent.getMetadataEntry();
    Key endKey = new Key(start, Constants.METADATA_LOG_COLUMN_FAMILY);
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
    scanner.setRange(new Range(new Key(start), endKey));
    return scanner;
  }

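Reading which bulk import files have been loaded into a tablet, mapping each file name to the transaction id stored in the entry's value: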
 
  public static Map<String,Long> getBulkFilesLoaded(TCredentials credentials, Text metadataRow) {
   
    Map<String,Long> ret = new HashMap<String,Long>();

    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
    for (Entry<Key,Value> entry : scanner) {
      String file = entry.getKey().getColumnQualifier().toString();
      Long tid = Long.parseLong(entry.getValue().toString());

      ret.put(file, tid);
    }

    return ret;
  }

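Scanning the deletes keyspace to move !METADATA file-deletion markers to the root tablet during a 1.4 to 1.5 upgrade: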
  /**
   * During an upgrade from Accumulo 1.4 -> 1.5, we need to move deletion requests for files under the !METADATA table to the root tablet.
   */
  public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
    // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
    Scanner scanner = new ScannerImpl(instance, creds, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner.setRange(new Range(Constants.METADATA_DELETES_KEYSPACE));
    for (Entry<Key,Value> entry : scanner) {
      String row = entry.getKey().getRow().toString();
      if (row.startsWith(Constants.METADATA_DELETE_FLAG_PREFIX + "/" + Constants.METADATA_TABLE_ID)) {
        String filename = row.substring(Constants.METADATA_DELETE_FLAG_PREFIX.length());
        // add the new entry first
        // ... (remainder of migration logic elided)

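Loading the data file metadata for a tablet into a sorted map, this time with system credentials and a larger scanner batch size: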
     
      SortedMap<Key,Value> datafilesMetadata;
     
      Text rowName = extent.getMetadataEntry();
       
      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
          Constants.NO_AUTHS);
     
      // Historical notes from the source: the batch size was changed from 10 to
      // 1000 after end keys were implemented; when a tablet has no data files,
      // this scan simply returns nothing.
      mdScanner.setBatchSize(1000);

      // left in for safety now that an end key is used
      mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
     
      mdScanner.setRange(new Range(rowName));
     
      datafilesMetadata = new TreeMap<Key,Value>();
     
      for (Entry<Key,Value> entry : mdScanner) {
       
        // ... (loop body and remainder of method elided)

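Gathering the split-related columns (directory, prev row, split ratio, old prev row, time) for a tablet's metadata range into a TreeMap: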
    List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {Constants.METADATA_DIRECTORY_COLUMN, Constants.METADATA_PREV_ROW_COLUMN,
        Constants.METADATA_SPLIT_RATIO_COLUMN, Constants.METADATA_OLD_PREV_ROW_COLUMN, Constants.METADATA_TIME_COLUMN});
   
    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
        Constants.NO_AUTHS);
    scanner.setRange(extent.toMetadataRange());

    TreeMap<Key,Value> tkv = new TreeMap<Key,Value>();
    for (Entry<Key,Value> entry : scanner)
      tkv.put(entry.getKey(), entry.getValue());
    // ... (remainder of method elided)

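A test helper, shown with its call site, that asserts a tablet's !METADATA row contains only the expected columns and column families: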
      ensureTabletHasNoUnexpectedMetadataEntries(extent, mapFiles);
    }
  }
 
  private void ensureTabletHasNoUnexpectedMetadataEntries(KeyExtent extent, SortedMap<String,DataFileValue> expectedMapFiles) throws Exception {
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
        Constants.NO_AUTHS);
    scanner.setRange(extent.toMetadataRange());
   
    HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
    expectedColumns.add(Constants.METADATA_DIRECTORY_COLUMN);
    expectedColumns.add(Constants.METADATA_PREV_ROW_COLUMN);
    expectedColumns.add(Constants.METADATA_TIME_COLUMN);
    expectedColumns.add(Constants.METADATA_LOCK_COLUMN);
   
    HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
    expectedColumnFamilies.add(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    expectedColumnFamilies.add(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY);
    expectedColumnFamilies.add(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
    expectedColumnFamilies.add(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY);
    expectedColumnFamilies.add(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
   
    Iterator<Entry<Key,Value>> iter = scanner.iterator();
    while (iter.hasNext()) {
      Key key = iter.next().getKey();
     
      if (!key.getRow().equals(extent.getMetadataEntry())) {
        throw new Exception("Tablet " + extent + " contained unexpected " + Constants.METADATA_TABLE_NAME + " entry " + key);
        // ... (remainder of checks elided)
