Package org.apache.accumulo.core.client

Examples of org.apache.accumulo.core.client.Scanner.fetchColumnFamily()
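
Scanner.fetchColumnFamily(Text) restricts a scan to the named column family; calling it repeatedly unions several families onto the same scan, and a scanner with no fetch calls returns every column. The snippets below are excerpts from Accumulo's own metadata-table code. First, a minimal self-contained sketch of the call, assuming the in-memory MockInstance that ships with Accumulo 1.4-era releases and a hypothetical table named "example":

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.mock.MockInstance;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class FetchColumnFamilyExample {
      public static void main(String[] args) throws Exception {
        Connector conn = new MockInstance("example").getConnector("root", new byte[0]);
        conn.tableOperations().create("example");

        // Write one entry in each of two column families.
        BatchWriter bw = conn.createBatchWriter("example", 1000000L, 60000L, 1);
        Mutation m = new Mutation(new Text("row1"));
        m.put(new Text("cf1"), new Text("cq"), new Value("a".getBytes()));
        m.put(new Text("cf2"), new Text("cq"), new Value("b".getBytes()));
        bw.addMutation(m);
        bw.close();

        // Only cf1 entries come back; a second fetchColumnFamily() call would
        // add another family to the scan.
        Scanner scanner = conn.createScanner("example", new Authorizations());
        scanner.fetchColumnFamily(new Text("cf1"));
        for (Entry<Key,Value> entry : scanner) {
          System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
      }
    }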


    }
   
    // delete the clone markers and create directory entries
    Scanner mscanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
    mscanner.fetchColumnFamily(Constants.METADATA_CLONED_COLUMN_FAMILY);
   
    int dirCount = 0;
   
    for (Entry<Key,Value> entry : mscanner) {
      Key k = entry.getKey();
View Full Code Here


        Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
        scanner.setRange(deleteRange);
        ColumnFQ.fetch(scanner, Constants.METADATA_DIRECTORY_COLUMN);
        ColumnFQ.fetch(scanner, Constants.METADATA_TIME_COLUMN);
        scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
        scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
        Set<String> datafiles = new TreeSet<String>();
        for (Entry<Key,Value> entry : scanner) {
          Key key = entry.getKey();
          if (key.compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
            datafiles.add(key.getColumnQualifier().toString());
View Full Code Here
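
In the excerpt above, ColumnFQ.fetch() (an internal Accumulo utility) narrows the scan to a single family/qualifier pair, while fetchColumnFamily() pulls whole families; both kinds of restriction can be combined on one scanner. Assuming ColumnFQ exposes its family and qualifier via getters, the utility call is equivalent in effect to:

    scanner.fetchColumn(Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(),
        Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier());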

  }
 
  public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
    mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, 10000000, 60000L, 1); // 10MB buffer, 60s max latency, 1 write thread
    for (Entry<Key,Value> entry : mscanner) {
      log.debug("Looking at entry " + entry + " with tid " + tid);
      if (Long.parseLong(entry.getValue().toString()) == tid) {
        log.debug("deleting entry " + entry);
View Full Code Here
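
removeBulkLoadEntries() wraps its metadata scanner in an IsolatedScanner so that each row is returned as a consistent snapshot even while the BatchWriter deletes entries behind it. A minimal sketch of the wrapping pattern, using a hypothetical table and column family name:

    // IsolatedScanner decorates any Scanner with row-level isolation.
    Scanner scanner = new IsolatedScanner(conn.createScanner("example", new Authorizations()));
    scanner.fetchColumnFamily(new Text("loaded"));
    for (Entry<Key,Value> entry : scanner) {
      // Entries from one row are never interleaved with concurrent updates
      // to that row.
      System.out.println(entry.getKey());
    }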

        Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
        scanner.setRange(scanRange);
        ColumnFQ.fetch(scanner, Constants.METADATA_PREV_ROW_COLUMN);
        ColumnFQ.fetch(scanner, Constants.METADATA_TIME_COLUMN);
        ColumnFQ.fetch(scanner, Constants.METADATA_DIRECTORY_COLUMN);
        scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
        Mutation m = new Mutation(stopRow);
        String maxLogicalTime = null;
        for (Entry<Key,Value> entry : scanner) {
          Key key = entry.getKey();
          Value value = entry.getValue();
View Full Code Here

  public static List<String> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) {
    List<String> result = new ArrayList<String>();
    try {
      Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
      mscanner.setRange(extent.toMetadataRange());
      mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
      for (Entry<Key,Value> entry : mscanner) {
        if (Long.parseLong(entry.getValue().toString()) == tid) {
          result.add(entry.getKey().getColumnQualifier().toString());
        }
      }
View Full Code Here

   
    Map<String,Long> ret = new HashMap<String,Long>();
   
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
    for (Entry<Key,Value> entry : scanner) {
      String file = entry.getKey().getColumnQualifier().toString();
      Long tid = Long.parseLong(entry.getValue().toString());
     
      ret.put(file, tid);
View Full Code Here
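
Here the range is a single row: the Range(Text) constructor used above covers exactly one row, and fetchColumnFamily() then narrows the scan to that row's bulk-file entries. The same single-row idiom, with illustrative names:

    Scanner s = conn.createScanner("example", new Authorizations());
    s.setRange(new Range(new Text("row1"))); // exactly one row
    s.fetchColumnFamily(new Text("loaded")); // only that row's "loaded" family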

  @Override
  public void visit(State state, Properties props) throws Exception {
    log.debug("checking balance");
    Map<String,Long> counts = new HashMap<String,Long>();
    Scanner scanner = state.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
    scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
    for (Entry<Key,Value> entry : scanner) {
      String location = entry.getKey().getColumnQualifier().toString();
      Long count = counts.get(location);
      if (count == null)
        count = 0L; // first tablet seen at this location
View Full Code Here

        if (c.getSecond() != null) {
          log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
          scanner.fetchColumn(c.getFirst(), c.getSecond());
        } else {
          log.debug("Fetching column family " + c.getFirst());
          scanner.fetchColumnFamily(c.getFirst());
        }
      }

      scanner.setRange(split.getRange());
View Full Code Here
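
This excerpt shows the choice the API offers: fetchColumn() selects one family/qualifier pair, fetchColumnFamily() selects every qualifier in a family, and the two restrictions can be mixed freely on one scanner before iterating. With illustrative names:

    scanner.fetchColumn(new Text("attrs"), new Text("size")); // just attrs:size
    scanner.fetchColumnFamily(new Text("contents"));          // all of contents:*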

  public static SortedMap<String,DataFileValue> getDataFileSizes(KeyExtent extent, AuthInfo credentials) {
    TreeMap<String,DataFileValue> sizes = new TreeMap<String,DataFileValue>();
   
    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    mdScanner.setRange(Constants.METADATA_KEYSPACE);
    mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    Text row = extent.getMetadataEntry();
   
    Key endKey = new Key(row, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
   
View Full Code Here
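
The end key above is built with Key.followingKey(PartialKey.ROW_COLFAM), which returns the smallest key strictly greater than every key sharing the same row and column family, so the half-open range [start, end) covers exactly one family within one row. (The last snippet on this page uses PartialKey.ROW the same way to cover a whole row.) A sketch with illustrative names:

    Key start = new Key(new Text("row1"), new Text("file"));
    Key end = start.followingKey(PartialKey.ROW_COLFAM);
    scanner.setRange(new Range(start, true, end, false)); // one row, one family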

    Key rowKey = new Key(metadataEntry);
   
    SortedMap<String,DataFileValue> origDatafileSizes = new TreeMap<String,DataFileValue>();
    SortedMap<String,DataFileValue> highDatafileSizes = new TreeMap<String,DataFileValue>();
    SortedMap<String,DataFileValue> lowDatafileSizes = new TreeMap<String,DataFileValue>();
    scanner3.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
   
    for (Entry<Key,Value> entry : scanner3) {
      if (entry.getKey().compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
        origDatafileSizes.put(entry.getKey().getColumnQualifier().toString(), new DataFileValue(entry.getValue().get()));
View Full Code Here
