Package org.apache.accumulo.core.client.impl

Examples of org.apache.accumulo.core.client.impl.ScannerImpl


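All of these excerpts share one pattern: construct the internal ScannerImpl directly (rather than going through Connector.createScanner), fetch the column families of interest, set a Range, and iterate over the entries. Below is a minimal sketch of that pattern, assuming an Instance and Credentials are already in hand; the unbounded range and the printing are placeholders, and imports are elided as in the surrounding excerpts:

    // Hedged sketch (not taken from the excerpts below): scan the metadata
    // table's data-file entries and print every key/value pair.
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME); // only return this column family
    scanner.setRange(new Range());                        // unbounded; real callers narrow this
    for (Entry<Key,Value> entry : scanner) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }

The first excerpt (which opens mid-method) chooses between the root and metadata tables based on the extent, fetches both the log and data-file families for one tablet, and fails fast on any row outside that tablet: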
        sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
      }

    } else {
      String systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.setRange(extent.toMetadataRange());

      for (Entry<Key,Value> entry : scanner) {
        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
        }


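getTabletLogScanner returns a scanner over just the write-ahead-log entries of one tablet, picking the root table when the extent belongs to the metadata table itself: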
  private static Scanner getTabletLogScanner(Credentials credentials, KeyExtent extent) {
    String tableId = MetadataTable.ID;
    if (extent.isMeta())
      tableId = RootTable.ID;
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId, Authorizations.EMPTY);
    scanner.fetchColumnFamily(LogColumnFamily.NAME);
    Text start = extent.getMetadataEntry();
    Key endKey = new Key(start, LogColumnFamily.NAME);
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
    scanner.setRange(new Range(new Key(start), endKey));
    return scanner;
  }
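
The range construction above is a recurring idiom: followingKey(PartialKey.ROW_COLFAM) yields the first possible key after the given row and column family, so using it as the end key bounds the scan to a single tablet's log entries. The same bound in isolation, with a made-up row value:

    Text start = new Text("4;splitEnd"); // hypothetical tablet metadata row
    Key endKey = new Key(start, LogColumnFamily.NAME).followingKey(PartialKey.ROW_COLFAM);
    Range logRange = new Range(new Key(start), endKey); // start of the row up to the end of its log family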

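getBulkFilesLoaded reads the bulk-file column family of one tablet's metadata row and maps each bulk-loaded file to the transaction id stored in its value: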
  public static Map<FileRef,Long> getBulkFilesLoaded(Credentials credentials, KeyExtent extent) throws IOException {
    Text metadataRow = extent.getMetadataEntry();
    Map<FileRef,Long> ret = new HashMap<FileRef,Long>();

    VolumeManager fs = VolumeManagerImpl.get();
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY);
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key,Value> entry : scanner) {
      Long tid = Long.parseLong(entry.getValue().toString());
      ret.put(new FileRef(fs, entry.getKey()), tid);
    }
    return ret;

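moveMetaDeleteMarkers migrates old-format delete markers out of the root table. The exclusive end row "!!~dem" is the lexicographic successor of the "!!~del" prefix, so the range covers exactly the rows starting with that prefix: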
  public static void moveMetaDeleteMarkers(Instance instance, Credentials creds) {
    String oldDeletesPrefix = "!!~del";
    Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);

    // move old delete markers to new location, to standardize table schema between all metadata tables
    Scanner scanner = new ScannerImpl(instance, creds, RootTable.ID, Authorizations.EMPTY);
    scanner.setRange(oldDeletesRange);
    for (Entry<Key,Value> entry : scanner) {
      String row = entry.getKey().getRow().toString();
      if (row.startsWith(oldDeletesPrefix)) {
        moveDeleteEntry(creds, RootTable.OLD_EXTENT, entry, row, oldDeletesPrefix);
      } else {

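moveMetaDeleteMarkersFrom14 handles the 1.4 upgrade case, moving delete markers for !METADATA-table files from the ordinary deletes section into the root tablet's delete keyspace: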
  public static void moveMetaDeleteMarkersFrom14(Instance instance, Credentials creds) {
    // new KeyExtent is only added to force update to write to the metadata table, not the root table
    KeyExtent notMetadata = new KeyExtent(new Text("anythingNotMetadata"), null, null);

    // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
    Scanner scanner = new ScannerImpl(instance, creds, MetadataTable.ID, Authorizations.EMPTY);
    scanner.setRange(MetadataSchema.DeletesSection.getRange());
    for (Entry<Key,Value> entry : scanner) {
      String row = entry.getKey().getRow().toString();
      if (row.startsWith(MetadataSchema.DeletesSection.getRowPrefix() + "/" + MetadataTable.ID)) {
        moveDeleteEntry(creds, notMetadata, entry, row, MetadataSchema.DeletesSection.getRowPrefix());
      } else {

  }

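getDataFileSizes collects a DataFileValue for every data file referenced by one tablet, again bounding the range with followingKey(PartialKey.ROW_COLFAM):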
  public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, Credentials credentials) throws IOException {
    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();

    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    Text row = extent.getMetadataEntry();
    VolumeManager fs = VolumeManagerImpl.get();

    Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);

    mdScanner.setRange(new Range(new Key(row), endKey));
    for (Entry<Key,Value> entry : mdScanner) {

      if (!entry.getKey().getRow().equals(row))
        break;
      DataFileValue dfv = new DataFileValue(entry.getValue().get());


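deleteTable scans the table's whole metadata range and, when insertDeletes is set, first writes a delete marker for every data file and tablet directory and flushes those markers before removing the metadata entries themselves, so a crash part way through still leaves the garbage collector enough information to clean up. The helper createDeleteMutation is not shown on this page; a plausible sketch of its shape, consistent with the deletes-section rows in the other excerpts (an assumption, not the actual implementation):

    // Hypothetical sketch: a delete marker is a row in the deletes section keyed
    // by the path to remove, with an empty column and value. tableId is unused
    // here and kept only to match the call sites below.
    private static Mutation createDeleteMutation(String tableId, String pathToRemove) {
      Mutation delFlag = new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + pathToRemove));
      delFlag.put(new Text(), new Text(), new Value(new byte[0]));
      return delFlag;
    }
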
  public static void deleteTable(String tableId, boolean insertDeletes, Credentials credentials, ZooLock lock) throws AccumuloException, IOException {
    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    Text tableIdText = new Text(tableId);
    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000)
        .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));

    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {

      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);

      for (Entry<Key,Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }

        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }

      bw.flush();

      ms.clearColumns();
    }

    for (Entry<Key,Value> cell : ms) {
      Key key = cell.getKey();

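The remaining excerpts are from the 1.4 line, recognizable by Constants.METADATA_* and Constants.NO_AUTHS. This one (which opens at the tail of an earlier offline-handling branch) gathers delete candidates for the garbage collector; resuming from continueKey instead of always rescanning the front of the deletes keyspace is what keeps the GC making progress: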
        candidates.clear();
      }
      return candidates;
    }
   
    Scanner scanner = new ScannerImpl(instance, credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
   
    if (continueKey != null) {
      // want to ensure GC makes progress... if the first N deletes are stable and we keep reprocessing them, then we will never inspect deletes after N
      scanner.setRange(new Range(continueKey, true, Constants.METADATA_DELETES_KEYSPACE.getEndKey(), Constants.METADATA_DELETES_KEYSPACE.isEndKeyInclusive()));
      continueKey = null;
    } else {
      // scan the reserved keyspace for deletes
      scanner.setRange(Constants.METADATA_DELETES_KEYSPACE);
    }
   
    // find candidates for deletion; chop off the prefix
    checkForBulkProcessingFiles = false;
    for (Entry<Key,Value> entry : scanner) {

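When the metadata table itself is unavailable, the caller falls back to an OfflineMetadataScanner; otherwise it wraps ScannerImpl in an IsolatedScanner so that each metadata row is read as a consistent whole: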
        scanner = new OfflineMetadataScanner();
      } catch (IOException e) {
        throw new IllegalStateException("Unable to create offline metadata scanner", e);
      }
    } else {
      scanner = new IsolatedScanner(new ScannerImpl(instance, credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS));
    }
   
    scanner.setRange(Constants.METADATA_KEYSPACE);
    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    scanner.fetchColumnFamily(Constants.METADATA_SCANFILE_COLUMN_FAMILY);

  }
 
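Finally, the 1.4 variant of getDataFileSizes, taking an AuthInfo where later versions take Credentials: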
  public static SortedMap<String,DataFileValue> getDataFileSizes(KeyExtent extent, AuthInfo credentials) {
    TreeMap<String,DataFileValue> sizes = new TreeMap<String,DataFileValue>();
   
    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    Text row = extent.getMetadataEntry();
   
    Key endKey = new Key(row, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
   
    mdScanner.setRange(new Range(new Key(row), endKey));
    for (Entry<Key,Value> entry : mdScanner) {
     
      if (!entry.getKey().getRow().equals(row))
        break;
      DataFileValue dfv = new DataFileValue(entry.getValue().get());
