Package org.apache.accumulo.core.data

Examples of org.apache.accumulo.core.data.Key
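
Before the harvested snippets, a minimal, self-contained sketch of the Key basics they all rely on. A Key is an ordered tuple of row, column family, column qualifier, visibility, and timestamp; PartialKey selects how much of that tuple a comparison or followingKey() call considers. The rows and columns here are made-up sample values.

    import org.apache.hadoop.io.Text;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.PartialKey;
    import org.apache.accumulo.core.data.Range;

    public class KeyBasics {
      public static void main(String[] args) {
        Key a = new Key(new Text("row1"), new Text("cf"), new Text("cq"));
        Key b = new Key(new Text("row2"), new Text("cf"), new Text("cq"));

        // Full comparison vs. row-only comparison.
        int full = a.compareTo(b);                    // < 0 : row1 sorts before row2
        int rowOnly = a.compareTo(b, PartialKey.ROW); // also < 0, columns ignored

        // followingKey(ROW) is the smallest key strictly after a row,
        // which makes a natural exclusive end for a single-row Range.
        Key rowStart = new Key(new Text("row1"));
        Range oneRow = new Range(rowStart, true, rowStart.followingKey(PartialKey.ROW), false);

        System.out.println(full + " " + rowOnly + " " + oneRow);
      }
    }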


   
    // Open each data file, record its first and last Key, and always close the reader.
    for (Entry<String,DataFileValue> entry : files.entrySet()) {
      String file = entry.getKey();
      FileSKVIterator openReader = fileFactory.openReader(file, true, fs, conf, acuTableConf);
      try {
        Key first = openReader.getFirstKey();
        Key last = openReader.getLastKey();
        falks.put(file, new Pair<Key,Key>(first, last));
      } finally {
        openReader.close();
      }
    }


        // retry and also cause ugly error message that the admin has to ignore... could
        // go get the first and last key, but this code is called while the tablet lock
        // is held... or just compact the file....
        result.put(file, entry.getValue().getSize());
      } else {
        Key first = pair.getFirst();
        Key last = pair.getSecond();
        // A file with no first/last key is empty; add it to the compact set so it goes away.
        // (|| instead of && also guards against an NPE if only one endpoint were null.)
        if (first == null || last == null || !this.extent.contains(first.getRow()) || !this.extent.contains(last.getRow())) {
          result.put(file, entry.getValue().getSize());
        }
      }
    }
    return result;
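The test above boils down to a predicate on a file's key span. A sketch of that predicate factored into a helper (fileIsEmptyOrOutside is a made-up name, not an Accumulo API):

    // True when a file has no keys at all, or when its first/last rows
    // fall outside the tablet's extent; such files are safe to compact away.
    static boolean fileIsEmptyOrOutside(KeyExtent extent, Key first, Key last) {
      if (first == null || last == null)
        return true; // no first or last key means the file is empty
      return !extent.contains(first.getRow()) || !extent.contains(last.getRow());
    }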

    // check to see if one row takes up most of the tablet, in which case we can not split
    try {
     
      Text lastRow;
      if (extent.getEndRow() == null) {
        Key lastKey = (Key) FileUtil.findLastKey(fs, tabletServer.getSystemConfiguration(), files);
        lastRow = lastKey.getRow();
      } else {
        lastRow = extent.getEndRow();
      }

      // We expect to get a midPoint for this set of files. If we don't get one, we have a problem.
      final Key mid = keys.get(.5);
      if (null == mid) {
        throw new IllegalStateException("Could not determine midpoint for files");
      }

      // check to see that the midPoint is not equal to the end key
      if (mid.compareRow(lastRow) == 0) {
        if (keys.firstKey() < .5) {
          Key candidate = keys.get(keys.firstKey());
          if (candidate.compareRow(lastRow) != 0) {
            // we should use this ratio in split size estimations
            if (log.isTraceEnabled())
              log.trace(String.format("Splitting at %6.2f instead of .5, row at .5 is same as end row%n", keys.firstKey()));
            return new SplitRowSpec(keys.firstKey(), candidate.getRow());
          }
         
        }
       
        log.warn("Cannot split tablet " + extent + " it contains a big row : " + lastRow);
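The split check above hinges on Key.compareRow, which compares only the row portion of a Key against a Text. A tiny illustration with made-up values:

    Key mid = new Key(new Text("rowM"), new Text("cf"), new Text("cq"));
    Text lastRow = new Text("rowM");
    if (mid.compareRow(lastRow) == 0) {
      // The candidate split point falls on the tablet's last row, so a split
      // here would leave one child tablet holding almost everything.
    }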

       
        bw.close();
      } else if (opts.mode.equals("verifyDeleted")) {
        Scanner s = connector.createScanner(opts.tableName, opts.auths);
        s.setBatchSize(scanOpts.scanBatchSize);
        Key startKey = new Key(encodeLong(opts.start), CF_BYTES, CQ_BYTES, new byte[0], Long.MAX_VALUE);
        Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), CF_BYTES, CQ_BYTES, new byte[0], 0);
        s.setRange(new Range(startKey, stopKey));
       
        for (Entry<Key,Value> entry : s) {
          System.err.println("ERROR : saw entries in range that should be deleted ( first value : " + entry.getValue().toString() + ")");
          System.err.println("exiting...");
          System.exit(1);
        }
       
      } else if (opts.mode.equals("verify")) {
        long t1 = System.currentTimeMillis();
       
        Scanner s = connector.createScanner(opts.tableName, opts.auths);
        Key startKey = new Key(encodeLong(opts.start), CF_BYTES, CQ_BYTES, new byte[0], Long.MAX_VALUE);
        Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), CF_BYTES, CQ_BYTES, new byte[0], 0);
        s.setBatchSize(scanOpts.scanBatchSize);
        s.setRange(new Range(startKey, stopKey));
       
        long i = opts.start;
       
        for (Entry<Key,Value> e : s) {
          Key k = e.getKey();
          Value v = e.getValue();
         
          // System.out.println("v = "+v);
         
          checkKeyValue(i, k, v);
         
          i++;
        }
       
        if (i != opts.start + opts.num) {
          System.err.println("ERROR : did not see expected number of rows, saw " + (i - opts.start) + " expected " + opts.num);
          System.err.println("exiting... ARGHHHHHH");
          System.exit(1);
         
        }
       
        long t2 = System.currentTimeMillis();
       
        System.out.printf("time : %9.2f secs%n", ((t2 - t1) / 1000.0));
        System.out.printf("rate : %9.2f entries/sec%n", opts.num / ((t2 - t1) / 1000.0));
       
      } else if (opts.mode.equals("randomLookups")) {
        int numLookups = 1000;
       
        Random r = new Random();
       
        long t1 = System.currentTimeMillis();
       
        for (int i = 0; i < numLookups; i++) {
          long row = ((r.nextLong() & 0x7fffffffffffffffL) % opts.num) + opts.start;
         
          Scanner s = connector.createScanner(opts.tableName, opts.auths);
          s.setBatchSize(scanOpts.scanBatchSize);
          Key startKey = new Key(encodeLong(row), CF_BYTES, CQ_BYTES, new byte[0], Long.MAX_VALUE);
          Key stopKey = new Key(encodeLong(row), CF_BYTES, CQ_BYTES, new byte[0], 0);
          s.setRange(new Range(startKey, stopKey));
         
          Iterator<Entry<Key,Value>> si = s.iterator();
         
          if (si.hasNext()) {
            Entry<Key,Value> e = si.next();
            Key k = e.getKey();
            Value v = e.getValue();
           
            checkKeyValue(row, k, v);
           
            if (si.hasNext()) {
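The verify modes above build single-cell ranges by varying only the timestamp: Long.MAX_VALUE on the start key and 0 on the stop key, since Accumulo sorts timestamps descending within a cell. A sketch of that pattern as a helper (cellRange is a made-up name):

    // A Range covering every version of one (row, cf, cq) cell.
    static Range cellRange(byte[] row, byte[] cf, byte[] cq) {
      Key start = new Key(row, cf, cq, new byte[0], Long.MAX_VALUE); // sorts first
      Key stop = new Key(row, cf, cq, new byte[0], 0);               // sorts last
      return new Range(start, stop);
    }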

  /**
   * Convenience method for reading entries from the metadata table.
   */
  public static SortedMap<KeyExtent,Text> getMetadataDirectoryEntries(SortedMap<Key,Value> entries) {
    Key key;
    Value val;
    Text datafile = null;
    Value prevRow = null;
    KeyExtent ke;

    SortedMap<KeyExtent,Text> results = new TreeMap<KeyExtent,Text>();

    Text lastRowFromKey = new Text();

    // The Text objects below are reused across loop iterations for efficiency.
    Text colf = new Text();
    Text colq = new Text();

    for (Entry<Key,Value> entry : entries.entrySet()) {
      key = entry.getKey();
      val = entry.getValue();

      if (key.compareRow(lastRowFromKey) != 0) {
        prevRow = null;
        datafile = null;
        key.getRow(lastRowFromKey);
      }

      colf = key.getColumnFamily(colf);
      colq = key.getColumnQualifier(colq);

      // interpret the row id as a key extent
      if (Constants.METADATA_DIRECTORY_COLUMN.equals(colf, colq))
        datafile = new Text(val.toString());

      else if (Constants.METADATA_PREV_ROW_COLUMN.equals(colf, colq))
        prevRow = new Value(val);

      if (datafile != null && prevRow != null) {
        ke = new KeyExtent(key.getRow(), prevRow);
        results.put(ke, datafile);

        datafile = null;
        prevRow = null;
      }
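The loop above leans on the fill-in flavors of Key's accessors: getRow(Text), getColumnFamily(Text), and getColumnQualifier(Text) write into the Text they are given (and return it) instead of allocating a new one. A small sketch of the pattern, assuming any Iterable of Key/Value entries:

    Text row = new Text();
    Text colf = new Text();
    for (Entry<Key,Value> entry : scanner) {
      Key k = entry.getKey();
      k.getRow(row);                  // fills `row` in place
      colf = k.getColumnFamily(colf); // fills and returns the passed Text
      // use row/colf here; the next iteration overwrites them
    }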

    // Scan the metadata table for the data-file entries of a single tablet.
    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    Text row = extent.getMetadataEntry();

    Key endKey = new Key(row, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);

    mdScanner.setRange(new Range(new Key(row), endKey));
    for (Entry<Key,Value> entry : mdScanner) {

      if (!entry.getKey().getRow().equals(row))
        break;
      DataFileValue dfv = new DataFileValue(entry.getValue().get());
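followingKey(PartialKey.ROW_COLFAM) above produces the smallest key strictly greater than everything in that row and column family, which bounds the scan to one tablet's data-file entries. The same shape with made-up values, using an exclusive end so the bounding key itself is not returned:

    Text row = new Text("tableId;endRow");
    Text fam = new Text("file");
    Key start = new Key(row, fam);
    Range r = new Range(start, true, start.followingKey(PartialKey.ROW_COLFAM), false);

(Newer Accumulo releases also offer Range.exact(row, fam) as a shorthand, assuming a version that ships it.)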

      // something is wrong, this should not happen... if a tablet is split, it will always have a
      // prev end row....
      throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
   
    // check to see if the prev tablet exists in the metadata table
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));

    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));

    if (!scanner2.iterator().hasNext()) {
      log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
      rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), credentials, lock);
      return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
    } else {
      log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);

      List<String> highDatafilesToRemove = new ArrayList<String>();

      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
      Key rowKey = new Key(metadataEntry);
     
      SortedMap<String,DataFileValue> origDatafileSizes = new TreeMap<String,DataFileValue>();
      SortedMap<String,DataFileValue> highDatafileSizes = new TreeMap<String,DataFileValue>();
      SortedMap<String,DataFileValue> lowDatafileSizes = new TreeMap<String,DataFileValue>();
      scanner3.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
      scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
     
      for (Entry<Key,Value> entry : scanner3) {
        if (entry.getKey().compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
          origDatafileSizes.put(entry.getKey().getColumnQualifier().toString(), new DataFileValue(entry.getValue().get()));
        }

      ms.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
      Constants.METADATA_DIRECTORY_COLUMN.fetch(ms);
     
      for (Entry<Key,Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
          String relPath = key.getColumnQualifier().toString();
          // only insert deletes for files owned by this table
          if (!relPath.startsWith("../"))
            bw.addMutation(createDeleteMutation(tableId, relPath));
        }

        if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }

      bw.flush();

      ms.clearColumns();
    }

    // Delete all remaining columns, batching deletes into one Mutation per row.
    for (Entry<Key,Value> cell : ms) {
      Key key = cell.getKey();

      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }

      // The row changed; flush the previous mutation and start a new one.
      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }

    if (m != null)
      bw.addMutation(m);

 
  private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
    scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
    Text start = extent.getMetadataEntry();
    Key endKey = new Key(start, Constants.METADATA_LOG_COLUMN_FAMILY);
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
    scanner.setRange(new Range(new Key(start), endKey));
    return scanner;
  }

      if (!srcFiles.containsAll(cloneFiles)) {
        // delete existing cloned tablet entry
        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());

        for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
          Key k = entry.getKey();
          m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
        }

        bw.addMutation(m);

        for (Map<Key,Value> st : srcTablets)
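putDelete with an explicit timestamp, as above, writes a delete marker that suppresses versions of that cell at or before the given timestamp; copying each entry's own timestamp out of its Key keeps anything written later intact. A tiny illustration with made-up values:

    Mutation m = new Mutation(new Text("row1"));
    // Suppresses versions of row1/cf/cq with timestamp <= 42; later writes survive.
    m.putDelete(new Text("cf"), new Text("cq"), 42L);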
