Examples of HStoreKey


Examples of org.apache.hadoop.hbase.HStoreKey
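HStoreKey is the internal (row, column, timestamp) key type used by the store and memcache code in this package; the excerpts below show it being constructed, compared, stripped of its timestamp, and filled in by scanners.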

  /**
   * Write values into the memcache and read them back.
   * @throws UnsupportedEncodingException
   */
  public void testMemcache() throws UnsupportedEncodingException {
    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
      byte [] row = Bytes.toBytes("row_" + k);
      HStoreKey key =
        new HStoreKey(row, CONTENTS_BASIC, System.currentTimeMillis());
      hmemcache.add(key, (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
     
      key =
        new HStoreKey(row, Bytes.toBytes(ANCHORNUM + k), System.currentTimeMillis());
      hmemcache.add(key, (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
    }

    // Read them back

    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
      List<Cell> results;
      byte [] row = Bytes.toBytes("row_" + k);
      HStoreKey key = new HStoreKey(row, CONTENTS_BASIC, Long.MAX_VALUE);
      results = hmemcache.get(key, 1);
      assertNotNull("no data for " + key.toString(), results);
      assertEquals(1, results.size());
      String bodystr = new String(results.get(0).getValue(),
          HConstants.UTF8_ENCODING);
      String teststr = CONTENTSTR + k;
      assertTrue("Incorrect value for key: (" + key.toString() +
          "), expected: '" + teststr + "' got: '" +
          bodystr + "'", teststr.compareTo(bodystr) == 0);
     
      key = new HStoreKey(row, Bytes.toBytes(ANCHORNUM + k), Long.MAX_VALUE);
      results = hmemcache.get(key, 1);
      assertNotNull("no data for " + key.toString(), results);
      assertEquals(1, results.size());
      bodystr = new String(results.get(0).getValue(),
          HConstants.UTF8_ENCODING);
      teststr = ANCHORSTR + k;
      assertTrue("Incorrect value for key: (" + key.toString() +
          "), expected: '" + teststr + "' got: '" + bodystr + "'",
          teststr.compareTo(bodystr) == 0);
    }
  }
View Full Code Here
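
As a quick orientation (not part of the test above), the only HStoreKey machinery this snippet leans on is the (row, column, timestamp) triple and its getters. A minimal sketch, with the class and column names made up for illustration:

import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Bytes;

public class HStoreKeySketch {
  public static void main(String[] args) {
    byte [] row = Bytes.toBytes("row_1");
    byte [] column = Bytes.toBytes("contents:basic");  // illustrative column name
    HStoreKey key = new HStoreKey(row, column, System.currentTimeMillis());
    System.out.println(Bytes.toString(key.getRow()));     // row_1
    System.out.println(Bytes.toString(key.getColumn()));  // contents:basic
    System.out.println(key.getTimestamp());               // the creation-time millis
  }
}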

Examples of org.apache.hadoop.hbase.HStoreKey

  private void getRowKeyAtOrBefore(final SortedMap<HStoreKey, byte []> map,
      final byte [] row, final SortedMap<HStoreKey, Long> candidateKeys,
      final Set<HStoreKey> deletes) {
    // We want the earliest possible to start searching from.  Start before
    // the candidate key in case it turns out a delete came in later.
    HStoreKey search_key = candidateKeys.isEmpty()?
      new HStoreKey(row, this.regionInfo):
      new HStoreKey(candidateKeys.firstKey().getRow(), this.regionInfo);
    List<HStoreKey> victims = new ArrayList<HStoreKey>();
    long now = System.currentTimeMillis();

    // Get all the entries that come equal or after our search key
    SortedMap<HStoreKey, byte []> tailMap = map.tailMap(search_key);

    // if there are items in the tail map, there's either a direct match to
    // the search key, or a range of values between the first candidate key
    // and the ultimate search key (or the end of the cache)
    if (!tailMap.isEmpty() &&
        HStoreKey.compareTwoRowKeys(this.regionInfo,
          tailMap.firstKey().getRow(), search_key.getRow()) <= 0) {
      Iterator<HStoreKey> key_iterator = tailMap.keySet().iterator();

      // Keep looking at cells as long as they are no greater than the
      // ultimate search key and there's still records left in the map.
      HStoreKey deletedOrExpiredRow = null;
      for (HStoreKey found_key = null; key_iterator.hasNext() &&
          (found_key == null ||
            HStoreKey.compareTwoRowKeys(this.regionInfo,
                found_key.getRow(), row) <= 0);) {
        found_key = key_iterator.next();
        if (HStoreKey.compareTwoRowKeys(this.regionInfo,
            found_key.getRow(), row) <= 0) {
          if (HLogEdit.isDeleted(tailMap.get(found_key))) {
            HStore.handleDeleted(found_key, candidateKeys, deletes);
            if (deletedOrExpiredRow == null) {
              deletedOrExpiredRow = found_key;
            }
          } else {
            if (HStore.notExpiredAndNotInDeletes(this.ttl,
                found_key, now, deletes)) {
              candidateKeys.put(stripTimestamp(found_key),
                Long.valueOf(found_key.getTimestamp()));
            } else {
              if (deletedOrExpiredRow == null) {
                deletedOrExpiredRow = new HStoreKey(found_key);
              }
              addVictim(victims, found_key);
            }
          }
        }
View Full Code Here
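
The row-ordering helpers used above can be exercised on their own. A hedged sketch; the HRegionInfo is whatever region the store belongs to, passed in rather than invented here, and the class name is illustrative:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Bytes;

class RowOrderingSketch {
  static void compareRows(final HRegionInfo regionInfo) {
    byte [] a = Bytes.toBytes("row_1");
    byte [] b = Bytes.toBytes("row_2");
    // Negative when a sorts before b under the region's row ordering.
    int cmp = HStoreKey.compareTwoRowKeys(regionInfo, a, b);
    // Row-level equality under the same ordering.
    boolean same = HStoreKey.equalsTwoRowKeys(regionInfo, a, b);
    System.out.println(cmp + " / " + same);
  }
}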

Examples of org.apache.hadoop.hbase.HStoreKey

    if (candidateKeys.isEmpty()) {
      Set<HStoreKey> keys = headMap.keySet();
      HStoreKey [] cells = keys.toArray(new HStoreKey[keys.size()]);
      byte [] lastRowFound = null;
      for (int i = cells.length - 1; i >= 0; i--) {
        HStoreKey found_key = cells[i];
        // if the last row we found a candidate key for is different than
        // the row of the current candidate, we can stop looking -- if its
        // not a delete record.
        boolean deleted = HLogEdit.isDeleted(headMap.get(found_key));
        if (lastRowFound != null &&
            !HStoreKey.equalsTwoRowKeys(this.regionInfo, lastRowFound,
                found_key.getRow()) && !deleted) {
          break;
        }
        // If this isn't a delete, record it as a candidate key. Also
        // take note of the row of this candidate so that we'll know when
        // we cross the row boundary into the previous row.
        if (!deleted) {
          if (HStore.notExpiredAndNotInDeletes(this.ttl, found_key, now, deletes)) {
            lastRowFound = found_key.getRow();
            candidateKeys.put(stripTimestamp(found_key),
              Long.valueOf(found_key.getTimestamp()));
          } else {
            expires.add(found_key);
            if (LOG.isDebugEnabled()) {
              LOG.debug("getRowKeyBefore: " + found_key + ": expired, skipped");
            }
          }
        } else {
          deletes.add(found_key);
        }
      }
    } else {
      // If there are already some candidate keys, we only need to consider
      // the very last row's worth of keys in the headMap, because any
      // smaller acceptable candidate keys would have caused us to start
      // our search earlier in the list, and we wouldn't be searching here.
      SortedMap<HStoreKey, byte[]> thisRowTailMap =
        headMap.tailMap(new HStoreKey(headMap.lastKey().getRow(), this.regionInfo));
      Iterator<HStoreKey> key_iterator = thisRowTailMap.keySet().iterator();
      do {
        HStoreKey found_key = key_iterator.next();
        if (HLogEdit.isDeleted(thisRowTailMap.get(found_key))) {
          HStore.handleDeleted(found_key, candidateKeys, deletes);
        } else {
          // Note: unlike the earlier branches, this check is a plain || chain
          // rather than notExpiredAndNotInDeletes, so a key that has expired
          // but is absent from deletes is still recorded as a candidate.
          if (ttl == HConstants.FOREVER ||
              now < found_key.getTimestamp() + ttl ||
              !deletes.contains(found_key)) {
            candidateKeys.put(stripTimestamp(found_key),
              Long.valueOf(found_key.getTimestamp()));
          } else {
            expires.add(found_key);
            if (LOG.isDebugEnabled()) {
              LOG.debug("internalGetRowKeyAtOrBefore: " + found_key +
                ": expired, skipped");
View Full Code Here

Examples of org.apache.hadoop.hbase.HStoreKey

      } while (key_iterator.hasNext());
    }
  }
 
  static HStoreKey stripTimestamp(HStoreKey key) {
    return new HStoreKey(key.getRow(), key.getColumn(), key.getHRegionInfo());
  }
View Full Code Here
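
Worth noting: stripTimestamp() copies only the row, the column, and the region info, dropping the version. That is what lets the callers above use the stripped key as the map key in candidateKeys while carrying the timestamp separately as the value.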

Examples of org.apache.hadoop.hbase.HStoreKey

      final long now) {
    ArrayList<Cell> result = new ArrayList<Cell>();
    List<HStoreKey> victims = new ArrayList<HStoreKey>();
    SortedMap<HStoreKey, byte[]> tailMap = map.tailMap(key);
    for (Map.Entry<HStoreKey, byte[]> es : tailMap.entrySet()) {
      HStoreKey itKey = es.getKey();
      if (itKey.matchesRowCol(key)) {
        if (!isDeleted(es.getValue())) {
          // Filter out expired results
          if (HStore.notExpiredAndNotInDeletes(ttl, itKey, now, deletes)) {
            result.add(new Cell(tailMap.get(itKey), itKey.getTimestamp()));
            if (numVersions > 0 && result.size() >= numVersions) {
              break;
            }
          } else {
            addVictim(victims, itKey);
View Full Code Here
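
The match test driving that loop, matchesRowCol(), compares row and column only; the timestamp is ignored, which is how the first example's Long.MAX_VALUE search key finds entries stored with real timestamps. A small sketch with made-up names:

import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Bytes;

class MatchesRowColSketch {
  public static void main(String[] args) {
    byte [] row = Bytes.toBytes("row_1");
    byte [] col = Bytes.toBytes("contents:basic");  // illustrative column name
    HStoreKey searchKey = new HStoreKey(row, col, Long.MAX_VALUE);
    HStoreKey storedKey = new HStoreKey(row, col, 1234L);
    // Row and column match; the differing timestamps do not matter here.
    System.out.println(searchKey.matchesRowCol(storedKey));  // expected: true
  }
}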

Examples of org.apache.hadoop.hbase.HStoreKey

      final Pattern columnPattern) {
    List<HStoreKey> result = new ArrayList<HStoreKey>();
    List<HStoreKey> victims = new ArrayList<HStoreKey>();
    SortedMap<HStoreKey, byte []> tailMap = map.tailMap(origin);
    for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
      HStoreKey key = es.getKey();
      // if there's no column name, then compare rows and timestamps
      if (origin.getColumn() != null && origin.getColumn().length == 0) {
        // if the current and origin row don't match, then we can jump
        // out of the loop entirely.
        if (!HStoreKey.equalsTwoRowKeys(regionInfo, key.getRow(),
            origin.getRow())) {
          break;
        }
        // if the column pattern is not null, we use it for column matching.
        // we will skip the keys whose column doesn't match the pattern.
        if (columnPattern != null) {
          if (!(columnPattern.matcher(Bytes.toString(key.getColumn())).matches())) {
            continue;
          }
        }
        // if the rows match but the timestamp is newer, skip it so we can
        // get to the ones we actually want.
        if (key.getTimestamp() > origin.getTimestamp()) {
          continue;
        }
      } else { // compare rows and columns
        // if the key doesn't match the row and column, then we're done, since
        // all the cells are ordered.
        if (!key.matchesRowCol(origin)) {
          break;
        }
      }
      if (!isDeleted(es.getValue())) {
        if (HStore.notExpiredAndNotInDeletes(this.ttl, key, now, deletes)) {
View Full Code Here
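
The column-pattern filter in the middle of that loop is a plain java.util.regex match against the stringified column. A standalone sketch, with the pattern and names made up for illustration:

import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Bytes;

class ColumnPatternSketch {
  public static void main(String[] args) {
    Pattern columnPattern = Pattern.compile("contents:.*");  // illustrative pattern
    HStoreKey key = new HStoreKey(Bytes.toBytes("row_1"),
        Bytes.toBytes("contents:basic"), 1L);
    // Same test as above: keep only keys whose column matches the pattern.
    boolean keep = columnPattern.matcher(Bytes.toString(key.getColumn())).matches();
    System.out.println(keep);  // expected: true
  }
}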

Examples of org.apache.hadoop.hbase.HStoreKey

    Memcache memcache1 = new Memcache();
    // TODO: x32 vs x64
    long size = 0;
    final int count = 10000;
    for (int i = 0; i < count; i++) {
      size += memcache1.add(new HStoreKey(Bytes.toBytes(i)),
        HConstants.EMPTY_BYTE_ARRAY);
    }
    LOG.info("memcache1 estimated size=" + size);
    for (int i = 0; i < count; i++) {
      size += memcache1.add(new HStoreKey(Bytes.toBytes(i)),
        HConstants.EMPTY_BYTE_ARRAY);
    }
    LOG.info("memcache1 estimated size (2nd loading of same data)=" + size);
    // Make a variably sized memcache.
    Memcache memcache2 = new Memcache();
    for (int i = 0; i < count; i++) {
      byte [] b = Bytes.toBytes(i);
      size += memcache2.add(new HStoreKey(b, b),
        new byte [i]);
    }
    LOG.info("memcache2 estimated size=" + size);
    final int seconds = 30;
    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
View Full Code Here
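
The detail to notice here is the return value: add() hands back an estimated size for the entry, which the loops accumulate into size and log, presumably why the TODO above mentions x32 vs x64.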

Examples of org.apache.hadoop.hbase.HStoreKey

      for(int i = 0; i < nbRows; i++) {
        requestCount.incrementAndGet();
        // Collect values to be returned here
        HbaseMapWritable<byte [], Cell> values
          = new HbaseMapWritable<byte [], Cell>();
        HStoreKey key = new HStoreKey();
        while (s.next(key, values)) {
          if (values.size() > 0) {
            // Row has something in it. Return the value.
            resultSets.add(new RowResult(key.getRow(), values));
            break;
          }
        }
      }
      return resultSets.toArray(new RowResult[resultSets.size()]);
View Full Code Here
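
Here HStoreKey plays a different role: a blank key is handed to the scanner, and each call to next() fills it in alongside the values map, so key.getRow() afterwards names the row that was just returned.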

Examples of org.apache.hadoop.hbase.HStoreKey

  HStoreScanner(HStore store, byte [][] targetCols, byte [] firstRow,
    long timestamp, RowFilterInterface filter)
  throws IOException {
    this.store = store;
    this.previousNext =
      new HStoreKey(HConstants.EMPTY_BYTE_ARRAY, this.store.getHRegionInfo());
    this.dataFilter = filter;
    if (null != dataFilter) {
      dataFilter.reset();
    }
    this.scanners = new InternalScanner[2];
View Full Code Here

Examples of org.apache.hadoop.hbase.HStoreKey

   * Do scanner setup.
   * @param i
   * @throws IOException
   */
  private void setupScanner(final int i) throws IOException {
    this.keys[i] = new HStoreKey();
    this.resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
    if (this.scanners[i] != null && !this.scanners[i].next(this.keys[i], this.resultSets[i])) {
      closeScanner(i);
    }
  }
View Full Code Here