Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HStoreKey$MetaStoreKeyComparator


      // Advance to the first key in each store.
      // All results will match the required column-set and scanTime.
      this.resultSets = new TreeMap[scanners.length];
      this.keys = new HStoreKey[scanners.length];
      for (int i = 0; i < scanners.length; i++) {
        keys[i] = new HStoreKey(HConstants.EMPTY_BYTE_ARRAY, regionInfo);
        resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
        if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
          closeScanner(i);
        }
      }
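
A minimal, self-contained sketch (not taken from this file) of why each per-scanner result map above is created with Bytes.BYTES_COMPARATOR: byte[] keys compare by reference in a plain TreeMap, so a content-based comparator is required. The Cell(value, timestamp) constructor is assumed from the org.apache.hadoop.hbase.io.Cell of this era.

  import java.util.TreeMap;
  import org.apache.hadoop.hbase.io.Cell;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ResultMapSketch {
    public static void main(String[] args) {
      // Content-based ordering for byte[] keys, as in the scanner setup above.
      TreeMap<byte[], Cell> results =
          new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
      results.put(Bytes.toBytes("info:name"),
          new Cell(Bytes.toBytes("value"), System.currentTimeMillis()));
      // A different array instance with the same contents still finds the entry.
      System.out.println(results.containsKey(Bytes.toBytes("info:name")));  // true
    }
  }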


    finalKey.setHRegionInfo(this.info);
    return finalKey;
  }
 
  static HStoreKey stripTimestamp(HStoreKey key) {
    return new HStoreKey(key.getRow(), key.getColumn(), key.getHRegionInfo());
  }
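
A hedged sketch of what stripTimestamp is for. It assumes the (row, column, regioninfo) constructor defaults the timestamp to HConstants.LATEST_TIMESTAMP, as the HStoreKey of this era did, so the stripped key sorts at or ahead of every stamped version of the same row and column (equal row/column order by descending timestamp). The row and column literals are hypothetical.

  // Assumes the usual imports: HStoreKey, HRegionInfo, HConstants, Bytes.
  static boolean strippedSortsFirst(HRegionInfo info) {
    HStoreKey stamped  = new HStoreKey(Bytes.toBytes("row1"),
        Bytes.toBytes("info:a"), 1234L, info);
    // Same copy that stripTimestamp makes: row and column kept, timestamp dropped.
    HStoreKey stripped = new HStoreKey(stamped.getRow(), stamped.getColumn(),
        stamped.getHRegionInfo());
    return stripped.compareTo(stamped) <= 0;   // expected: true
  }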

      // Cast to HbaseReader.
      HBaseMapFile.HBaseReader r =
        (HBaseMapFile.HBaseReader)this.readers.get(mapIndex);
      // Get first, last, and mid keys.
      r.reset();
      HStoreKey firstKey = new HStoreKey();
      HStoreKey lastKey = new HStoreKey();
      r.next(firstKey, new ImmutableBytesWritable());
      r.finalKey(lastKey);
      HStoreKey mk = (HStoreKey)r.midKey();
      if (mk != null) {
        // if the midkey is the same as the first and last keys, then we cannot
        // (ever) split this region.
        if (HStoreKey.equalsTwoRowKeys(info, mk.getRow(), firstKey.getRow()) &&
            HStoreKey.equalsTwoRowKeys(info, mk.getRow(), lastKey.getRow())) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("cannot split because midkey is the same as first or last row");
          }
          return null;
        }
        return new StoreSize(maxSize, mk.getRow());
      }
    } catch(IOException e) {
      LOG.warn("Failed getting store size for " + this.storeNameStr, e);
    } finally {
      this.lock.readLock().unlock();
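
A minimal sketch of the unsplittable test above, using hard-coded rows in place of the file's first, mid, and last keys; info stands for the surrounding store's HRegionInfo, and the row values are hypothetical.

  byte[] first = Bytes.toBytes("rowA");
  byte[] mid   = Bytes.toBytes("rowA");
  byte[] last  = Bytes.toBytes("rowA");
  boolean unsplittable =
      HStoreKey.equalsTwoRowKeys(info, mid, first) &&
      HStoreKey.equalsTwoRowKeys(info, mid, last);
  // true here: every key in the file shares one row, so no split point can put
  // data on both sides of the daughter regions.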

        if (val.isTransactionEntry() || Bytes.equals(column, HLog.METACOLUMN)
            || !Bytes.equals(key.getRegionName(), info.getRegionName())
            || !HStoreKey.matchingFamily(family.getName(), column)) {
          continue;
        }
        HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp(),
          this.info);
        reconstructedCache.put(k, val.getVal());
        editsCount++;
        // Every 2k edits, tell the reporter we're making progress.
        // Have seen 60k edits taking 3 minutes to complete.
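
A hedged illustration of the family filter above: matchingFamily tests whether a full column name (family:qualifier) belongs to the given family. Whether the family byte[] carries a trailing ':' varies by version, so treat the literals below as assumptions.

  byte[] column = Bytes.toBytes("info:regioninfo");
  boolean ours   = HStoreKey.matchingFamily(Bytes.toBytes("info"), column);
  boolean others = HStoreKey.matchingFamily(Bytes.toBytes("anchor"), column);
  // Expected: ours == true, others == false.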

      // Related, looks like 'merging compactions' in BigTable paper interlaces
      // a memcache flush.  We don't.
      int entries = 0;
      try {
        for (Map.Entry<HStoreKey, byte []> es: cache.entrySet()) {
          HStoreKey curkey = es.getKey();
          byte[] bytes = es.getValue();
          if (HStoreKey.matchingFamily(this.family.getName(), curkey.getColumn())) {
            if (!isExpired(curkey, ttl, now)) {
              entries++;
              out.append(curkey, new ImmutableBytesWritable(bytes));
              flushed += this.memcache.heapSize(curkey, bytes, null);
            }
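
A hedged sketch of the TTL test this flush relies on. The real isExpired is a helper inside HStore, so the exact form below is an assumption; HConstants.FOREVER stands for a family with no time-to-live configured.

  static boolean isExpiredSketch(HStoreKey key, long ttl, long now) {
    // Expired when a TTL is set and the cell's timestamp is older than now - ttl.
    return ttl != HConstants.FOREVER && now - key.getTimestamp() > ttl;
  }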

    try {
      HStoreKey [] keys = new HStoreKey[rdrs.length];
      ImmutableBytesWritable [] vals = new ImmutableBytesWritable[rdrs.length];
      boolean [] done = new boolean[rdrs.length];
      for(int i = 0; i < rdrs.length; i++) {
        keys[i] = new HStoreKey(HConstants.EMPTY_BYTE_ARRAY, this.info);
        vals[i] = new ImmutableBytesWritable();
        done[i] = false;
      }

      // Now, advance through the readers in order.  This will have the
      // effect of a run-time sort of the entire dataset.
      int numDone = 0;
      for (int i = 0; i < rdrs.length; i++) {
        rdrs[i].reset();
        done[i] = !rdrs[i].next(keys[i], vals[i]);
        if (done[i]) {
          numDone++;
        }
      }

      long now = System.currentTimeMillis();
      int timesSeen = 0;
      HStoreKey lastSeen = new HStoreKey();
      HStoreKey lastDelete = null;
      while (numDone < done.length) {
        // Get lowest key in all store files.
        int lowestKey = getLowestKey(rdrs, keys, done);
        HStoreKey sk = keys[lowestKey];
        // If it's the same row and column as the last key, increment times seen.
        if (HStoreKey.equalsTwoRowKeys(info, lastSeen.getRow(), sk.getRow())
            && Bytes.equals(lastSeen.getColumn(), sk.getColumn())) {
          timesSeen++;
          // Reset last delete if not exact timestamp -- lastDelete only stops
          // exactly the same key making it out to the compacted store file.
          if (lastDelete != null &&
              lastDelete.getTimestamp() != sk.getTimestamp()) {
            lastDelete = null;
          }
        } else {
          timesSeen = 1;
          lastDelete = null;
        }

        // Don't write empty rows or columns.  Only remove cells on major
        // compaction.  Remove if expired or > VERSIONS.
        if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
          ImmutableBytesWritable value = vals[lowestKey];
          if (!majorCompaction) {
            // Write out all values if not a major compaction.
            compactedOut.append(sk, value);
          } else {
            boolean expired = false;
            boolean deleted = false;
            if (timesSeen <= family.getMaxVersions() &&
                !(expired = isExpired(sk, ttl, now))) {
              // If this value key is same as a deleted key, skip
              if (lastDelete != null && sk.equals(lastDelete)) {
                deleted = true;
              } else if (HLogEdit.isDeleted(value.get())) {
                // If a deleted value, skip
                deleted = true;
                lastDelete = new HStoreKey(sk);
              } else {
                compactedOut.append(sk, vals[lowestKey]);
              }
            }
            if (expired || deleted) {
              // HBASE-855 remove one from timesSeen because it did not make it
              // past expired check -- don't count against max versions.
              timesSeen--;
            }
          }
        }

        // Update last-seen items
        lastSeen = new HStoreKey(sk);

        // Advance the smallest key.  If that reader's all finished, then
        // mark it as done.
        if (!rdrs[lowestKey].next(keys[lowestKey], vals[lowestKey])) {
          done[lowestKey] = true;
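
A hedged sketch of the getLowestKey step driving the merge loop above. The real helper is private to HStore and also takes the reader array; this simplified form needs only the key array and the done flags, and leans on HStoreKey's natural ordering (row ascending, then column ascending, then timestamp descending).

  static int getLowestKeySketch(HStoreKey[] keys, boolean[] done) {
    int lowest = -1;
    for (int i = 0; i < keys.length; i++) {
      if (done[i]) {
        continue;                          // this reader is exhausted
      }
      if (lowest < 0 || keys[i].compareTo(keys[lowest]) < 0) {
        lowest = i;                        // new smallest key so far
      }
    }
    return lowest;
  }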

      // seek back to the beginning
      map.reset();
     
      // seek to the closest key that should match the row we're looking for
      ImmutableBytesWritable readval = new ImmutableBytesWritable();
      HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
      if (readkey == null) {
        return;
      }
     
      do {
        byte [] readcol = readkey.getColumn();
       
        // if we're looking for this column (or all of them), and either there
        // isn't already a value for this column in the results map, or there is
        // one but we haven't collected enough versions yet, and the key we
        // just read matches, then we'll consider it
        if ((columns == null || columns.contains(readcol))
          && (!results.containsKey(readcol)
              || results.get(readcol).getNumValues() < numVersions)
          && key.matchesWithoutColumn(readkey)) {
          // if the value of the cell we're looking at right now is a delete,
          // we need to treat it differently
          if(HLogEdit.isDeleted(readval.get())) {
            // if it's not already recorded as a delete or recorded with a more
            // recent delete timestamp, record it for later
            if (!deletes.containsKey(readcol)
              || deletes.get(readcol).longValue() < readkey.getTimestamp()) {
              deletes.put(readcol, Long.valueOf(readkey.getTimestamp()));             
            }
          } else if (!(deletes.containsKey(readcol)
            && deletes.get(readcol).longValue() >= readkey.getTimestamp()) ) {
            // So the cell itself isn't a delete, but there may be a delete
            // pending from earlier in our search. Only record this result if
            // there aren't any pending deletes.
            if (!(deletes.containsKey(readcol) &&
                deletes.get(readcol).longValue() >= readkey.getTimestamp())) {
              if (!isExpired(readkey, ttl, now)) {
                if (!results.containsKey(readcol)) {
                  results.put(readcol,
                              new Cell(readval.get(), readkey.getTimestamp()));
                } else {
                  results.get(readcol).add(readval.get(),
                                           readkey.getTimestamp());
                }
                // need to reinstantiate the readval so we can reuse it,
                // otherwise next iteration will destroy our result
                readval = new ImmutableBytesWritable();
              }
            }
          }
        } else if (HStoreKey.compareTwoRowKeys(info, key.getRow(), readkey.getRow()) < 0) {
          // if we've crossed into the next row, then we can just stop
          // iterating
          break;
        }
       
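
A hedged sketch of the matchesWithoutColumn test used above. Its semantics are assumed from this call site: same row as the search key and a timestamp no newer than the search key's, with the column ignored. The (row, column, timestamp) constructor and the literals are assumptions.

  HStoreKey search = new HStoreKey(Bytes.toBytes("row1"),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
  HStoreKey found  = new HStoreKey(Bytes.toBytes("row1"),
      Bytes.toBytes("info:a"), 1234L);
  boolean considered = search.matchesWithoutColumn(found);   // expected: true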

          i >= 0 && !hasEnoughVersions(versions, results); i--) {
        MapFile.Reader r = maparray[i];
        synchronized (r) {
          // Do the priming read
          ImmutableBytesWritable readval = new ImmutableBytesWritable();
          HStoreKey readkey = (HStoreKey)r.getClosest(key, readval);
          if (readkey == null) {
            // map.getClosest returns null if the passed key is > than the
            // last key in the map file.  getClosest is a bit of a misnomer
            // since it returns exact match or the next closest key AFTER not
            // BEFORE.  We use getClosest because we're usually passed a
            // key that has a timestamp of maximum long to indicate we want
            // most recent update.
            continue;
          }
          if (!readkey.matchesRowCol(key)) {
            continue;
          }
          if (get(readkey, readval.get(), versions, results, deletes, now)) {
            break;
          }
          for (readval = new ImmutableBytesWritable();
              r.next(readkey, readval) && readkey.matchesRowCol(key);
              readval = new ImmutableBytesWritable()) {
            if (get(readkey, readval.get(), versions, results, deletes, now)) {
              break;
            }
          }
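
As the comment above notes, the key handed to getClosest is usually built with the largest possible timestamp so the priming read lands on the newest cell for the requested row and column. A hedged sketch of building such a search key (row and column literals are hypothetical):

  HStoreKey searchKey = new HStoreKey(Bytes.toBytes("row1"),
      Bytes.toBytes("info:a"), HConstants.LATEST_TIMESTAMP);
  // Equal row/column sort by descending timestamp, so every real cell for
  // row1/info:a sits at or after this key in the map file.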

      if (hasEnoughVersions(versions, results)) {
        return true;
      }
    } else {
      // Is this copy necessary?
      deletes.add(new HStoreKey(key));
    }
    return false;
  }
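
A hedged sketch of what hasEnoughVersions checks. The real helper is private and takes the whole results collection, so the simplified single-Cell signature below is an assumption; Cell.getNumValues() is the same accessor used in the earlier snippet.

  static boolean hasEnoughVersionsSketch(int versions, Cell result) {
    // Stop reading once the accumulated cell holds the requested version count.
    return result != null && result.getNumValues() >= versions;
  }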

        MapFile.Reader map = maparray[i];
        synchronized(map) {
          map.reset();
          // Do the priming read
          ImmutableBytesWritable readval = new ImmutableBytesWritable();
          HStoreKey readkey = (HStoreKey)map.getClosest(origin, readval);
          if (readkey == null) {
            // map.getClosest returns null if the passed key is > than the
            // last key in the map file.  getClosest is a bit of a misnomer
            // since it returns exact match or the next closest key AFTER not
            // BEFORE.
            continue;
          }
          do {
            // if the row matches, we might want this one.
            if (rowMatches(origin, readkey)) {
              // if the column pattern is not null, we use it for column matching.
              // we will skip the keys whose column doesn't match the pattern.
              if (columnPattern != null) {
                if (!(columnPattern.matcher(Bytes.toString(readkey.getColumn())).matches())) {
                  continue;
                }
              }
              // if the cell address matches, then we definitely want this key.
              if (cellMatches(origin, readkey)) {
                // Store the key if it isn't deleted or superseded by the memcache
                if (!HLogEdit.isDeleted(readval.get())) {
                  if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
                    keys.add(new HStoreKey(readkey));
                  }
                  if (keys.size() >= versions) {
                    break;
                  }
                } else {
                  deletes.add(new HStoreKey(readkey));
                }
              } else {
                // the cell doesn't match, but there might be more with different
                // timestamps, so move to the next key
                continue;
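
A self-contained illustration of the column-pattern filter above: the regular expression is applied to the string form of the full column name (family:qualifier). The pattern and column literals are hypothetical.

  java.util.regex.Pattern columnPattern =
      java.util.regex.Pattern.compile("info:.*");
  byte[] readcol = Bytes.toBytes("info:server");
  boolean wanted =
      columnPattern.matcher(Bytes.toString(readcol)).matches();   // true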
