Package org.apache.hadoop.hbase.io.hfile.HFile

Examples of org.apache.hadoop.hbase.io.hfile.HFile.Reader
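HFile.Reader is the low-level reader for HBase's on-disk HFile format. It exposes file metadata (length(), getEntries(), indexSize(), getFirstKey(), getLastKey(), midkey()) and hands out HFileScanners via getScanner(cacheBlocks, pread). Two usage patterns recur throughout the fragments below, which appear to come from HBase's Store and StoreFile internals: the Reader obtained from a StoreFile is always null-checked before use, and the scanner flags are chosen to match the access pattern (compactions scan sequentially without caching; point lookups cache blocks and use pread).

First, a minimal, self-contained sketch of opening and scanning an HFile. It assumes the 0.20-era constructor that the fragments below use; the class name HFileReaderExample and the path argument are placeholders, and error handling is elided:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class HFileReaderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile

    // No block cache, not in-memory -- the same constructor the
    // StoreFile fragment near the end of this page uses.
    HFile.Reader reader = new HFile.Reader(fs, path, null, false);
    try {
      reader.loadFileInfo(); // load indices and file info before scanning
      System.out.println("entries=" + reader.getEntries() +
          ", filesize=" + reader.length() +
          ", indexsize=" + reader.indexSize());

      // Cache blocks and use pread, as the point-lookup fragments do.
      HFileScanner scanner = reader.getScanner(true, true);
      if (scanner.seekTo()) { // position at the first key
        do {
          System.out.println(scanner.getKeyValue());
        } while (scanner.next());
      }
    } finally {
      reader.close();
    }
  }
}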


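The first fragment, apparently from the memstore flush path in HBase's Store class, closes the just-written HFile, wraps it in a StoreFile, and uses the Reader to report the entry count and file size: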
        writer.close();
      }
    }
    StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache,
      this.conf, this.inMemory);
    Reader r = sf.getReader();
    this.storeSize += r.length();
    if (LOG.isInfoEnabled()) {
      LOG.info("Added " + sf + ", entries=" + r.getEntries() +
        ", sequenceid=" + logCacheFlushId +
        ", memsize=" + StringUtils.humanReadableInt(flushed) +
        ", filesize=" + StringUtils.humanReadableInt(r.length()) +
        " to " + this.region.regionInfo.getRegionNameAsString());
    }
    return sf;
  }


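A defensive-access pattern: both the Path and the Reader of a StoreFile may be null, so each is checked before the file length is read: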
        Path path = file.getPath();
        if (path == null) {
          LOG.warn("Path is null for " + file);
          return null;
        }
        Reader r = file.getReader();
        if (r == null) {
          LOG.warn("StoreFile " + file + " has a null Reader");
          return null;
        }
        long len = r.length(); // reuse the Reader we just null-checked

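The start of a compaction, apparently from Store.compact(): one StoreFileScanner per input file, each backed by a Reader scanner opened without block caching and without pread: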
      final boolean majorCompaction, final long maxId)
  throws IOException {
    // For each file, obtain a scanner:
    KeyValueScanner [] scanners = new KeyValueScanner[filesToCompact.size()];
    for (int i = 0; i < filesToCompact.size(); ++i) {
      Reader r = filesToCompact.get(i).getReader();
      if (r == null) {
        LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
        continue; // NOTE: this leaves a null slot in the scanners array
      }
      // Open a scanner that neither caches blocks nor uses pread; a
      // compaction reads each block exactly once, sequentially.
      scanners[i] = new StoreFileScanner(r.getScanner(false, false));
    }

    // Make the instantiation lazy in case compaction produces no product; i.e.
    // where all source cells are expired or deleted.
    HFile.Writer writer = null;

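After a compaction, the store size is recomputed by summing Reader.length() over the surviving store files: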
          " some of which may have been already removed", e);
      }
      // 4. Compute new store size
      this.storeSize = 0L;
      for (StoreFile hsf : this.storefiles.values()) {
        Reader r = hsf.getReader();
        if (r == null) {
          LOG.warn("StoreFile " + hsf + " has a null Reader");
          continue;
        }
        this.storeSize += r.length();
      }
    } finally {
      this.lock.writeLock().unlock();
    }
    return result;

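A closest-row-before lookup against a single StoreFile. The Reader's first and last keys bound the search, and the scanner is opened with block caching and pread, which suits random point reads: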
   * @throws IOException
   */
  private void rowAtOrBeforeFromStoreFile(final StoreFile f,
    final GetClosestRowBeforeTracker state)
  throws IOException {
    Reader r = f.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + f + " has a null Reader");
      return;
    }
    // TODO: Cache these keys rather than make each time?
    byte [] fk = r.getFirstKey();
    KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
    byte [] lk = r.getLastKey();
    KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
    KeyValue firstOnRow = state.getTargetKey();
    if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
      // If the last key in the file is not from the target table, there are
      // no candidates in this file; return.
      if (!state.isTargetTable(lastKV)) return;
      // Otherwise the row we are looking for is past the end of this file,
      // so search from the last key.  TODO: Cache first and last keys
      // rather than recreating them on each call.
      firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
      firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
    }
    // Get a scanner that caches blocks and that uses pread.
    HFileScanner scanner = r.getScanner(true, true);
    // Seek the scanner; if we cannot, return.
    if (!seekToScanner(scanner, firstOnRow, firstKV)) return;
    // If we find a candidate at firstOnRow itself, return.  In practice this
    // almost never happens: an exact instance of the target first row is
    // unlikely to exist in the table.
    if (walkForwardInSingleRow(scanner, firstOnRow, state)) return;

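Choosing a region split point: the largest store file's Reader supplies midkey(), which is then compared against the file's first and last keys to decide whether the region can be split at all: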
              LOG.debug(sf +  " is not splittable");
            }
            return null;
          }
        }
        Reader r = sf.getReader();
        if (r == null) {
          LOG.warn("Storefile " + sf + " Reader is null");
          continue;
        }
        long size = r.length();
        if (size > maxSize) {
          // This is the largest one so far
          maxSize = size;
          mapIndex = e.getKey();
        }
      }
      StoreFile sf = this.storefiles.get(mapIndex);
      HFile.Reader r = sf.getReader();
      if (r == null) {
        LOG.warn("Storefile " + sf + " Reader is null");
        return null;
      }
      // Get the first, last, and mid keys.  The midkey is the first key of
      // the block in the middle of the hfile; it still carries column and
      // timestamp, so only its row portion should be returned as the split
      // point.
      byte [] midkey = r.midkey();
      if (midkey != null) {
        KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
        byte [] fk = r.getFirstKey();
        KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
        byte [] lk = r.getLastKey();
        KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
        // if the midkey is the same as the first and last keys, then we cannot
        // (ever) split this region.
        if (this.comparator.compareRows(mk, firstKey) == 0 &&
            this.comparator.compareRows(mk, lastKey) == 0) {

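The aggregate on-disk size of a store, summing Reader.length() across all store files: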
   * @return The size of the store files, in bytes.
   */
  long getStorefilesSize() {
    long size = 0;
    for (StoreFile s: storefiles.values()) {
      Reader r = s.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + s + " has a null Reader");
        continue;
      }
      size += r.length();
    }
    return size;
  }

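The same traversal, but summing Reader.indexSize() to report the total block-index footprint: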
   * @return The size of the store file indexes, in bytes.
   */
  long getStorefilesIndexSize() {
    long size = 0;
    for (StoreFile s: storefiles.values()) {
      Reader r = s.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + s + " has a null Reader");
        continue;
      }
      size += r.indexSize();
    }
    return size;
  }

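Constructing the Reader itself, apparently in StoreFile's open path: a reference file gets a HalfHFileReader over the referenced file, a normal file gets a plain Reader, and loadFileInfo() then loads the indices and metadata: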
    }
    if (isReference()) {
      this.reader = new HalfHFileReader(this.fs, this.referencePath,
          getBlockCache(), this.reference);
    } else {
      this.reader = new Reader(this.fs, this.path, getBlockCache(),
          this.inMemory);
    }
    // Load up indices and fileinfo.
    Map<byte [], byte []> map = this.reader.loadFileInfo();
    // Read in our metadata.

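Finally, a seek micro-benchmark (the name seekTFile suggests it was adapted from the TFile test suite) that builds a Reader over an open FSDataInputStream via the stream-based constructor and drives random seeks through an HFileScanner: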
 
  public void seekTFile() throws IOException {
    int miss = 0;
    long totalBytes = 0;
    FSDataInputStream fsdis = fs.open(path);
    Reader reader =
      new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false);
    reader.loadFileInfo();
    KeySampler kSampler =
        new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
            keyLenGen);
    HFileScanner scanner = reader.getScanner(false, false);
    BytesWritable key = new BytesWritable();
    timer.reset();
    timer.start();
    for (int i = 0; i < options.seekCount; ++i) {
      kSampler.next(key);
