Class org.apache.hadoop.hbase.io.hfile.HFile

Examples of org.apache.hadoop.hbase.io.hfile.HFile.Reader
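
The snippets below are collected from the HBase code base and show the Reader in several roles: benchmarking, flush and compaction bookkeeping, split-point selection, and test assertions. As a starting point, here is a minimal, self-contained sketch of the pattern they share: open a Reader, load the file info, and walk the file with an HFileScanner. This is a sketch only; it follows the HFile.createReader style used in the last example below (Reader APIs differ across HBase versions), and the command-line path argument is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class HFileReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path(args[0]); // placeholder: path to an existing HFile

        HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
        reader.loadFileInfo(); // reads the trailer and the file-info block

        // getScanner(cacheBlocks, pread): no block caching, positional reads
        HFileScanner scanner = reader.getScanner(false, true);
        if (scanner.seekTo()) { // position at the first KeyValue in the file
          do {
            KeyValue kv = scanner.getKeyValue();
            System.out.println(kv);
          } while (scanner.next());
        }
        reader.close();
      }
    }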


   
    // Benchmark snippet: time how long it takes to open an HFile and read
    // its trailer and index, then prepare to scan for a list of keys.
    // 'lfs' is a FileSystem and 'slurp' a key-file reading helper, both
    // defined outside this snippet, which is truncated before the scan loop.
    Path path = new Path("/Users/ryan/rfile.big.txt");
    long start = System.currentTimeMillis();
    SimpleBlockCache cache = new SimpleBlockCache();
    //LruBlockCache cache = new LruBlockCache();
    Reader reader = new HFile.Reader(lfs, path, cache, false);
    reader.loadFileInfo();
    System.out.println(reader.trailer);
    long end = System.currentTimeMillis();

    System.out.println("Index read time: " + (end - start));

    List<String> keys = slurp("/Users/ryan/xaa.50k");

    HFileScanner scanner = reader.getScanner();
    int count = 0;
    long totalBytes = 0;
    int notFound = 0;

    start = System.nanoTime();
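Opening a StoreFile after a flush and using its Reader for entry-count and store-size accounting: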


        writer.close();
      }
    }
    StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache,
      this.conf, this.inMemory);
    Reader r = sf.getReader();
    this.storeSize += r.length();
    if(LOG.isDebugEnabled()) {
      LOG.debug("Added " + sf + ", entries=" + r.getEntries() +
        ", sequenceid=" + logCacheFlushId +
        ", memsize=" + StringUtils.humanReadableInt(flushed) +
        ", filesize=" + StringUtils.humanReadableInt(r.length()) +
        " to " + this.region.regionInfo.getRegionNameAsString());
    }
    return sf;
  }
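Null-checking a StoreFile's Path and Reader before using the Reader's length: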

        Path path = file.getPath();
        if (path == null) {
          LOG.warn("Path is null for " + file);
          return null;
        }
        Reader r = file.getReader();
        if (r == null) {
          LOG.warn("StoreFile " + file + " has a null Reader");
          continue;
        }
        long len = r.length(); // reuse the Reader fetched and checked above
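Preparing a compaction: each StoreFile's Reader supplies a scanner over the file's KeyValues: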

      final boolean majorCompaction, final long maxId)
  throws IOException {
    // For each file, obtain a scanner:
    KeyValueScanner [] scanners = new KeyValueScanner[filesToCompact.size()];
    for (int i = 0; i < filesToCompact.size(); ++i) {
      Reader r = filesToCompact.get(i).getReader();
      if (r == null) {
        LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
        continue;
      }
      // Get a scanner that does not cache blocks, so the compaction read
      // does not churn the block cache
      scanners[i] = new StoreFileScanner(r.getScanner(false));
    }

    // Make the instantiation lazy in case compaction produces no product; i.e.
    // where all source cells are expired or deleted.
    HFile.Writer writer = null;
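Recomputing the total store size after a compaction by summing each Reader's length: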

          " some of which may have been already removed", e);
      }
      // 4. Compute new store size
      this.storeSize = 0L;
      for (StoreFile hsf : this.storefiles.values()) {
        Reader r = hsf.getReader();
        if (r == null) {
          LOG.warn("StoreFile " + hsf + " has a null Reader");
          continue;
        }
        this.storeSize += r.length();
      }
    } finally {
      this.lock.writeLock().unlock();
    }
    return result;
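Bounding a closest-row-before search with a Reader's first and last keys before seeking its scanner: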

   * @throws IOException
   */
  private void rowAtOrBeforeFromStoreFile(final StoreFile f,
    final GetClosestRowBeforeTracker state)
  throws IOException {
    Reader r = f.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + f + " has a null Reader");
      return;
    }
    // TODO: Cache these keys rather than make each time?
    byte [] fk = r.getFirstKey();
    KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
    byte [] lk = r.getLastKey();
    KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
    KeyValue firstOnRow = state.getTargetKey();
    if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
      // If last key in file is not of the target table, no candidates in this
      // file.  Return.
      if (!state.isTargetTable(lastKV)) return;
      // If the row we're looking for is past the end of file, set search key to
      // last key. TODO: Cache last and first key rather than make each time.
      firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
    }
    HFileScanner scanner = r.getScanner();
    // Seek scanner.  If can't seek it, return.
    if (!seekToScanner(scanner, firstOnRow, firstKV)) return;
    // If we find a candidate at firstOnRow itself, return. In practice this
    // never happens: an exact instance of the target first-on-row key is not
    // expected to exist in the table.
    if (walkForwardInSingleRow(scanner, firstOnRow, state)) return;
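Choosing a split point: the largest StoreFile is found by Reader length, and the Reader's midkey supplies the row to split on: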

              LOG.debug(sf +  " is not splittable");
            }
            return null;
          }
        }
        Reader r = sf.getReader();
        if (r == null) {
          LOG.warn("Storefile " + sf + " Reader is null");
          continue;
        }
        long size = r.length();
        if (size > maxSize) {
          // This is the largest one so far
          maxSize = size;
          mapIndex = e.getKey();
        }
      }
      StoreFile sf = this.storefiles.get(mapIndex);
      HFile.Reader r = sf.getReader();
      if (r == null) {
        LOG.warn("Storefile " + sf + " Reader is null");
        return null;
      }
      // Get the first, last, and mid keys.  The midkey is the first key of
      // the block in the middle of the hfile and still carries column and
      // timestamp; only its row portion is returned as the split point.
      byte [] midkey = r.midkey();
      if (midkey != null) {
        KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
        byte [] fk = r.getFirstKey();
        KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
        byte [] lk = r.getLastKey();
        KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
        // if the midkey is the same as the first and last keys, then we cannot
        // (ever) split this region.
        if (this.comparator.compareRows(mk, firstKey) == 0 &&
            this.comparator.compareRows(mk, lastKey) == 0) {
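Totaling store file sizes by summing each Reader's length: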

   * @return The size of the store files, in bytes.
   */
  long getStorefilesSize() {
    long size = 0;
    for (StoreFile s: storefiles.values()) {
      Reader r = s.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + s + " has a null Reader");
        continue;
      }
      size += r.length();
    }
    return size;
  }
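Totaling store file index sizes via each Reader's indexSize: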

   * @return The size of the store file indexes, in bytes.
   */
  long getStorefilesIndexSize() {
    long size = 0;
    for (StoreFile s: storefiles.values()) {
      Reader r = s.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + s + " has a null Reader");
        continue;
      }
      size += r.indexSize();
    }
    return size;
  }
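A test that opens a Reader with HFile.createReader and checks the bloom filter type and compression algorithm recorded in the file info against the column family descriptor: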

        String familyStr = f.getPath().getName();
        HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(familyStr));
        // verify that the compression on this file matches the configured
        // compression
        Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
        Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf));
        Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

        byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
        if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE");
        assertEquals("Incorrect bloom filter used for column family " + familyStr +
          "(reader: " + reader + ")",
          hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)));
        assertEquals("Incorrect compression used for column family " + familyStr +
          "(reader: " + reader + ")", hcd.getCompression(), reader.getCompressionAlgorithm());
      }
    } finally {
      dir.getFileSystem(conf).delete(dir, true);
    }
  }
