Class org.apache.hadoop.hbase.io.hfile.HFile

Examples of org.apache.hadoop.hbase.io.hfile.HFile.Reader
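
Before diving into the fragments, here is a minimal round-trip sketch of the Writer/Reader lifecycle they all build on. It is assembled from the calls that appear in the examples below, not lifted from HBase itself: the class name and path are placeholders, and the constructors match the old pre-CacheConfig API used in most of these fragments (later HBase versions replaced it with HFile.createReader, as the compression-check example near the end shows).

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileReaderRoundTrip {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path path = new Path("/tmp/hfile-roundtrip"); // placeholder, not from the examples

    // Write a single key/value pair and close the file.
    Writer writer = new Writer(fs, path);
    writer.append(Bytes.toBytes("row1"), Bytes.toBytes("value1"));
    writer.close();

    // Open a Reader. loadFileInfo() reads the trailer, indices and file
    // info; every fragment below calls it before using a scanner.
    Reader reader = new Reader(fs, path, null, false);
    reader.loadFileInfo();

    HFileScanner scanner = reader.getScanner();
    // seekTo() positions at the first key; it returns false when the
    // file has no data blocks.
    if (scanner.seekTo()) {
      ByteBuffer key = scanner.getKey();
      ByteBuffer value = scanner.getValue();
      System.out.println(Bytes.toString(Bytes.toBytes(key)) + " -> "
          + Bytes.toString(Bytes.toBytes(value)));
    }
    reader.close();
  }
}

The first fragment appears to come from HBase's Store get path: it builds one HFileScanner per store file Reader (newest files first, via descendingMap()) and hands the list to a StoreFileGetScan. Note the null check -- a StoreFile whose Reader failed to open is skipped with a warning rather than aborting the get.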


   
    // Get storefiles for this store
    List<HFileScanner> storefileScanners = new ArrayList<HFileScanner>();
    for (StoreFile sf : this.storefiles.descendingMap().values()) {
      Reader r = sf.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + sf + " has a null Reader");
        continue;
      }
      storefileScanners.add(r.getScanner());
    }
   
    // StoreFileGetScan will handle reading this store's storefiles
    StoreFileGetScan scanner = new StoreFileGetScan(storefileScanners, matcher);
   
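
From StoreFile.open(), by the look of it: a reference file (created when a region splits) gets a HalfHFileReader that exposes only the top or bottom half of the parent HFile, while an ordinary file gets a plain Reader. Either way, loadFileInfo() is called next to pull in the indices and file metadata.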


    if (isReference()) {
      this.reader = new HalfHFileReader(this.fs, this.referencePath,
          getBlockCache(), this.reference);
    } else {
      this.reader = new Reader(this.fs, this.path, getBlockCache(),
          this.inMemory);
    }
    // Load up indices and fileinfo.
    Map<byte [], byte []> map = this.reader.loadFileInfo();
    // Read in our metadata.
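
A test fragment: after writing a single key/value and reopening the file, getMetaBlock() returns null for a meta block name that was never written.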

    Writer writer = new Writer(fout, minBlockSize,
        Compression.Algorithm.NONE, null);
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = new Reader(fs, mFile, null, false);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existent"));
  }
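
A developer micro-benchmark (note the hard-coded /Users/ryan/... paths): open a large local HFile with a SimpleBlockCache, print the trailer, and time how long loading the index takes before scanning a list of keys.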

   
    Path path = new Path("/Users/ryan/rfile.big.txt");
    long start = System.currentTimeMillis();
    SimpleBlockCache cache = new SimpleBlockCache();
    //LruBlockCache cache = new LruBlockCache();
    Reader reader = new HFile.Reader(lfs, path, cache, false);
    reader.loadFileInfo();
    System.out.println(reader.trailer);
    long end = System.currentTimeMillis();
   
    System.out.println("Index read time: " + (end - start));

    List<String> keys = slurp("/Users/ryan/xaa.50k");
   
    HFileScanner scanner = reader.getScanner();
    int count;
    long totalBytes = 0;
    int notFound = 0;

    start = System.nanoTime();
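
A seek benchmark: a KeySampler draws random keys between the file's first and last key, and the scanner seeks to each one, counting misses and bytes read.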

 
  public void seekTFile() throws IOException {
    int miss = 0;
    long totalBytes = 0;
    FSDataInputStream fsdis = fs.open(path);
    Reader reader =
      new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false);
    reader.loadFileInfo();
    KeySampler kSampler =
        new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
            keyLenGen);
    HFileScanner scanner = reader.getScanner();
    BytesWritable key = new BytesWritable();
    timer.reset();
    timer.start();
    for (int i = 0; i < options.seekCount; ++i) {
      kSampler.next(key);
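
An empty HFile is still a valid HFile: after a Writer is closed without any appends, the Reader opens fine and getFirstKey()/getLastKey() both return null.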

  public void testEmptyHFile() throws IOException {
    Path f = new Path(ROOT_DIR, getName());
    Writer w = new Writer(this.fs, f);
    w.close();
    Reader r = new Reader(fs, f, null, false);
    r.loadFileInfo();
    assertNull(r.getFirstKey());
    assertNull(r.getLastKey());
  }
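
A full round-trip test: write records, reopen the file through a Reader built on an FSDataInputStream, seek to a known key (seekTo(key) returns 0 on an exact match), and check that the key matches and that getValue() is repeatable.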

    Writer writer = new Writer(fout, minBlockSize,
      Compression.getCompressionAlgorithmByName(codec), null);
    LOG.info(writer);
    writeRecords(writer);
    fout.close();
    FSDataInputStream fin = fs.open(ncTFile);
    Reader reader = new Reader(fs.open(ncTFile),
      fs.getFileStatus(ncTFile).getLen(), null, false);
    // Load up the index.
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner();
    // Align scanner at start of the file.
    scanner.seekTo();
    readAllRecords(scanner);
    // seekTo(key) returns 0 when the exact key is found.
    assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
    // read the key and see if it matches
    ByteBuffer readKey = scanner.getKey();
    assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
      Bytes.toBytes(readKey)));

    scanner.seekTo(new byte[0]);
    ByteBuffer val1 = scanner.getValue();
    scanner.seekTo(new byte[0]);
    ByteBuffer val2 = scanner.getValue();
    assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));

    reader.close();
    fin.close();
    fs.delete(ncTFile, true);
  }
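
The meta-block variant of the same round trip: the file contains only meta blocks and no data, so seekTo() on the data scanner returns false, and the meta blocks are read back separately.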

    Writer writer = new Writer(fout, minBlockSize,
      Compression.getCompressionAlgorithmByName(compress), null);
    someTestingWithMetaBlock(writer);
    writer.close();
    fout.close();
    FSDataInputStream fin = fs.open(mFile);
    Reader reader = new Reader(fs.open(mFile), this.fs.getFileStatus(mFile)
        .getLen(), null, false);
    reader.loadFileInfo();
    // No data -- this should return false.
    assertFalse(reader.getScanner().seekTo());
    someReadingWithMetaBlock(reader);
    fs.delete(mFile, true);
    reader.close();
    fin.close();
  }
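
A fragment using the newer factory API (HFile.createReader with a CacheConfig instead of a Reader constructor): it opens the single data file written for a column family and asserts that the file's compression algorithm matches what was configured for that family.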

            found = true;

            // verify that the compression on this file matches the configured
            // compression
            Path dataFilePath = fileSystem.listStatus(f.getPath())[0].getPath();
            Reader reader = HFile.createReader(fileSystem, dataFilePath,
                new CacheConfig(conf));
            reader.loadFileInfo();
            assertEquals("Incorrect compression used for column family " + familyStr
                         + "(reader: " + reader + ")",
                         configuredCompression.get(familyStr), reader.getCompressionAlgorithm());
            break;
          }
        }

        if (!found) {
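
Finally, a fragment that looks like Store.internalFlushCache(): once the flush writer is closed, a StoreFile is constructed over the new path, and its Reader supplies the entry count and file length used for the debug log line and the running storeSize total.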

        writer.close();
      }
    }
    StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache,
      this.conf, this.inMemory);
    Reader r = sf.getReader();
    this.storeSize += r.length();
    if(LOG.isDebugEnabled()) {
      LOG.debug("Added " + sf + ", entries=" + r.getEntries() +
        ", sequenceid=" + logCacheFlushId +
        ", memsize=" + StringUtils.humanReadableInt(flushed) +
        ", filesize=" + StringUtils.humanReadableInt(r.length()) +
        " to " + this.region.regionInfo.getRegionNameAsString());
    }
    return sf;
  }
