Examples of CacheConfig

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

  private static void createHFile(Configuration conf, FileSystem fs, Path path,
      byte[] family, byte[] qualifier,
      byte[] startKey, byte[] endKey, int numRows) throws IOException {

      HFile.Writer writer = null;
      long now = System.currentTimeMillis();
      try {
        writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
            .withPath(fs, path)
            .create();
        // subtract 2 since numRows doesn't include boundary keys
        for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
          KeyValue kv = new KeyValue(key, family, qualifier, now, key);
          // ...
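The fragment above is cut off mid-loop; as a point of reference, here is a minimal, self-contained sketch of the same writer pattern, assuming the 0.94-era API used throughout this page (the output path, family, and qualifier are placeholders):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // placeholder path

    // CacheConfig tells the writer which block-cache settings to honor.
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .create();
    long now = System.currentTimeMillis();
    try {
      // HFile keys must be appended in sorted order; zero-padded row keys
      // stay lexicographically ordered.
      for (int i = 0; i < 10; i++) {
        byte[] row = Bytes.toBytes(String.format("row-%02d", i));
        writer.append(new KeyValue(row, Bytes.toBytes("f"),
            Bytes.toBytes("q"), now, row));
      }
    } finally {
      writer.close();
    }
  }
}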

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

    try {
      processTable(fs, tableDir, log, c, majorCompact);
    } finally {
      log.close();
      // TODO: is this still right?
      // getBlockCache() returns the global block cache, or null if caching is disabled.
      BlockCache bc = new CacheConfig(c).getBlockCache();
      if (bc != null) bc.shutdown();
    }
  }
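In this era the block cache reached through CacheConfig.getBlockCache() is a process-wide singleton, so standalone tools shut it down before exiting. A minimal sketch of the same guard-then-shutdown pattern, under the same API assumptions (the work inside the try block is elided):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class BlockCacheShutdownSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    try {
      // ... work that may instantiate and populate the block cache ...
    } finally {
      // getBlockCache() returns null when block caching is disabled,
      // hence the guard; shutdown() stops the cache's background threads.
      BlockCache bc = new CacheConfig(conf).getBlockCache();
      if (bc != null) {
        bc.shutdown();
      }
    }
  }
}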

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

  private static void createHFile(
      Configuration conf,
      FileSystem fs, Path path,
      byte[] family, byte[] qualifier) throws IOException {
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .create();
    long now = System.currentTimeMillis();
    try {
      for (int i = 1; i <= 9; i++) {
        // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

    writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
    writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
    writer.close();

    // Verify that compression and encoding settings are respected
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
    assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
    assertEquals(hcd.getDataBlockEncoding(), reader.getEncodingOnDisk());
    reader.close();
  }
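For context, hcd in this fragment is an HColumnDescriptor whose compression and data-block-encoding settings the two assertions compare against what the reader finds on disk. A hedged sketch of how such a descriptor might be configured (the family name and the chosen algorithms are placeholders):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.Compression;

public class FamilySettingsSketch {
  // Builds a descriptor like the hcd asserted on above.
  static HColumnDescriptor exampleFamily() {
    HColumnDescriptor hcd = new HColumnDescriptor("f"); // placeholder family
    hcd.setCompressionType(Compression.Algorithm.GZ);
    hcd.setDataBlockEncoding(DataBlockEncoding.PREFIX);
    return hcd;
  }
}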

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

    StoreFile f = this.store.getStorefiles().iterator().next();
    Path storedir = f.getPath().getParent();
    long seqid = f.getMaxSequenceId();
    Configuration c = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(c);
    StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
        fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withOutputDir(storedir)
            .build();
    // seqid + 1 = max sequence id of the new file; 'false' = not a major compaction product
    w.appendMetadata(seqid + 1, false);
    w.close();
    // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

    scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);
    this.memstore = new MemStore(conf, this.comparator);
    this.offPeakHours = OffPeakHours.getInstance(conf);

    // Setting up cache configuration for this family
    this.cacheConf = new CacheConfig(conf, family);

    this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);

    this.blockingFileCount =
        conf.getInt(BLOCKING_STOREFILES_KEY, DEFAULT_BLOCKING_STOREFILE_COUNT);
View Full Code Here

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

  @Override
  public StoreFile.Writer createWriterInTmp(long maxKeyCount,
    Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint)
  throws IOException {
    final CacheConfig writerCacheConf;
    if (isCompaction) {
      // Don't cache data on write on compactions.
      writerCacheConf = new CacheConfig(cacheConf);
      writerCacheConf.setCacheDataOnWrite(false);
    } else {
      writerCacheConf = cacheConf;
    }
    InetSocketAddress[] favoredNodes = null;
    if (region.getRegionServerServices() != null) {
    // ...
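Two details are worth noting in this fragment: the copy constructor gives the compaction writer its own CacheConfig, so flipping setCacheDataOnWrite(false) cannot disturb the store-wide cacheConf shared by ordinary writers; and skipping cache-on-write during compactions follows the usual rationale that compaction output is large and mostly cold, so caching it as it is written would evict blocks readers are actively using.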

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

  public static void testCodecs(Configuration conf, int kvLimit,
      String hfilePath, String compressionName, boolean doBenchmark,
      boolean doVerify) throws IOException {
    // create environment
    Path path = new Path(hfilePath);
    CacheConfig cacheConf = new CacheConfig(conf);
    FileSystem fs = FileSystem.get(conf);
    StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

    StoreFile.Reader reader = hsf.createReader();
    reader.loadFileInfo();
    KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

    // run the utilities
    DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
    comp.minorVersion = reader.getHFileMinorVersion();
    comp.checkStatistics(scanner, kvLimit);
    if (doVerify) {
      comp.verifyCodecs(scanner, kvLimit);
    }
    if (doBenchmark) {
      comp.benchmarkCodecs();
    }
    comp.displayStatistics();

    // cleanup
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
  }
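A hedged usage sketch for the utility above, assuming testCodecs is the static method on DataBlockEncodingTool (org.apache.hadoop.hbase.regionserver) shown in the fragment; the HFile path is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool;

public class EncodingBenchmarkSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Scan at most one million KeyValues from an existing store file,
    // verify that each encoder round-trips, then benchmark the codecs.
    DataBlockEncodingTool.testCodecs(conf, 1000000,
        "/path/to/existing/storefile", // placeholder HFile path
        "gz",                          // baseline compression codec
        true,                          // doBenchmark
        true);                         // doVerify
  }
}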

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

    final Configuration conf = HBaseConfiguration.create();
    try {
      testCodecs(conf, kvLimit, pathName, compressionName, doBenchmark,
          doVerify);
    } finally {
      // Note: unlike the earlier fragment, there is no null check here, so this
      // assumes block caching is enabled; getBlockCache() returns null otherwise.
      (new CacheConfig(conf)).getBlockCache().shutdown();
    }
  }

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig

    // login the server principal (if using secure Hadoop)
    User.login(this.conf, "hbase.regionserver.keytab.file",
      "hbase.regionserver.kerberos.principal", this.isa.getHostName());
    regionServerAccounting = new RegionServerAccounting();
    // Server-wide cache configuration; individual Stores build their own
    // per-family CacheConfig from the same conf (see the fragment above).
    cacheConfig = new CacheConfig(conf);
    uncaughtExceptionHandler = new UncaughtExceptionHandler() {
      @Override
      public void uncaughtException(Thread t, Throwable e) {
        abort("Uncaught exception in service thread " + t.getName(), e);
      }
      // ...