Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HDFSBlocksDistribution$HostAndWeight
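HDFSBlocksDistribution keeps, for each host, the total size in bytes of the HDFS block replicas stored on that host (the HostAndWeight pairs in the class name), and can report the hosts holding the most data, a single host's weight, the total weight of the unique blocks, and a per-host locality index. Below is a minimal sketch of that API using only the methods exercised in the examples that follow; the host names and block size are made-up sample values, not taken from a real cluster.

import java.util.List;

import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class HDFSBlocksDistributionSketch {
  public static void main(String[] args) {
    HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
    long blockSize = 64L * 1024 * 1024;  // assumed 64 MB blocks

    // One block replicated on host1, host2 and host3 ...
    distribution.addHostsAndBlockWeight(new String[] { "host1", "host2", "host3" }, blockSize);
    // ... and a second block replicated on host1, host2 and host4.
    distribution.addHostsAndBlockWeight(new String[] { "host1", "host2", "host4" }, blockSize);

    // Hosts ordered by how much of the data they hold locally.
    List<String> topHosts = distribution.getTopHosts();
    String topHost = topHosts.get(0);  // host1 or host2, each holding both blocks

    System.out.println("top host = " + topHost);
    System.out.println("weight on top host = " + distribution.getWeight(topHost));
    System.out.println("unique blocks total weight = " + distribution.getUniqueBlocksTotalWeight());
    System.out.println("locality index of top host = " + distribution.getBlockLocalityIndex(topHost));
  }
}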


   * Returns the HDFS blocks distribution based on the data captured when the
   * HFiles were created.
   * @return The HDFS blocks distribution for the region.
   */
  public HDFSBlocksDistribution getHDFSBlocksDistribution() {
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    synchronized (this.stores) {
      for (Store store : this.stores.values()) {
        for (StoreFile sf : store.getStorefiles()) {
          HDFSBlocksDistribution storeFileBlocksDistribution =
            sf.getHDFSBlockDistribution();
          hdfsBlocksDistribution.add(storeFileBlocksDistribution);
        }
      }
    }
    return hdfsBlocksDistribution;
  }
View Full Code Here
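The fragment above aggregates the per-store-file distributions into one region-level distribution. A minimal sketch of how that result might be consumed on a region server, assuming you already hold an HRegion reference; the wrapper class and method name are illustrative, not part of HBase.

import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class RegionLocalityReport {
  /**
   * Returns the fraction (0.0 to 1.0) of the region's HFile data that is
   * stored on the given host, according to the distribution built above.
   */
  public static float localityOn(HRegion region, String hostname) {
    HDFSBlocksDistribution distribution = region.getHDFSBlocksDistribution();
    return distribution.getBlockLocalityIndex(hostname);
  }
}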


   * @throws IOException
   */
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
    Configuration conf, HTableDescriptor tableDescriptor,
    String regionEncodedName, Path tablePath) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    FileSystem fs = tablePath.getFileSystem(conf);

    for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
      Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName,
        family.getName());
      if (!fs.exists(storeHomeDir)) continue;

      FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);

      for (FileStatus hfileStatus : hfilesStatus) {
        Path p = hfileStatus.getPath();
        if (HFileLink.isHFileLink(p)) {
          hfileStatus = new HFileLink(conf, p).getFileStatus(fs);
        } else if (StoreFile.isReference(p)) {
          p = StoreFile.getReferredToFile(p);
          if (HFileLink.isHFileLink(p)) {
            hfileStatus = new HFileLink(conf, p).getFileStatus(fs);
          } else {
            hfileStatus = fs.getFileStatus(p);
          }
        }
        HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
          hfileStatus.getLen());
        hdfsBlocksDistribution.add(storeFileBlocksDistribution);
      }
    }
    return hdfsBlocksDistribution;
  }
View Full Code Here
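A minimal sketch of calling the static helper above for a region whose files are on disk but not yet opened; the wrapper class and method name are illustrative, and the caller supplies the table descriptor, encoded region name, and table path.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class RegionBlockLocality {
  /**
   * Computes where the HFile blocks of a region live, without opening the
   * region, and returns the hosts holding the most of its data, best first.
   */
  public static List<String> hostsWithMostData(Configuration conf,
      HTableDescriptor tableDescriptor, String regionEncodedName,
      Path tablePath) throws IOException {
    HDFSBlocksDistribution distribution = HRegion.computeHDFSBlocksDistribution(
        conf, tableDescriptor, regionEncodedName, tablePath);
    return distribution.getTopHosts();
  }
}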

      FileStatus[] files = FSUtils.listStatus(fs, path, null);
      if (files == null) {
        return new String[] {};
      }

      HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
      for (FileStatus hfileStatus: files) {
        HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
        hdfsBlocksDistribution.add(storeFileBlocksDistribution);
      }

      List<String> hosts = hdfsBlocksDistribution.getTopHosts();
View Full Code Here
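The same pattern written out as a self-contained helper: list the files directly under a path, sum their block distributions, and return the hosts ordered by how much data they hold locally. This is a sketch that assumes the FSUtils methods shown above; the class and method names are illustrative.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.util.FSUtils;

public class PathLocality {
  /** Aggregates the block locations of every file directly under path. */
  public static List<String> topHostsUnder(FileSystem fs, Path path) throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, path, null);
    if (files == null) {
      return Collections.emptyList();  // nothing under the path yet
    }
    HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
    for (FileStatus status : files) {
      distribution.add(
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen()));
    }
    return distribution.getTopHosts();
  }
}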

   * @return The HDFS blocks distribution
   */ 
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation[] blockLocations =
      fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
      String[] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }
   
    return blocksDistribution;
  }
View Full Code Here
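A minimal sketch of calling this helper on a single HDFS file; the file path is a placeholder and the surrounding class is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.util.FSUtils;

public class SingleFileDistribution {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path file = new Path("/tmp/example-file");  // placeholder path
    FileStatus status = fs.getFileStatus(file);

    // Distribution over the whole file: offset 0, length = file length.
    HDFSBlocksDistribution distribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
    System.out.println("unique blocks total weight = "
        + distribution.getUniqueBlocksTotalWeight());
    System.out.println("top hosts = " + distribution.getTopHosts());
  }
}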

    List<ServerName> topServerNames = null;
    try {
      HTableDescriptor tableDescriptor = getTableDescriptor(
        region.getTableName());
      if (tableDescriptor != null) {
        HDFSBlocksDistribution blocksDistribution =
          HRegion.computeHDFSBlocksDistribution(config, tableDescriptor,
          region.getEncodedName());
        List<String> topHosts = blocksDistribution.getTopHosts();
        topServerNames = mapHostNameToServerName(topHosts);
      }
    } catch (IOException ioe) {
      LOG.debug("IOException during HDFSBlocksDistribution computation for " +
        "region = " + region.getEncodedName(), ioe);
View Full Code Here
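The fragment above turns the top hosts of a region's distribution into candidate servers for locality-aware placement. A minimal sketch of the same idea that avoids HBase internals: the hostname-to-ServerName map is supplied by the caller here, whereas the real code delegates that lookup to mapHostNameToServerName.

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.ServerName;

public class LocalityPlacement {
  /**
   * Returns the live server that holds the most of the region's data, or
   * null when none of the top hosts maps to a known server.
   */
  public static ServerName preferredServer(HDFSBlocksDistribution distribution,
      Map<String, ServerName> serversByHostname) {
    List<String> topHosts = distribution.getTopHosts();
    for (String host : topHosts) {
      ServerName candidate = serversByHostname.get(host);
      if (candidate != null) {
        return candidate;
      }
    }
    return null;
  }
}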

    int stores = 0;
    int storefiles = 0;
    long memstoreSize = 0;
    int readRequestsCount = 0;
    int writeRequestsCount = 0;
    long storefileIndexSize = 0;
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    long totalStaticIndexSize = 0;
    long totalStaticBloomSize = 0;
    long numPutsWithoutWAL = 0;
    long dataInMemoryWithoutWAL = 0;
    long updatesBlockedMs = 0;

    // Note that this is a map of Doubles instead of Longs. Accumulating longs
    // would do integer division on each per-store contribution, truncating
    // more than it should because only one part of the sum is divided at a
    // time. Dividing once at the end is awkward too, since the proper factor
    // is hard to know there; so each contribution is kept exact and the total
    // is truncated only when it is stored.
    final Map<String, MutableDouble> tempVals =
        new HashMap<String, MutableDouble>();

    for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      HRegion r = e.getValue();
      memstoreSize += r.memstoreSize.get();
      numPutsWithoutWAL += r.numPutsWithoutWAL.get();
      dataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
      readRequestsCount += r.readRequestsCount.get();
      writeRequestsCount += r.writeRequestsCount.get();
      updatesBlockedMs += r.updatesBlockedMs.get();
      synchronized (r.stores) {
        stores += r.stores.size();
        for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
            final Store store = ee.getValue();
            final SchemaMetrics schemaMetrics = store.getSchemaMetrics();

            {
              long tmpStorefiles = store.getStorefilesCount();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STORE_FILE_COUNT, tmpStorefiles);
              storefiles += tmpStorefiles;
            }


            {
              long tmpStorefileIndexSize = store.getStorefilesIndexSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STORE_FILE_INDEX_SIZE,
                  (long) (tmpStorefileIndexSize / (1024.0 * 1024)));
              storefileIndexSize += tmpStorefileIndexSize;
            }

            {
              long tmpStorefilesSize = store.getStorefilesSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STORE_FILE_SIZE_MB,
                  (long) (tmpStorefilesSize / (1024.0 * 1024)));
            }

            {
              long tmpStaticBloomSize = store.getTotalStaticBloomSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STATIC_BLOOM_SIZE_KB,
                  (long) (tmpStaticBloomSize / 1024.0));
              totalStaticBloomSize += tmpStaticBloomSize;
            }

            {
              long tmpStaticIndexSize = store.getTotalStaticIndexSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STATIC_INDEX_SIZE_KB,
                  (long) (tmpStaticIndexSize / 1024.0));
              totalStaticIndexSize += tmpStaticIndexSize;
            }

            schemaMetrics.accumulateStoreMetric(tempVals,
                StoreMetricType.MEMSTORE_SIZE_MB,
                (long) (store.getMemStoreSize() / (1024.0 * 1024)));
        }
      }

      hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
    }

    for (Entry<String, MutableDouble> e : tempVals.entrySet()) {
      RegionMetricsStorage.setNumericMetric(e.getKey(), e.getValue().longValue());
    }

    this.metrics.stores.set(stores);
    this.metrics.storefiles.set(storefiles);
    this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
    this.metrics.mbInMemoryWithoutWAL.set((int) (dataInMemoryWithoutWAL / (1024 * 1024)));
    this.metrics.numPutsWithoutWAL.set(numPutsWithoutWAL);
    this.metrics.storefileIndexSizeMB.set(
        (int) (storefileIndexSize / (1024 * 1024)));
    this.metrics.rootIndexSizeKB.set(
        (int) (storefileIndexSize / 1024));
    this.metrics.totalStaticIndexSizeKB.set(
        (int) (totalStaticIndexSize / 1024));
    this.metrics.totalStaticBloomSizeKB.set(
        (int) (totalStaticBloomSize / 1024));
    this.metrics.readRequestsCount.set(readRequestsCount);
    this.metrics.writeRequestsCount.set(writeRequestsCount);
    this.metrics.compactionQueueSize.set(compactSplitThread
        .getCompactionQueueSize());
    this.metrics.flushQueueSize.set(cacheFlusher
        .getFlushQueueSize());
    this.metrics.updatesBlockedSeconds.update(updatesBlockedMs > 0 ?
        updatesBlockedMs/1000: 0);
    final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get();
    this.metrics.updatesBlockedSecondsHighWater.update(updatesBlockedMsHigherWater > 0 ?
        updatesBlockedMsHigherWater/1000: 0);

    BlockCache blockCache = cacheConfig.getBlockCache();
    if (blockCache != null) {
      this.metrics.blockCacheCount.set(blockCache.size());
      this.metrics.blockCacheFree.set(blockCache.getFreeSize());
      this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
      CacheStats cacheStats = blockCache.getStats();
      this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
      this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
      this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
      double ratio = blockCache.getStats().getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
      ratio = blockCache.getStats().getHitCachingRatio();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatio.set(percent);
      // past N period block cache hit / hit caching ratios
      cacheStats.rollMetricsPeriod();
      ratio = cacheStats.getHitRatioPastNPeriods();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatioPastNPeriods.set(percent);
      ratio = cacheStats.getHitCachingRatioPastNPeriods();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatioPastNPeriods.set(percent);
    }
    float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
      getServerName().getHostname());
    int percent = (int) (localityIndex * 100);
    this.metrics.hdfsBlocksLocalityIndex.set(percent);

  }
View Full Code Here
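The comment about accumulating into a map of MutableDouble rather than longs is easiest to see with numbers. A small, self-contained illustration with made-up sizes: three stores of 600 KB each hold about 1.76 MB in total; truncating each store's contribution to whole megabytes before summing yields 0 MB, while summing exact values and truncating once yields 1 MB.

public class TruncationDemo {
  public static void main(String[] args) {
    // Three stores of 600 KB each (made-up sizes), about 1.76 MB in total.
    long[] storeSizesBytes = { 600 * 1024, 600 * 1024, 600 * 1024 };

    // Truncating each store's contribution to whole MB before summing:
    // every term becomes 0, so the total is 0 MB.
    long truncatedPerStore = 0;
    for (long size : storeSizesBytes) {
      truncatedPerStore += (long) (size / (1024.0 * 1024));
    }

    // Accumulating exact values and truncating once at the end: 1 MB.
    double exactSum = 0;
    for (long size : storeSizesBytes) {
      exactSum += size / (1024.0 * 1024);
    }
    long truncatedAtEnd = (long) exactSum;

    System.out.println(truncatedPerStore + " MB vs " + truncatedAtEnd + " MB");
  }
}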

      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }


    try {
      // set up a cluster with 4 nodes
      String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with three blocks
      testFile = new Path("/test2.txt");       
      WriteDataToHDFS(fs, testFile, 3*DEFAULT_BLOCK_SIZE);
             
      // given the default replication factor of 3, we will have a total of 9
      // block replicas; thus the host with the highest weight should have
      // weight == 3 * DEFAULT_BLOCK_SIZE
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String tophost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(tophost);

        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertTrue(uniqueBlocksTotalWeight == weight);

    } finally {
      htu.shutdownMiniDFSCluster();
    }

   
    try {
      // set up a cluster with 4 nodes
      String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with one block
      testFile = new Path("/test3.txt");       
      WriteDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);
     
      // given the default replication factor of 3, we will have a total of 3
      // block replicas; thus one of the four hosts carries no weight
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      }
      while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }
View Full Code Here
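The tests above share one pattern: recompute the distribution until the NameNode reports the expected block locations or a two-second deadline passes (the NameNode is informed asynchronously, see the HBASE-6175 notes). A generic sketch of that polling loop; the helper class, the Supplier-based condition, and the short sleep between attempts are illustrative additions, not part of the HBase test code.

import java.util.function.Supplier;

public class WaitUntil {
  /**
   * Re-evaluates condition until it returns true or timeoutMillis elapses,
   * and returns the last observed value for the caller to assert on.
   */
  public static boolean waitFor(Supplier<Boolean> condition, long timeoutMillis)
      throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    boolean ok;
    do {
      ok = condition.get();
      if (!ok) {
        Thread.sleep(50);  // brief pause instead of spinning hot
      }
    } while (!ok && System.currentTimeMillis() < deadline);
    return ok;
  }
}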


   * @throws IOException
   */
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
    Configuration conf, HTableDescriptor tableDescriptor,
    String regionEncodedName) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
      tableDescriptor.getName());
    FileSystem fs = tablePath.getFileSystem(conf);

    for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
      Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName,
        family.getName());
      if (!fs.exists(storeHomeDir)) continue;

      FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);

      for (FileStatus hfileStatus : hfilesStatus) {
        HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
          hfileStatus.getLen());
        hdfsBlocksDistribution.add(storeFileBlocksDistribution);
      }
    }
    return hdfsBlocksDistribution;
  }
View Full Code Here

    int stores = 0;
    int storefiles = 0;
    long memstoreSize = 0;
    int readRequestsCount = 0;
    int writeRequestsCount = 0;
    long storefileIndexSize = 0;
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    long totalStaticIndexSize = 0;
    long totalStaticBloomSize = 0;
    long numPutsWithoutWAL = 0;
    long dataInMemoryWithoutWAL = 0;
    long updatesBlockedMs = 0;

    // Note that this is a map of Doubles instead of Longs. Accumulating longs
    // would do integer division on each per-store contribution, truncating
    // more than it should because only one part of the sum is divided at a
    // time. Dividing once at the end is awkward too, since the proper factor
    // is hard to know there; so each contribution is kept exact and the total
    // is truncated only when it is stored.
    final Map<String, MutableDouble> tempVals =
        new HashMap<String, MutableDouble>();

    for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      HRegion r = e.getValue();
      memstoreSize += r.memstoreSize.get();
      numPutsWithoutWAL += r.numPutsWithoutWAL.get();
      dataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
      readRequestsCount += r.readRequestsCount.get();
      writeRequestsCount += r.writeRequestsCount.get();
      updatesBlockedMs += r.updatesBlockedMs.get();
      synchronized (r.stores) {
        stores += r.stores.size();
        for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
            final Store store = ee.getValue();
            final SchemaMetrics schemaMetrics = store.getSchemaMetrics();

            {
              long tmpStorefiles = store.getStorefilesCount();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STORE_FILE_COUNT, tmpStorefiles);
              storefiles += tmpStorefiles;
            }


            {
              long tmpStorefileIndexSize = store.getStorefilesIndexSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STORE_FILE_INDEX_SIZE,
                  (long) (tmpStorefileIndexSize / (1024.0 * 1024)));
              storefileIndexSize += tmpStorefileIndexSize;
            }

            {
              long tmpStorefilesSize = store.getStorefilesSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STORE_FILE_SIZE_MB,
                  (long) (tmpStorefilesSize / (1024.0 * 1024)));
            }

            {
              long tmpStaticBloomSize = store.getTotalStaticBloomSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STATIC_BLOOM_SIZE_KB,
                  (long) (tmpStaticBloomSize / 1024.0));
              totalStaticBloomSize += tmpStaticBloomSize;
            }

            {
              long tmpStaticIndexSize = store.getTotalStaticIndexSize();
              schemaMetrics.accumulateStoreMetric(tempVals,
                  StoreMetricType.STATIC_INDEX_SIZE_KB,
                  (long) (tmpStaticIndexSize / 1024.0));
              totalStaticIndexSize += tmpStaticIndexSize;
            }

            schemaMetrics.accumulateStoreMetric(tempVals,
                StoreMetricType.MEMSTORE_SIZE_MB,
                (long) (store.getMemStoreSize() / (1024.0 * 1024)));
        }
      }

      hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
    }

    for (Entry<String, MutableDouble> e : tempVals.entrySet()) {
      RegionMetricsStorage.setNumericMetric(e.getKey(), e.getValue().longValue());
    }

    this.metrics.stores.set(stores);
    this.metrics.storefiles.set(storefiles);
    this.metrics.hlogFileCount.set(this.hlog.getNumLogFiles());
    this.metrics.hlogFileSizeMB.set(this.hlog.getNumLogFileSize() / (1024 * 1024));
    this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
    this.metrics.mbInMemoryWithoutWAL.set((int) (dataInMemoryWithoutWAL / (1024 * 1024)));
    this.metrics.numPutsWithoutWAL.set(numPutsWithoutWAL);
    this.metrics.storefileIndexSizeMB.set(
        (int) (storefileIndexSize / (1024 * 1024)));
    this.metrics.rootIndexSizeKB.set(
        (int) (storefileIndexSize / 1024));
    this.metrics.totalStaticIndexSizeKB.set(
        (int) (totalStaticIndexSize / 1024));
    this.metrics.totalStaticBloomSizeKB.set(
        (int) (totalStaticBloomSize / 1024));
    this.metrics.readRequestsCount.set(readRequestsCount);
    this.metrics.writeRequestsCount.set(writeRequestsCount);
    this.metrics.compactionQueueSize.set(compactSplitThread
        .getCompactionQueueSize());
    this.metrics.flushQueueSize.set(cacheFlusher
        .getFlushQueueSize());
    this.metrics.updatesBlockedSeconds.set(updatesBlockedMs/1000);
    final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get();
    this.metrics.updatesBlockedSecondsHighWater.set(updatesBlockedMsHigherWater/1000);

    BlockCache blockCache = cacheConfig.getBlockCache();
    if (blockCache != null) {
      this.metrics.blockCacheCount.set(blockCache.size());
      this.metrics.blockCacheFree.set(blockCache.getFreeSize());
      this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
      CacheStats cacheStats = blockCache.getStats();
      this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
      this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
      this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
      double ratio = blockCache.getStats().getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
      ratio = blockCache.getStats().getHitCachingRatio();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatio.set(percent);
      // past N period block cache hit / hit caching ratios
      cacheStats.rollMetricsPeriod();
      ratio = cacheStats.getHitRatioPastNPeriods();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatioPastNPeriods.set(percent);
      ratio = cacheStats.getHitCachingRatioPastNPeriods();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatioPastNPeriods.set(percent);
    }
    float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
      getServerName().getHostname());
    int percent = (int) (localityIndex * 100);
    this.metrics.hdfsBlocksLocalityIndex.set(percent);

  }
View Full Code Here
