Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HDFSBlocksDistribution$HostAndWeight


      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }
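The assertion above holds because every host stores a replica of every block, so each host's weight equals the unique blocks total weight. A minimal sketch of that bookkeeping, using only the HDFSBlocksDistribution methods already exercised in the snippet (no MiniDFSCluster needed; the class name is illustrative):

  import org.apache.hadoop.hbase.HDFSBlocksDistribution;

  public class UniformDistributionSketch {
    public static void main(String[] args) {
      HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
      // Two blocks, each replicated on all three hosts.
      String[] hosts = { "host1", "host2", "host3" };
      dist.addHostsAndBlockWeight(hosts, 1024L);
      dist.addHostsAndBlockWeight(hosts, 1024L);
      // Each unique block is counted once: 2048.
      long unique = dist.getUniqueBlocksTotalWeight();
      // Every host holds both blocks, so its weight matches the unique total.
      for (String host : hosts) {
        assert dist.getWeight(host) == unique;
      }
    }
  }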


    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with three blocks
      testFile = new Path("/test2.txt");       
      WriteDataToHDFS(fs, testFile, 3*DEFAULT_BLOCK_SIZE);
             
      // given the default replication factor is 3, we will have total of 9
      // replica of blocks; thus the host with the highest weight should have
      // weight == 3 * DEFAULT_BLOCK_SIZE
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String tophost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(tophost);

        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertTrue(uniqueBlocksTotalWeight == weight);

    } finally {
      htu.shutdownMiniDFSCluster();
    }
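When replicas are spread unevenly, getTopHosts() returns hosts ordered by descending weight. A short sketch of that ordering, again using only methods shown above:

  HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
  // Block A lands on host1..host3, block B on host2..host4 (factor 3).
  dist.addHostsAndBlockWeight(new String[] { "host1", "host2", "host3" }, 100L);
  dist.addHostsAndBlockWeight(new String[] { "host2", "host3", "host4" }, 100L);
  // host2 and host3 hold both blocks, so the top host's weight (200)
  // equals the unique blocks total weight (100 + 100).
  String topHost = dist.getTopHosts().get(0);
  assert dist.getWeight(topHost) == dist.getUniqueBlocksTotalWeight();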

   
    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with one block
      testFile = new Path("/test3.txt");       
      WriteDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);
     
      // given the default replication factor is 3, we will have total of 3
      // replica of blocks; thus there is one host without weight
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      }
      while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }
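Hosts that hold no replica never enter the distribution at all, which is what the final assertion checks: 4 cluster nodes, but only 3 top hosts. The same behavior in isolation:

  HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
  dist.addHostsAndBlockWeight(new String[] { "host1", "host2", "host3" }, 100L);
  // host4 was never recorded, so only three hosts carry weight:
  assert dist.getTopHosts().size() == 3;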


  protected List<ServerName> internalGetTopBlockLocation(HRegionInfo region) {
    List<ServerName> topServerNames = null;
    try {
      HTableDescriptor tableDescriptor = getTableDescriptor(region.getTableName());
      if (tableDescriptor != null) {
        HDFSBlocksDistribution blocksDistribution =
            HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
        List<String> topHosts = blocksDistribution.getTopHosts();
        topServerNames = mapHostNameToServerName(topHosts);
      }
    } catch (IOException ioe) {
      LOG.debug("IOException during HDFSBlocksDistribution computation for region = "
          + region.getEncodedName(), ioe);
    }
    return topServerNames;
  }
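mapHostNameToServerName is not shown in this excerpt; a hypothetical sketch of that step, assuming a hostToServer map built from the live regionserver list (the map and its name are illustrative, not part of the snippet):

  // Hypothetical: resolve block-holding hostnames to regionserver names.
  private List<ServerName> mapHostNameToServerName(List<String> hosts) {
    List<ServerName> topServers = new ArrayList<>();
    for (String host : hosts) {
      ServerName sn = hostToServer.get(host); // assumed Map<String, ServerName>
      if (sn != null) {
        topServers.add(sn);
      }
    }
    return topServers;
  }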

   * This function returns the HDFS blocks distribution based on the data
   * captured when the HFiles were created.
   * @return The HDFS blocks distribution for the region.
   */
  public HDFSBlocksDistribution getHDFSBlocksDistribution() {
    HDFSBlocksDistribution hdfsBlocksDistribution =
      new HDFSBlocksDistribution();
    synchronized (this.stores) {
      for (Store store : this.stores.values()) {
        for (StoreFile sf : store.getStorefiles()) {
          HDFSBlocksDistribution storeFileBlocksDistribution =
            sf.getHDFSBlockDistribution();
          hdfsBlocksDistribution.add(storeFileBlocksDistribution);
        }
      }
    }
    return hdfsBlocksDistribution;
  }
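add() folds another distribution into this one, summing per-host weights and the unique blocks total, which is exactly the merge the loop performs per store file. A tiny sketch:

  HDFSBlocksDistribution merged = new HDFSBlocksDistribution();
  HDFSBlocksDistribution perFile = new HDFSBlocksDistribution();
  perFile.addHostsAndBlockWeight(new String[] { "host1", "host2" }, 64L);
  merged.add(perFile); // host1 and host2 each weigh 64; unique total is 64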

   * @return The HDFS blocks distribution for the given region.
   * @throws IOException
   */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
      final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
    Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableDescriptor.getTableName());
    FileSystem fs = tablePath.getFileSystem(conf);

    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
    for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
      Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
      if (storeFiles == null) continue;

      for (StoreFileInfo storeFileInfo : storeFiles) {
        hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
      }
    }
    return hdfsBlocksDistribution;
  }
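A hedged usage sketch of this static variant, written as a balancer-style caller might invoke it (conf, tableDescriptor, and regionInfo are assumed to be in scope):

  HDFSBlocksDistribution dist =
      HRegion.computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo);
  // Locality of the region's blocks relative to one host, in [0, 1].
  float locality = dist.getBlockLocalityIndex("host1");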

   * @return The HDFS blocks distribution
   */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation[] blockLocations =
      fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
      String[] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }

    return blocksDistribution;
  }
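A sketch of calling this per-file helper directly; the path is illustrative:

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  FileStatus status = fs.getFileStatus(new Path("/illustrative/file"));
  HDFSBlocksDistribution dist =
      FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());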

    @Override
    public synchronized void run() {
      initBlockCache();
      cacheStats = blockCache.getStats();

      HDFSBlocksDistribution hdfsBlocksDistribution =
          new HDFSBlocksDistribution();

      long tempNumStores = 0;
      long tempNumStoreFiles = 0;
      long tempMemstoreSize = 0;
      long tempStoreFileSize = 0;
      long tempReadRequestsCount = 0;
      long tempWriteRequestsCount = 0;
      long tempCheckAndMutateChecksFailed = 0;
      long tempCheckAndMutateChecksPassed = 0;
      long tempStorefileIndexSize = 0;
      long tempTotalStaticIndexSize = 0;
      long tempTotalStaticBloomSize = 0;
      long tempNumMutationsWithoutWAL = 0;
      long tempDataInMemoryWithoutWAL = 0;
      int tempPercentFileLocal = 0;


      for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
        tempNumMutationsWithoutWAL += r.numMutationsWithoutWAL.get();
        tempDataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
        tempReadRequestsCount += r.readRequestsCount.get();
        tempWriteRequestsCount += r.writeRequestsCount.get();
        tempCheckAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
        tempCheckAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
        tempNumStores += r.stores.size();
        for (Store store : r.stores.values()) {
          tempNumStoreFiles += store.getStorefilesCount();
          tempMemstoreSize += store.getMemStoreSize();
          tempStoreFileSize += store.getStorefilesSize();
          tempStorefileIndexSize += store.getStorefilesIndexSize();
          tempTotalStaticBloomSize += store.getTotalStaticBloomSize();
          tempTotalStaticIndexSize += store.getTotalStaticIndexSize();
        }

        hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
      }

      float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
          regionServer.getServerName().getHostname());
      tempPercentFileLocal = (int) (localityIndex * 100);


      // Compute the number of requests per second
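getBlockLocalityIndex(host) is the host's weight divided by the unique blocks total weight (assuming the usual HDFSBlocksDistribution semantics), so the percentage above is just that ratio scaled to 100. Worked in isolation:

  HDFSBlocksDistribution d = new HDFSBlocksDistribution();
  d.addHostsAndBlockWeight(new String[] { "rs-host", "other" }, 3L); // local block
  d.addHostsAndBlockWeight(new String[] { "other" }, 1L);            // remote block
  // rs-host holds 3 of 4 unique weight units: 3/4 = 0.75 -> 75 percent.
  int percent = (int) (d.getBlockLocalityIndex("rs-host") * 100);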

      ht.put(put);

      HRegion firstRegion = htu.getHBaseCluster().
          getRegions(TableName.valueOf(this.getName())).get(0);
      firstRegion.flushcache();
      HDFSBlocksDistribution blocksDistribution1 =
          firstRegion.getHDFSBlocksDistribution();

      // given the default replication factor is 2 and we have 2 HFiles,
      // we will have a total of 4 block replicas on 3 datanodes; thus at
      // least one host must hold a replica of both HFiles, and that host's
      // weight will equal the unique blocks total weight.
      long uniqueBlocksWeight1 =
          blocksDistribution1.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution1.getTopHosts().get(0);
      long topHostWeight = blocksDistribution1.getWeight(topHost);
      assertTrue(uniqueBlocksWeight1 == topHostWeight);

      // use the static method to compute the value, it should be the same.
      // static method is used by load balancer or other components
      HDFSBlocksDistribution blocksDistribution2 =
        HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),
        firstRegion.getTableDesc(), firstRegion.getRegionInfo());
      long uniqueBlocksWeight2 =
        blocksDistribution2.getUniqueBlocksTotalWeight();

      assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

      ht.close();
    } finally {
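Note the design point the comment above makes: the static computeHDFSBlocksDistribution reads store file metadata straight from HDFS, so a component like the load balancer can compute the same locality numbers without having the region open.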


   * @return The HDFS blocks distribution for the given region.
   * @throws IOException
   */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
      final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
    Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf), tableDescriptor.getName());
    FileSystem fs = tablePath.getFileSystem(conf);

    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
    for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
      Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
      if (storeFiles == null) continue;

      for (StoreFileInfo storeFileInfo : storeFiles) {
        hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
      }
    }
    return hdfsBlocksDistribution;
  }
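This variant is the same method from an older HBase version: it resolves the table directory via FSUtils.getTablePath(...) and HTableDescriptor.getName(), where the earlier snippet on this page uses FSUtils.getTableDir(...) and getTableName().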
