Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.CachePoolEntry
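
A CachePoolEntry pairs a pool's CachePoolInfo (name, owner, group, mode, limit, maximum relative expiry) with its CachePoolStats (bytes and files needed, cached, and over limit). Entries are returned by DistributedFileSystem#listCachePools() as a RemoteIterator<CachePoolEntry>. The snippets below are truncated excerpts from the Hadoop sources.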


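From a test helper that polls listCachePools() until a pool's statistics match the expected target values:
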
        } catch (IOException e) {
          fail("got IOException while calling " +
              "listCachePools: " + e.getMessage());
        }
        while (true) {
          CachePoolEntry entry = null;
          try {
            if (!iter.hasNext()) {
              break;
            }
            entry = iter.next();
          } catch (IOException e) {
            fail("got IOException while iterating through " +
                "listCachePools: " + e.getMessage());
          }
          if (entry == null) {
            break;
          }
          if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
            continue;
          }
          CachePoolStats stats = entry.getStats();
          if ((targetBytesNeeded == stats.getBytesNeeded()) &&
              (targetBytesCached == stats.getBytesCached()) &&
              (targetFilesNeeded == stats.getFilesNeeded()) &&
              (targetFilesCached == stats.getFilesCached())) {
            return true;


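Building an entry on the server side: if the caller lacks READ permission on the pool, the entry is returned with empty stats rather than the real ones:
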
        pc.checkPermission(this, FsAction.READ);
      } catch (AccessControlException e) {
        hasPermission = false;
      }
    }
    return new CachePoolEntry(getInfo(hasPermission),
        hasPermission ? getStats() : new CachePoolStats.Builder().build());
  }

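From an HA test that verifies the pool listing while repeatedly failing NameNodes over between active and standby:
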
      HashSet<String> poolNames, int active) throws Exception {
    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    int poolCount = poolNames.size();
    for (int i = 0; i < poolCount; i++) {
      CachePoolEntry pool = pools.next();
      String poolName = pool.getInfo().getPoolName();
      assertTrue("The pool name should be one of the expected names",
          tmpNames.remove(poolName));
      if (i % 2 == 0) {
        int standby = active;
        active = (standby == 0) ? 1 : 0;
        cluster.transitionToStandby(standby);

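From a pool-listing command that renders each entry as a row of a TableListing, formatting the limit and maximum TTL and optionally appending the pool's stats:
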
      TableListing listing = builder.build();
      int numResults = 0;
      try {
        RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
        while (iter.hasNext()) {
          CachePoolEntry entry = iter.next();
          CachePoolInfo info = entry.getInfo();
          LinkedList<String> row = new LinkedList<String>();
          if (name == null || info.getPoolName().equals(name)) {
            row.add(info.getPoolName());
            row.add(info.getOwnerName());
            row.add(info.getGroupName());
            row.add(info.getMode() != null ? info.getMode().toString() : null);
            Long limit = info.getLimit();
            String limitString;
            if (limit != null && limit.equals(CachePoolInfo.LIMIT_UNLIMITED)) {
              limitString = "unlimited";
            } else {
              limitString = String.valueOf(limit);
            }
            row.add(limitString);
            Long maxTtl = info.getMaxRelativeExpiryMs();
            String maxTtlString = null;

            if (maxTtl != null) {
              if (maxTtl.longValue() == CachePoolInfo.RELATIVE_EXPIRY_NEVER) {
                maxTtlString = "never";
              } else {
                maxTtlString = DFSUtil.durationToString(maxTtl);
              }
            }
            row.add(maxTtlString);
            if (printStats) {
              CachePoolStats stats = entry.getStats();
              row.add(Long.toString(stats.getBytesNeeded()));
              row.add(Long.toString(stats.getBytesCached()));
              row.add(Long.toString(stats.getBytesOverlimit()));
              row.add(Long.toString(stats.getFilesNeeded()));
              row.add(Long.toString(stats.getFilesCached()));

  }

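Protobuf deserialization in PBHelper: a CachePoolEntryProto is converted field by field into a CachePoolEntry:
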
  public static CachePoolEntry convert(CachePoolEntryProto proto) {
    CachePoolInfo info = PBHelper.convert(proto.getInfo());
    CachePoolStats stats = PBHelper.convert(proto.getStats());
    return new CachePoolEntry(info, stats);
  }
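
Putting the pieces together, here is a minimal client-side sketch. It assumes an already-initialized DistributedFileSystem; the variable name dfs and the surrounding class are illustrative, not from the sources above. It lists all cache pools and prints each entry's name and stats:

import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;

public class ListCachePoolsSketch {
  // Lists every cache pool visible to the caller and prints its stats.
  // Note: when the caller lacks READ permission on a pool, the NameNode
  // returns the entry with empty (all-zero) stats, as shown in the
  // permission-check fragment above.
  static void printPools(DistributedFileSystem dfs) throws IOException {
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      CachePoolInfo info = entry.getInfo();
      CachePoolStats stats = entry.getStats();
      System.out.println(info.getPoolName()
          + " bytesNeeded=" + stats.getBytesNeeded()
          + " bytesCached=" + stats.getBytesCached()
          + " filesNeeded=" + stats.getFilesNeeded()
          + " filesCached=" + stats.getFilesCached());
    }
  }
}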