Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveStats

The snippets below are drawn from the Hadoop HDFS sources and trace the life cycle of a CacheDirectiveStats object: a test helper that polls directive stats, the CacheManager code that enforces pool limits and keeps pool aggregates consistent, the CacheAdmin -listDirectives printer, and the PBHelper protobuf converters.


        RemoteIterator<CacheDirectiveEntry> iter = null;
        CacheDirectiveEntry entry = null;
        try {
          iter = dfs.listCacheDirectives(filter);
          entry = iter.next();
        } catch (IOException e) {
          fail("got IOException while calling " +
              "listCacheDirectives: " + e.getMessage());
        }
        Assert.assertNotNull(entry);
        CacheDirectiveStats stats = entry.getStats();
        if ((targetBytesNeeded == stats.getBytesNeeded()) &&
            (targetBytesCached == stats.getBytesCached()) &&
            (targetFilesNeeded == stats.getFilesNeeded()) &&
            (targetFilesCached == stats.getFilesCached())) {
          return true;
        } else {
          LOG.info(infoString + ": " +
              "filesNeeded: " +
              stats.getFilesNeeded() + "/" + targetFilesNeeded +
              ", filesCached: " +
              stats.getFilesCached() + "/" + targetFilesCached +
              ", bytesNeeded: " +
              stats.getBytesNeeded() + "/" + targetBytesNeeded +
              ", bytesCached: " +
              stats.getBytesCached() + "/" + targetBytesCached);
          return false;
        }
      }
    }, 500, 60000);
  }
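The helper above is test code: it polls listCacheDirectives until the reported CacheDirectiveStats match the expected targets, logging each attempt as actual/target pairs. Outside a test, the same read path looks like the minimal sketch below; the class name CacheStatsPrinter and the path filter are ours, and an open DistributedFileSystem handle is assumed.

  import java.io.IOException;

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

  public class CacheStatsPrinter {
    /** Print the stats of every cache directive whose path matches filterPath. */
    static void printStats(DistributedFileSystem dfs, String filterPath)
        throws IOException {
      CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
          .setPath(new Path(filterPath))
          .build();
      RemoteIterator<CacheDirectiveEntry> iter =
          dfs.listCacheDirectives(filter);
      while (iter.hasNext()) {
        CacheDirectiveEntry entry = iter.next();
        CacheDirectiveStats stats = entry.getStats();
        System.out.println(entry.getInfo().getPath()
            + ": bytesNeeded=" + stats.getBytesNeeded()
            + ", bytesCached=" + stats.getBytesCached()
            + ", filesNeeded=" + stats.getFilesNeeded()
            + ", filesCached=" + stats.getFilesCached());
      }
    }
  }

A directive is fully cached once bytesCached equals bytesNeeded and filesCached equals filesNeeded, which is exactly the condition the test helper waits for.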


  /**
   * Throws an exception if the CachePool does not have enough capacity to
   * cache the given path at the given replication factor.
   *
   * @param pool CachePool to check
   * @param path Path that is being cached
   * @param replication Replication factor of the path
   * @throws InvalidRequestException if the pool does not have enough capacity
   */
  private void checkLimit(CachePool pool, String path,
      short replication) throws InvalidRequestException {
    CacheDirectiveStats stats = computeNeeded(path, replication);
    if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
      return;
    }
    // The pool is charged bytesNeeded * replication: each cached replica
    // occupies cache memory on a separate datanode.
    if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication)
        > pool.getLimit()) {
      throw new InvalidRequestException("Caching path " + path + " of size "
          + stats.getBytesNeeded() + " bytes at replication "
          + replication + " would exceed pool " + pool.getPoolName()
          + "'s remaining capacity of "
          + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
    }
  }
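checkLimit charges a prospective directive at bytesNeeded * replication and rejects the request when that charge would push the pool past its limit; computeNeeded (not shown) walks the namespace under the path to fill in the raw byte and file counts. Below is a hedged sketch of assembling such a stats object with the public CacheDirectiveStats.Builder; all values are illustrative.

    // Illustrative values; in the NameNode, computeNeeded derives them
    // from the INodes under the directive's path.
    CacheDirectiveStats stats = new CacheDirectiveStats.Builder()
        .setBytesNeeded(512L * 1024 * 1024)  // raw bytes, before replication
        .setFilesNeeded(4)
        .setBytesCached(0)                   // nothing cached yet
        .setFilesCached(0)
        .setHasExpired(false)
        .build();
    short replication = 3;
    long poolCharge = stats.getBytesNeeded() * replication;  // what checkLimit tests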

    List<CacheDirective> directives = directivesByPath.get(path);
    if (directives == null) {
      directives = new ArrayList<CacheDirective>(1);
      directivesByPath.put(path, directives);
    }
    directives.add(directive);
    // Fix up pool stats
    CacheDirectiveStats stats =
        computeNeeded(directive.getPath(), directive.getReplication());
    directive.addBytesNeeded(stats.getBytesNeeded());
    directive.addFilesNeeded(stats.getFilesNeeded());

    setNeedsRescan();
  }
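addInternal indexes the new directive by path, computes its needed bytes and files once via computeNeeded, adds those counts to the directive (and, through the directive's pool, to the pool aggregates that checkLimit consults), and finally schedules a rescan so the CacheReplicationMonitor can refresh the cached counts. A hedged sketch of the invariant this bookkeeping maintains follows; CachePool and CacheDirective are NameNode-internal classes, and the helper name is ours.

  // Sanity check (server-side): a pool's aggregate "needed" counters
  // should equal the sums over its directives.
  private static void checkPoolAggregates(CachePool pool) {
    long bytesNeeded = 0;
    long filesNeeded = 0;
    for (CacheDirective d : pool.getDirectiveList()) {
      bytesNeeded += d.getBytesNeeded();
      filesNeeded += d.getFilesNeeded();
    }
    assert bytesNeeded == pool.getBytesNeeded();
    assert filesNeeded == pool.getFilesNeeded();
  }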

        RemoteIterator<CacheDirectiveEntry> iter =
            dfs.listCacheDirectives(builder.build());
        int numEntries = 0;
        while (iter.hasNext()) {
          CacheDirectiveEntry entry = iter.next();
          CacheDirectiveInfo directive = entry.getInfo();
          CacheDirectiveStats stats = entry.getStats();
          List<String> row = new LinkedList<String>();
          row.add("" + directive.getId());
          row.add(directive.getPool());
          row.add("" + directive.getReplication());
          String expiry;
          // An expiry this far out is effectively "never"; round for nicer printing
          if (directive.getExpiration().getMillis() >
              Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
            expiry = "never";
          } else {
            expiry = directive.getExpiration().toString();
          }
          row.add(expiry);
          row.add(directive.getPath().toUri().getPath());
          if (printStats) {
            row.add("" + stats.getBytesNeeded());
            row.add("" + stats.getBytesCached());
            row.add("" + stats.getFilesNeeded());
            row.add("" + stats.getFilesCached());
          }
          tableListing.addRow(row.toArray(new String[0]));
          numEntries++;
        }
        System.out.print(String.format("Found %d entr%s%n",
            numEntries, numEntries == 1 ? "y" : "ies"));
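This is the CacheAdmin -listDirectives printer: it walks the entries matching a caller-built filter, renders any expiry beyond half of MAX_RELATIVE_EXPIRY_MS as "never", and appends the four stats columns when -stats is passed. A hedged sketch of assembling the filter it consumes; the pool name and path are illustrative, and dfs is an open DistributedFileSystem handle.

        CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();
        builder.setPool("research");             // restrict to one pool
        builder.setPath(new Path("/datasets"));  // restrict to one path
        RemoteIterator<CacheDirectiveEntry> iter =
            dfs.listCacheDirectives(builder.build());

From the shell, the same listing is exposed as hdfs cacheadmin -listDirectives [-stats] [-path <path>] [-pool <pool>].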

  public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
    CacheDirectiveInfo info = PBHelper.convert(proto.getInfo());
    CacheDirectiveStats stats = PBHelper.convert(proto.getStats());
    return new CacheDirectiveEntry(info, stats);
  }
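PBHelper translates a CacheDirectiveEntry between its Java and protobuf forms, delegating the nested info and stats to their own convert overloads. A hedged sketch of a stats round trip, assuming the CacheDirectiveStatsProto overloads of PBHelper.convert that accompany the entry converter above; the values and helper name are ours.

  static void statsRoundTrip() {
    CacheDirectiveStats original = new CacheDirectiveStats.Builder()
        .setBytesNeeded(4096).setBytesCached(2048)
        .setFilesNeeded(2).setFilesCached(1)
        .build();
    // Across the NameNode RPC boundary and back, nothing should change.
    CacheDirectiveStatsProto proto = PBHelper.convert(original);
    CacheDirectiveStats copy = PBHelper.convert(proto);
    assert copy.getBytesNeeded() == original.getBytesNeeded()
        && copy.getFilesCached() == original.getFilesCached();
  }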
