Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry

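CacheDirectiveEntry pairs a CacheDirectiveInfo (the directive as submitted: id, path, pool, replication, expiration) with its current CacheDirectiveStats (bytes and files needed versus cached). Entries are returned by DistributedFileSystem.listCacheDirectives(), as the examples below show. The following minimal sketch of that flow is an illustration, not taken from the examples; the NameNode URI and class name are assumptions:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

public class ListCacheDirectivesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed NameNode address; substitute your cluster's URI.
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    // A null filter lists every directive the caller is allowed to read.
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      CacheDirectiveInfo info = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      System.out.println("id=" + info.getId()
          + " pool=" + info.getPool()
          + " path=" + info.getPath()
          + " cached " + stats.getBytesCached() + "/" + stats.getBytesNeeded()
          + " bytes, " + stats.getFilesCached() + "/" + stats.getFilesNeeded()
          + " files");
    }
  }
}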

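Test helper that polls listCacheDirectives until the single matching directive reports the expected needed/cached byte and file counts: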
    // (log statement reconstructed; the original snippet began mid-string)
    LOG.info("Polling listCacheDirectives for " +
        targetBytesNeeded + " targetBytesNeeded, " +
        targetBytesCached + " targetBytesCached, " +
        targetFilesNeeded + " targetFilesNeeded, " +
        targetFilesCached + " targetFilesCached");
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        RemoteIterator<CacheDirectiveEntry> iter = null;
        CacheDirectiveEntry entry = null;
        try {
          iter = dfs.listCacheDirectives(filter);
          entry = iter.next();
        } catch (IOException e) {
          fail("got IOException while calling " +
              "listCacheDirectives: " + e.getMessage());
        }
        Assert.assertNotNull(entry);
        CacheDirectiveStats stats = entry.getStats();
        if ((targetBytesNeeded == stats.getBytesNeeded()) &&
            (targetBytesCached == stats.getBytesCached()) &&
            (targetFilesNeeded == stats.getFilesNeeded()) &&
            (targetFilesCached == stats.getFilesCached())) {
          return true;
        }
        return false;
      }
    }, 500, 30000); // check interval and timeout in ms (assumed values; the original was truncated here)


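Test helper that removes cache directives one at a time through the NameNode RPC interface, waiting for the cached-block counts to drop after each removal: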
    // Uncache and check each path in sequence
    RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null);
    for (int i=0; i<numFiles; i++) {
      CacheDirectiveEntry entry = entries.next();
      nnRpc.removeCacheDirective(entry.getInfo().getId());
      expected -= numBlocksPerFile;
      waitForCachedBlocks(namenode, expected, expected,
          "testWaitForCachedReplicas:2");
    }
  }

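Exercising directive expiration: the test shortens a directive's TTL so it expires, checks the expiry timestamp on the listed entry, extends the TTL again, and finally verifies that a negative TTL is rejected: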
    // Change it to expire sooner
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(0)).build());
    waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
    CacheDirectiveEntry ent = it.next();
    assertFalse(it.hasNext());
    Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
    assertTrue("Directive should have expired",
        entryExpiry.before(new Date()));
    // Change it back to expire later
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(120000)).build());
    waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
    it = dfs.listCacheDirectives(null);
    ent = it.next();
    assertFalse(it.hasNext());
    entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
    assertTrue("Directive should not have expired",
        entryExpiry.after(new Date()));
    // Verify that setting a negative TTL throws an error
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
          .setExpiration(Expiration.newRelative(-1)).build());
      fail("Expected modification with a negative TTL to fail");
    } catch (InvalidRequestException e) {
      // expected; the original snippet was truncated here, so the TTL value
      // and exception type are reconstructions from the comment above
    }

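Server-side construction of the listing reply (the preceding permission check is elided): entries are added only when the caller can read the directive's pool, and the final false signals that no further batches remain: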
        } catch (AccessControlException e) {
          hasPermission = false;
        }
      }
      if (hasPermission) {
        replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
        numReplies++;
      }
    }
    return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
  }

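HA test helper: iterate over the listed directives, check each pool name off against the expected set, and fail over between the two NameNodes every other iteration: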
      HashSet<String> poolNames, int active) throws Exception {
    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
    RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
    int poolCount = poolNames.size();
    for (int i=0; i<poolCount; i++) {
      CacheDirectiveEntry directive = directives.next();
      String poolName = directive.getInfo().getPool();
      assertTrue("The pool name should be expected", tmpNames.remove(poolName));
      if (i % 2 == 0) {
        int standby = active;
        active = (standby == 0) ? 1 : 0;
        cluster.transitionToStandby(standby);
        cluster.transitionToActive(active); // promote the other NameNode (reconstructed; truncated in the original)
      }
    }
  }

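CacheAdmin-style removal by path: list the directives whose path matches, then remove each one by id: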
        RemoteIterator<CacheDirectiveEntry> iter =
            dfs.listCacheDirectives(
                new CacheDirectiveInfo.Builder().
                    setPath(new Path(path)).build());
        while (iter.hasNext()) {
          CacheDirectiveEntry entry = iter.next();
          try {
            dfs.removeCacheDirective(entry.getInfo().getId());
            System.out.println("Removed cache directive " +
                entry.getInfo().getId());
          } catch (IOException e) {
            System.err.println(prettifyException(e));
            exitCode = 2;
          }
        }

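CacheAdmin-style listing: each entry's CacheDirectiveInfo and CacheDirectiveStats are flattened into one output row: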
        DistributedFileSystem dfs = getDFS(conf);
        RemoteIterator<CacheDirectiveEntry> iter =
            dfs.listCacheDirectives(builder.build());
        int numEntries = 0;
        while (iter.hasNext()) {
          CacheDirectiveEntry entry = iter.next();
          CacheDirectiveInfo directive = entry.getInfo();
          CacheDirectiveStats stats = entry.getStats();
          List<String> row = new LinkedList<String>();
          row.add("" + directive.getId());
          row.add(directive.getPool());
          row.add("" + directive.getReplication());
          // (tail reconstructed: the original was truncated after declaring
          // expiry; remaining stats columns and table printing are elided)
          String expiry = (directive.getExpiration() == null)
              ? "never" : directive.getExpiration().toString();
          row.add(expiry);
          numEntries++;
        }

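Client-side iterator wrapper in DistributedFileSystem: paths coming back from the NameNode are re-qualified with this filesystem's scheme and authority before each entry reaches the caller: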
      @Override
      public CacheDirectiveEntry next() throws IOException {
        // Although the paths we get back from the NameNode should always be
        // absolute, we call makeQualified to add the scheme and authority of
        // this DistributedFileSystem.
        CacheDirectiveEntry desc = iter.next();
        CacheDirectiveInfo info = desc.getInfo();
        Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
        return new CacheDirectiveEntry(
            new CacheDirectiveInfo.Builder(info).setPath(p).build(),
            desc.getStats());
      }
    };
  }

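Protobuf helper that rebuilds a CacheDirectiveEntry from its wire representation: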
  public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
    CacheDirectiveInfo info = PBHelper.convert(proto.getInfo());
    CacheDirectiveStats stats = PBHelper.convert(proto.getStats());
    return new CacheDirectiveEntry(info, stats);
  }


