Examples of BlockTokenSecretManager


Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

      long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
      long blockTokenLifetime = keys.getTokenLifetime();
      LOG.info("Block token params received from NN: keyUpdateInterval="
          + blockKeyUpdateInterval / (60 * 1000) + " min(s), tokenLifetime="
          + blockTokenLifetime / (60 * 1000) + " min(s)");
      this.blockTokenSecretManager = new BlockTokenSecretManager(false,
          blockKeyUpdateInterval, blockTokenLifetime);
      this.blockTokenSecretManager.setKeys(keys);
      /*
       * Balancer should sync its block keys with NN more frequently than NN
       * updates its block keys
       */
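
A minimal sketch of the pattern above, assuming keys is an org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys fetched from the NameNode; the factory class and method name are hypothetical, and only the constructor and setKeys() call are taken from the snippet:

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;

// Hypothetical helper mirroring the Balancer snippet above (older, pre-federation API).
public class SlaveBlockTokenManagerFactory {
  public static BlockTokenSecretManager fromExportedKeys(ExportedBlockKeys keys)
      throws Exception {
    // Key-update interval and token lifetime travel together with the exported keys.
    long keyUpdateInterval = keys.getKeyUpdateInterval();
    long tokenLifetime = keys.getTokenLifetime();
    // isMaster=false: this instance verifies and uses keys but never rolls them itself.
    BlockTokenSecretManager sm =
        new BlockTokenSecretManager(false, keyUpdateInterval, tokenLifetime);
    sm.setKeys(keys); // install the block keys received from the NN
    return sm;
  }
}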

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // BlockTokenSecretManager is created here, but it shouldn't be
    // used until it is initialized in register().
    this.blockTokenSecretManager = new BlockTokenSecretManager(false,
        0, 0);
    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
        conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
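
The comment above is the important detail: the DataNode constructs a placeholder secret manager at startup and only makes it usable after it has registered with the NameNode. A hedged sketch of that deferred-initialization pattern, assuming (as in the Balancer example) that initialization means installing ExportedBlockKeys via setKeys(); the class and method names are illustrative only:

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;

// Illustrative skeleton of the create-early, initialize-in-register() pattern.
public class DataNodeTokenState {
  // Created with zero intervals; not meant to be used until real keys arrive.
  private final BlockTokenSecretManager blockTokenSecretManager =
      new BlockTokenSecretManager(false, 0, 0);

  // Called from the registration path once the NameNode has handed out block keys.
  void initializeAfterRegister(ExportedBlockKeys keys) throws Exception {
    blockTokenSecretManager.setKeys(keys);
  }
}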

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

    setBlockTotal();
    pendingReplications = new PendingReplicationBlocks(
                            conf.getInt("dfs.replication.pending.timeout.sec",
                                        -1) * 1000L);
    if (isAccessTokenEnabled) {
      accessTokenHandler = new BlockTokenSecretManager(true,
          accessKeyUpdateInterval, accessTokenLifetime);
    }
    this.hbthread = new Daemon(new HeartbeatMonitor());
    this.lmthread = new Daemon(leaseManager.new Monitor());
    this.replthread = new Daemon(new ReplicationMonitor());
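
For contrast with the slave-side examples, a hedged sketch of the master-mode constructor used by the NameNode above: isMaster=true means this instance generates and rotates the block keys itself, and it is only created when access tokens are enabled. The factory wrapper is illustrative; only the constructor call comes from the snippet:

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;

// Illustrative factory for the NameNode-side (master) access token handler.
public class MasterBlockTokenManagerFactory {
  public static BlockTokenSecretManager create(boolean isAccessTokenEnabled,
                                               long accessKeyUpdateInterval,
                                               long accessTokenLifetime) {
    if (!isAccessTokenEnabled) {
      return null; // block access tokens disabled: no handler is created
    }
    // isMaster=true: this instance creates and rolls the block keys.
    return new BlockTokenSecretManager(true, accessKeyUpdateInterval,
        accessTokenLifetime);
  }
}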

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

      cluster.waitActive();
      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second)
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
      Path fileToAppend = new Path(FILE_TO_APPEND);
      FileSystem fs = cluster.getFileSystem();

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

      cluster.waitActive();
      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second)
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
      Path fileToWrite = new Path(FILE_TO_WRITE);
      FileSystem fs = cluster.getFileSystem();
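
Both test snippets above shrink the token lifetime to one second before exercising the append/write paths, then (as the read test below shows) wait for cached tokens to expire. A hedged sketch of that setup, assuming SecurityTestUtil is the helper from the HDFS test tree and lives in the same package as BlockTokenSecretManager:

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
import org.apache.hadoop.security.token.Token;

// Illustrative test support: shorten the lifetime, then spin until a token expires.
public class TokenExpiryTestSupport {
  static void shortenLifetime(BlockTokenSecretManager sm) throws Exception {
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L); // 1 second
  }

  static void waitUntilExpired(Token<BlockTokenIdentifier> token) throws Exception {
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      Thread.sleep(10); // poll until the short lifetime elapses
    }
  }
}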

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final NamenodeProtocols nnProto = nn.getRpcServer();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second) initially
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);

      Path fileToRead = new Path(FILE_TO_READ);
      FileSystem fs = cluster.getFileSystem();
      createFile(fs, fileToRead);

      /*
       * setup for testing expiration handling of cached tokens
       */

      // read using blockSeekTo(). Acquired tokens are cached in in1
      FSDataInputStream in1 = fs.open(fileToRead);
      assertTrue(checkFile1(in1));
      // read using blockSeekTo(). Acquired tokens are cached in in2
      FSDataInputStream in2 = fs.open(fileToRead);
      assertTrue(checkFile1(in2));
      // read using fetchBlockByteRange(). Acquired tokens are cached in in3
      FSDataInputStream in3 = fs.open(fileToRead);
      assertTrue(checkFile2(in3));

      /*
       * testing READ interface on DN using a BlockReader
       */
      DFSClient client = null;
      try {
        client = new DFSClient(new InetSocketAddress("localhost",
          cluster.getNameNodePort()), conf);
      } finally {
        if (client != null) client.close();
      }
      List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(
          FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
      LocatedBlock lblock = locatedBlocks.get(0); // first block
      Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
      // verify token is not expired
      assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));
      // read with valid token, should succeed
      tryRead(conf, lblock, true);

      /*
       * wait till myToken and all cached tokens in in1, in2 and in3 expire
       */

      while (!SecurityTestUtil.isBlockTokenExpired(myToken)) {
        try {
          Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
      }

      /*
       * continue testing READ interface on DN using a BlockReader
       */

      // verify token is expired
      assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken));
      // read should fail
      tryRead(conf, lblock, false);
      // use a valid new token
      lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
              EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
      // read should succeed
      tryRead(conf, lblock, true);
      // use a token with wrong blockID
      ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
          .getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
      lblock.setBlockToken(sm.generateToken(wrongBlock,
          EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
      // read should fail
      tryRead(conf, lblock, false);
      // use a token with wrong access modes
      lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
          EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,
                     BlockTokenSecretManager.AccessMode.COPY,
                     BlockTokenSecretManager.AccessMode.REPLACE)));
      // read should fail
      tryRead(conf, lblock, false);
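
The negative-path checks above all hinge on generateToken(): a block token is bound both to a specific ExtendedBlock and to a set of access modes, and the DataNode rejects reads when either does not match. A hedged sketch of the two token shapes the test builds, reusing only the calls shown above; the helper names are illustrative:

import java.util.EnumSet;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

// Illustrative helpers for building a valid read token and a deliberately mismatched one.
public class ReadTokenFixtures {
  static Token<BlockTokenIdentifier> validReadToken(
      BlockTokenSecretManager sm, ExtendedBlock block) throws Exception {
    return sm.generateToken(block,
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ));
  }

  static Token<BlockTokenIdentifier> tokenForWrongBlock(
      BlockTokenSecretManager sm, ExtendedBlock block) throws Exception {
    // Same block pool, different block id: a read with this token should fail.
    ExtendedBlock wrong =
        new ExtendedBlock(block.getBlockPoolId(), block.getBlockId() + 1);
    return sm.generateToken(wrong,
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ));
  }
}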

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

      LOG.info("Block token params received from NN: for block pool " +
          blockPoolId + " keyUpdateInterval="
          + blockKeyUpdateInterval / (60 * 1000)
          + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
          + " min(s)");
      final BlockTokenSecretManager secretMgr =
          new BlockTokenSecretManager(0, blockTokenLifetime, blockPoolId,
              dnConf.encryptionAlgorithm);
      blockPoolTokenSecretManager.addBlockPool(blockPoolId, secretMgr);
    }
  }
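
In the federated DataNode, one BlockTokenSecretManager is kept per block pool and registered with a BlockPoolTokenSecretManager, as the snippet shows. A hedged sketch of just that registration step; the method name is illustrative and the BlockPoolTokenSecretManager package is assumed to match BlockTokenSecretManager's:

import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;

// Illustrative helper: create and register a per-block-pool secret manager.
public class BlockPoolTokenRegistration {
  static void registerPool(BlockPoolTokenSecretManager poolManager,
                           String blockPoolId,
                           long blockTokenLifetime,
                           String encryptionAlgorithm) {
    // keyUpdateInterval is passed as 0 here, matching the DataNode snippet above.
    BlockTokenSecretManager secretMgr = new BlockTokenSecretManager(
        0, blockTokenLifetime, blockPoolId, encryptionAlgorithm);
    poolManager.addBlockPool(blockPoolId, secretMgr);
  }
}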

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

    boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

    if (isHaEnabled) {
      String thisNnId = HAUtil.getNameNodeId(conf, nsId);
      String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
      return new BlockTokenSecretManager(updateMin*60*1000L,
          lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
          encryptionAlgorithm);
    } else {
      return new BlockTokenSecretManager(updateMin*60*1000L,
          lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
    }
  }
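
The HA-specific detail above is the third constructor argument: each NameNode picks a deterministic index (the NN whose id sorts first gets 0, the other 1), and null is passed for the fourth argument exactly as in the snippet. A hedged sketch that collapses the HA and non-HA branches into one illustrative method, with the NN ids resolved by the caller instead of via HAUtil:

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;

// Illustrative variant of the factory above; otherNnId is null when HA is not enabled.
public class HaBlockTokenManagerFactory {
  static BlockTokenSecretManager create(long updateMin, long lifetimeMin,
                                        String thisNnId, String otherNnId,
                                        String encryptionAlgorithm) {
    // Deterministic index per NN: the lexicographically smaller id gets 0, the other 1.
    int nnIndex = (otherNnId == null || thisNnId.compareTo(otherNnId) < 0) ? 0 : 1;
    return new BlockTokenSecretManager(updateMin * 60 * 1000L,
        lifetimeMin * 60 * 1000L, nnIndex, null, encryptionAlgorithm);
  }
}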

Examples of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager

      LOG.info("Block token params received from NN: keyUpdateInterval="
          + blockKeyUpdateInterval / (60 * 1000) + " min(s), tokenLifetime="
          + blockTokenLifetime / (60 * 1000) + " min(s)");
      String encryptionAlgorithm = conf.get(
          DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
      this.blockTokenSecretManager = new BlockTokenSecretManager(
          blockKeyUpdateInterval, blockTokenLifetime, blockpoolID,
          encryptionAlgorithm);
      this.blockTokenSecretManager.addKeys(keys);
      /*
       * Balancer should sync its block keys with NN more frequently than NN
       * updates its block keys
       */
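
This is the same Balancer pattern as the first example, but against the newer constructor that also takes a block pool id and an optional encryption algorithm, with addKeys() instead of setKeys(). A hedged sketch; the ExportedBlockKeys type of keys and the helper name are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;

// Illustrative helper mirroring the federated Balancer snippet above.
public class BalancerTokenManagerFactory {
  static BlockTokenSecretManager create(Configuration conf, String blockpoolID,
                                        ExportedBlockKeys keys) throws Exception {
    long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
    long blockTokenLifetime = keys.getTokenLifetime();
    String encryptionAlgorithm =
        conf.get(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
    BlockTokenSecretManager sm = new BlockTokenSecretManager(
        blockKeyUpdateInterval, blockTokenLifetime, blockpoolID,
        encryptionAlgorithm);
    sm.addKeys(keys); // merge the NameNode's current keys into the local cache
    return sm;
  }
}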
