Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSInputStream
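DFSInputStream is the low-level, client-side read stream for HDFS files; it is normally obtained from DFSClient.open() rather than constructed directly. As a minimal sketch of the pattern most of the examples below share (the path and buffer size are illustrative, and "dfsClient" is assumed to be an already-initialized DFSClient):

    // Minimal sketch: open a file through DFSClient and drain it.
    // Assumes an initialized org.apache.hadoop.hdfs.DFSClient; the
    // path "/tmp/example.txt" is purely illustrative.
    DFSInputStream in = null;
    try {
      in = dfsClient.open("/tmp/example.txt");
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf, 0, buf.length)) != -1) {
        // process n bytes of buf here
      }
    } finally {
      IOUtils.closeStream(in); // null-safe close (org.apache.hadoop.io.IOUtils)
    }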


    private byte[] cacheInitialContents() throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int)status.getLen()];
      DFSInputStream in = null;
      try {
        in = dfsClient.open(name);
        IOUtils.readFully(in, content, 0, content.length);
      } finally {
        // Null-safe close: open() may have thrown before "in" was assigned,
        // and a bare in.close() here would then mask the original exception
        // with a NullPointerException.
        IOUtils.closeStream(in);
      }
      return content;
    }


    public void checkSalvagedRemains() throws IOException {
      int chainIdx = 0;
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      long length = status.getLen();
      int numBlocks = (int)((length + blockSize - 1) / blockSize);
      DFSInputStream in = null;
      byte[] blockBuffer = new byte[blockSize];

      try {
        for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
          if (blocksToCorrupt.contains(blockIdx)) {
            if (in != null) {
              in.close();
              in = null;
            }
            continue;
          }
          if (in == null) {
            in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
            chainIdx++;
          }
          int len = blockBuffer.length;
          if (blockIdx == (numBlocks - 1)) {
            // The last block might not be full-length
            len = (int)(in.getFileLength() % blockSize);
            if (len == 0) len = blockBuffer.length;
          }
          IOUtils.readFully(in, blockBuffer, 0, len);
          int startIdx = blockIdx * blockSize;
          for (int i = 0; i < len; i++) {
            // Truncated in the original listing; the loop presumably verifies
            // blockBuffer[i] against the expected byte at file offset startIdx + i.
          }
        }
      } finally {
        IOUtils.closeStream(in); // close whichever lost+found chain file is still open
      }
    }

      stm.close();

      in = fs.open(file1);
      in.readByte();

      // Reflectively grab the private "blockReader" field from the underlying
      // DFSInputStream so the test can inject an artificial slowdown.
      DFSInputStream dfsClientIn = findDFSClientInputStream(in);
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

      blockReader.setArtificialSlowdown(1000);
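The helper findDFSClientInputStream is not shown in this listing. Assuming a Hadoop version where FSDataInputStream exposes its wrapped stream, a plausible (hypothetical) implementation is just an unwrap-and-cast:

    // Hypothetical helper, not part of the original listing: unwrap the
    // DFSInputStream from the FSDataInputStream returned by FileSystem.open().
    private static DFSInputStream findDFSClientInputStream(FSDataInputStream in) {
      return (DFSInputStream) in.getWrappedStream();
    }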


      // Pessimistically update the last block's length from the DataNode.
      // The file could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        try {
          DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

          if (locBlks.locatedBlockCount() >= blks.length
              && blks[index] != null && locBlks.get(index) != null
              && blks[index].getBlockId() == locBlks.get(index).getBlock().getBlockId()) {
            blks[index].setNumBytes(locBlks.get(index).getBlock().getNumBytes());
            return;
          }
        } finally {
          // Close the stream even on the early-return path above; the original
          // snippet leaked it whenever the block length was updated.
          stm.close();
        }
        client.close(); // close dfs client
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
      }

    // The client's name-node proxy should stay the same as long as the
    // namenode keeps reporting the same fingerprint.
    //
    ClientProtocol namenode1 = client.namenode;
    cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
    DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
    int val = dis.read();
    TestCase.assertEquals(66, val);
    dis.close();
    TestCase.assertSame(namenode1, client.namenode);

    // The namenode's fingerprint will now differ from the client's, so the
    // client is supposed to get a new proxy.
    //
    cluster.getNameNode().setClientProtocolMethodsFingerprint(888);
    dis = client.open("/testClientUpdateMethodList1.txt");
    val = dis.read();
    TestCase.assertEquals(88, val);
    dis.close();
    // Since the name-node's method list didn't change, the fingerprint
    // obtained through the new proxy should be the same as before.
    TestCase.assertNotSame(namenode1, client.namenode);
  }

        // Now the tricky case - if we fail a few times on one read, then succeed,
        // then fail some more on another read, it shouldn't fail.
        doAnswer(new FailNTimesAnswer(preSpyNN, numDataNode,
            Math.min(maxBlockAcquires, numDataNode)))
          .when(spyNN).openAndFetchMetaInfo(anyString(), anyLong(), anyLong());
        DFSInputStream is = client.open(file.toString());
        byte[] buf = new byte[10];
        IOUtils.readFully(is, buf, 0, buf.length);
 
        DFSClient.LOG.info("First read successful after some failures.");
 
        // Further reads at this point will succeed since it has the good block locations.
        // So, force the block locations on this stream to be refreshed from bad info.
        // When reading again, it should start from a fresh failure count, since
        // we're starting a new operation on the user level.
        doAnswer(new FailNTimesAnswer(preSpyNN, numDataNode,
            Math.min(maxBlockAcquires, numDataNode)))
          .when(spyNN).openAndFetchMetaInfo(anyString(), anyLong(), anyLong());
        is.openInfo();
        // Seek to beginning forces a reopen of the BlockReader - otherwise it'll
        // just keep reading on the existing stream and the fact that we've poisoned
        // the block info won't do anything.
        is.seek(0);
        IOUtils.readFully(is, buf, 0, buf.length);
 
      } finally {
        if (null != cluster) {
          cluster.shutdown();
        }
      }
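The FailNTimesAnswer class itself is elided from this listing. As a rough, hypothetical sketch of the general shape of such a Mockito Answer (fail a fixed number of times, then fall through to the real namenode; the test's actual version may instead hand back bad block locations):

    // Hypothetical sketch only; the test's actual FailNTimesAnswer is not shown.
    private static class FailNTimesAnswer implements Answer<Object> {
      private final Object realNameNode;
      private int failuresLeft;

      // numDataNode mirrors the call site above; it is unused in this sketch.
      FailNTimesAnswer(Object realNameNode, int numDataNode, int timesToFail) {
        this.realNameNode = realNameNode;
        this.failuresLeft = timesToFail;
      }

      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        if (failuresLeft-- > 0) {
          throw new IOException("injected failure for testing");
        }
        // Once the failure budget is spent, delegate to the real object.
        return invocation.getMethod().invoke(realNameNode, invocation.getArguments());
      }
    }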

    return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {

      @Override
      public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
        // On a cache miss, open a fresh DFSInputStream for this user/inode pair
        // and hand it back wrapped in an FSDataInputStream.
        DFSClient client = getDfsClient(key.userId);
        DFSInputStream dis = client.open(key.inodePath);
        return new FSDataInputStream(dis);
      }
    };
  }
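A CacheLoader like this is meant to be handed to Guava's CacheBuilder. A minimal sketch of the wiring, with illustrative (not actual) sizing and expiry values, and a hypothetical factory name standing in for the method above:

    // Illustrative only: build a LoadingCache around the loader shown above.
    LoadingCache<DFSInputStreamCaheKey, FSDataInputStream> inputStreamCache =
        CacheBuilder.newBuilder()
            .maximumSize(1024)                        // illustrative capacity
            .expireAfterAccess(10, TimeUnit.MINUTES)  // illustrative expiry
            .build(createInputStreamLoader());        // hypothetical factory name

    // First access opens the stream via load(); later accesses reuse it.
    FSDataInputStream fis = inputStreamCache.getUnchecked(key);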


   
    try {
      int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
      byte[] readbuffer = new byte[buffSize];

      DFSInputStream is = dfsClient.open(Nfs3Utils.getFileIdPath(handle));
      FSDataInputStream fis = new FSDataInputStream(is);

      // Positioned read: fetch up to "count" bytes starting at "offset"
      // without moving the stream's current position.
      int readCount = fis.read(offset, readbuffer, 0, count);
      fis.close();
      // ... (remainder of the method is truncated in the original listing)
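One fragile spot above: if the positioned read throws, fis is never closed. A more defensive variant of the same read, sketched under the same assumptions (handle, offset, count, and readbuffer as in the snippet):

    // Defensive variant of the read above: the stream is closed even on failure.
    FSDataInputStream fis =
        new FSDataInputStream(dfsClient.open(Nfs3Utils.getFileIdPath(handle)));
    try {
      int readCount = fis.read(offset, readbuffer, 0, count);
      if (readCount < 0) {
        // EOF: "offset" was at or past the end of the file.
      }
    } finally {
      fis.close(); // closed even if the read throws
    }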
