Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSInputStream
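The snippets below come from Hadoop source and test code. For orientation, here is a minimal sketch of the pattern they all build on: open a path through a DFSClient, read through the returned DFSInputStream, and close both. This assumes a reachable HDFS cluster configured via fs.defaultFS; the path and class name are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.apache.hadoop.io.IOUtils;

    import java.io.IOException;

    public class DFSInputStreamExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(); // picks up fs.defaultFS
        DFSClient client = new DFSClient(conf);
        DFSInputStream in = null;
        try {
          in = client.open("/example.txt"); // placeholder path
          byte[] buf = new byte[(int) in.getFileLength()];
          IOUtils.readFully(in, buf, 0, buf.length);
          System.out.println("read " + buf.length + " bytes");
        } finally {
          IOUtils.closeStream(in); // tolerates null
          client.close();
        }
      }
    }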


    private byte[] cacheInitialContents() throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int)status.getLen()];
      DFSInputStream in = null;
      try {
        in = dfsClient.open(name);
        IOUtils.readFully(in, content, 0, content.length);
      } finally {
        // closeStream tolerates null, so this is safe even if open() threw
        IOUtils.closeStream(in);
      }
      return content;
    }
View Full Code Here
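On Java 7 and later, the same read can be written with try-with-resources, which closes the stream on every path without the null-guarded finally block. A sketch under the same assumptions (the dfsClient and name fields from the snippet above):

    private byte[] cacheInitialContents() throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int) status.getLen()];
      // DFSInputStream is Closeable, so it is closed even if readFully throws.
      try (DFSInputStream in = dfsClient.open(name)) {
        IOUtils.readFully(in, content, 0, content.length);
      }
      return content;
    }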


    public void checkSalvagedRemains() throws IOException {
      int chainIdx = 0;
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      long length = status.getLen();
      int numBlocks = (int)((length + blockSize - 1) / blockSize);
      DFSInputStream in = null;
      byte[] blockBuffer = new byte[blockSize];

      try {
        for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
          if (blocksToCorrupt.contains(blockIdx)) {
            if (in != null) {
              in.close();
              in = null;
            }
            continue;
          }
          if (in == null) {
            in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
            chainIdx++;
          }
          int len = blockBuffer.length;
          if (blockIdx == (numBlocks - 1)) {
            // The last block might not be full-length; a remainder of zero
            // means the file is an exact multiple of the block size.
            len = (int)(in.getFileLength() % blockSize);
            if (len == 0) len = blockBuffer.length;
          }
          IOUtils.readFully(in, blockBuffer, 0, len);
          int startIdx = blockIdx * blockSize;
          for (int i = 0; i < len; i++) {
View Full Code Here
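The trickiest line above is the length of the final block: a file whose size is an exact multiple of the block size has a full last block, not an empty one, which is what the len == 0 fallback handles. A standalone sketch of that arithmetic, with illustrative names:

    /** Length of block blockIdx in a file of fileLength bytes, blockSize bytes per block. */
    static int blockLength(long fileLength, int blockSize, int blockIdx) {
      int numBlocks = (int) ((fileLength + blockSize - 1) / blockSize); // ceiling division
      if (blockIdx < numBlocks - 1) {
        return blockSize;                  // every block except the last is full
      }
      int rem = (int) (fileLength % blockSize);
      return rem == 0 ? blockSize : rem;   // exact multiple => full final block
    }

For example, blockLength(10, 4, 2) is 2, while blockLength(8, 4, 1) is 4.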

      setUpForDoGetTest(cluster, testFile);

      Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
          .getOutputStream();
      DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);

      Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());

      Mockito.doReturn(4L).when(fsMock).getFileLength();
View Full Code Here
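The doThrow/doReturn style above stubs methods without ever invoking the real implementation, which is what makes it safe on mocks of heavyweight stream classes. A minimal self-contained sketch of the same pattern (class and test names are hypothetical):

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.junit.Test;
    import org.mockito.Mockito;

    public class TestDFSInputStreamMocking {
      @Test
      public void stubbedFileLengthIsReturned() throws Exception {
        DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);
        // doReturn(..).when(mock).method() never calls the real getFileLength(),
        // so no NameNode or DataNode is needed.
        Mockito.doReturn(4L).when(fsMock).getFileLength();
        assertEquals(4L, fsMock.getFileLength());
      }
    }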

    // The client's name-node proxy should stay the same if the namenode
    // reports the same fingerprint.
    //
    ClientProtocol namenode1 = client.namenode;
    cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
    DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
    int val = dis.read();
    TestCase.assertEquals(66, val);
    dis.close();
    TestCase.assertSame(namenode1, client.namenode);

    // The namenode's fingerprint will now differ from the client's, so the
    // client is supposed to get a new proxy.
    //
    cluster.getNameNode().setClientProtocolMethodsFingerprint(888);
    dis = client.open("/testClientUpdateMethodList1.txt");
    val = dis.read();
    TestCase.assertEquals(88, val);
    dis.close();
    // The fingerprint mismatch should have forced the client to create a new
    // proxy, even though the name-node's method list itself is unchanged.
    TestCase.assertNotSame(namenode1, client.namenode);
  }
View Full Code Here

    // Current system has foo deleted and bar with length 2
    // test snapshot has foo with length 1 and bar with length 1

    // Checking current file system
    assertTrue(!dfs.exists(foo));
    DFSInputStream in = client.open("/bar");
    assertTrue(in.getFileLength() == 2);
    assertTrue(in.read() == 1);
    assertTrue(in.read() == 2);
    assertTrue(in.read() == -1); //eof

    // Checking test snapshot
    in = ssClient.open("test", "/foo");
    assertTrue(in.getFileLength() == 1);
    assertTrue(in.read() == 0);
    assertTrue(in.read() == -1); //eof
    in = ssClient.open("test", "/bar");
    assertTrue(in.getFileLength() == 1);
    assertTrue(in.read() == 1);
    assertTrue(in.read() == -1); //eof
  }
View Full Code Here

               path);

      // Pessimistically update the last block's length from the DataNode.
      // The file could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        try {
          DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

          if (locBlks.locatedBlockCount() >= blks.length) {
            if (blks[index] != null && locBlks.get(index) != null) {
              if (blks[index].getBlockId() == locBlks.get(index).getBlock().getBlockId()) {
                blks[index].setNumBytes(locBlks.get(index).getBlock().getNumBytes());
                return;
              }
            }
          }
        } finally {
          // Close the stream on every path, including the early return above,
          // so it is never leaked.
          stm.close();
        }
        client.close(); // close dfs client
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
      }
View Full Code Here

      for (FileStatus child: children) {
        if (!child.isDir()) { // get block ids for file
          Path path = child.getPath(); // paths will be unique
          fileMap.put(path, new ArrayList<Long>());

          DFSInputStream stm = client.open(child.getPath().toUri().getPath());
          LocatedBlocks blocks = stm.fetchLocatedBlocks();
          stm.close();

          for (int i = 0; i < blocks.locatedBlockCount(); i++) {
            Long blockId = blocks.get(i).getBlock().getBlockId();
            fileMap.get(path).add(blockId); // add to file block list
            blockRefMap.put(blockId, null); // mark as unreferenced
View Full Code Here

      stm.close();

      in = fs.open(file1);
      in.readByte();

      DFSInputStream dfsClientIn = findDFSClientInputStream(in);
      // Reach into the private blockReader field so the test can inject an
      // artificial read delay.
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

      blockReader.setArtificialSlowdown(1000);
View Full Code Here
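The getDeclaredField/setAccessible dance above is a common fault-injection trick in Hadoop tests. A generic sketch of that reflection pattern; note that getDeclaredField must be called on the class that actually declares the field, which is why the snippet uses DFSInputStream.class rather than the stream's runtime class:

    import java.lang.reflect.Field;

    final class PrivateFieldReader {
      /** Read a private field declared directly on declaringClass. */
      static Object read(Object target, Class<?> declaringClass, String fieldName)
          throws ReflectiveOperationException {
        Field f = declaringClass.getDeclaredField(fieldName);
        f.setAccessible(true); // lift the private-access check for this Field
        return f.get(target);
      }
    }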

    LocatedBlocks[] blocksArr = ssProtocol.getLocatedBlocks("test", "/bar/foo");
    assertTrue(blocksArr.length == 1); // 1 file

    DFSClient client = new DFSClient(conf);
    DFSInputStream stm = client.open("/bar/foo");
    LocatedBlocks locBlks = blocksArr[0];
    DFSLocatedBlocks dfsLocBlks = stm.fetchLocatedBlocks();
    stm.close();

    assertTrue(locBlks.locatedBlockCount() == 1); // one byte, so exactly one block
    assertTrue(locBlks.locatedBlockCount() == dfsLocBlks.locatedBlockCount());
    assertTrue(locBlks.get(0).getBlock().getBlockId() ==
               dfsLocBlks.get(0).getBlock().getBlockId());
    assertTrue(locBlks.getFileLength() == 1);

    blocksArr = ssProtocol.getLocatedBlocks("test", "/bar/woot");
    assertTrue(blocksArr.length == 1); // 1 file

    stm = client.open("/bar/woot");
    locBlks = blocksArr[0];
    dfsLocBlks = stm.fetchLocatedBlocks();
    stm.close();

    assertTrue(locBlks.locatedBlockCount() == 1); // one byte, so exactly one block
    assertTrue(locBlks.locatedBlockCount() == dfsLocBlks.locatedBlockCount());
    assertTrue(locBlks.get(0).getBlock().getBlockId() ==
               dfsLocBlks.get(0).getBlock().getBlockId());
View Full Code Here

   
    try {
      for (Map.Entry<String, OutputStream> file : files_.entrySet()) {
        long startTime = System.nanoTime();
        DFSInputStream in = dfsClient_.open(file.getKey());
        // Record the open() latency in seconds; autoboxing replaces the
        // deprecated new Double(..) constructor.
        timingOpen_.add((System.nanoTime() - startTime) / 1E9);

        in.read();
        in.close();
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
   
View Full Code Here
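System.nanoTime() is the right clock for the measurement above, since System.currentTimeMillis() can jump when the wall clock is adjusted. A small self-contained helper in the same spirit, with illustrative names:

    import java.util.concurrent.Callable;

    final class Timed<T> {
      final T result;
      final double seconds;

      private Timed(T result, double seconds) {
        this.result = result;
        this.seconds = seconds;
      }

      /** Run the task and report its wall time in seconds. */
      static <T> Timed<T> run(Callable<T> task) throws Exception {
        long start = System.nanoTime();
        T result = task.call();
        return new Timed<T>(result, (System.nanoTime() - start) / 1E9);
      }
    }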
