Examples of DFSDataInputStream


Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Excerpt (truncated by the source page): WebHDFS-style request dispatch.
    // Handles the OPEN operation by opening the file via a DFSClient and
    // streaming its bytes back to the HTTP caller.
    switch(op.getValue()) {
    case OPEN:
    {
      // Read-buffer size resolved from the request parameter / configuration.
      final int b = bufferSize.getValue(conf);
      // Wrap the raw DFSInputStream so HDFS-specific accessors
      // (e.g. getVisibleLength) are available to the handler.
      final DFSDataInputStream in = new DFSClient.DFSDataInputStream(
          dfsclient.open(fullpath, b, true, null));
      // Position the stream at the client-requested starting offset.
      in.seek(offset.getValue());

      // Lazily stream the file content when JAX-RS invokes write(OutputStream).
      final StreamingOutput streaming = new StreamingOutput() {
        @Override
        public void write(final OutputStream out) throws IOException {
          // Optional length parameter; null presumably means "copy to EOF"
          // (see the null check in the similar handler below) — excerpt is
          // truncated before the copy logic, so confirm against full source.
          final Long n = length.getValue();
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Excerpt (truncated by the source page): OPEN handler variant that owns
    // its DFSClient and cleans up both the stream and the client on failure.
    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      // This variant creates a dedicated DFSClient for the request, so it is
      // responsible for closing it on every path.
      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
      DFSDataInputStream in = null;
      try {
        in = new DFSClient.DFSDataInputStream(
        dfsclient.open(fullpath, b, true, null));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        // Best-effort cleanup of whatever was opened before rethrowing.
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
      // Effectively-final alias so the anonymous inner class below can
      // capture the stream (required pre-Java-8 for anonymous classes).
      final DFSDataInputStream dis = in;
      final StreamingOutput streaming = new StreamingOutput() {
        @Override
        public void write(final OutputStream out) throws IOException {
          // Optional byte count to copy; null means copy to end of stream.
          final Long n = length.getValue();
          // Local copies are nulled out after a successful close so the
          // finally block only cleans up on the failure path.
          DFSDataInputStream dfsin = dis;
          DFSClient client = dfsclient;
          try {
            if (n == null) {
              // No length requested: copy everything from the seek offset on.
              IOUtils.copyBytes(dfsin, out, b);
            } else {
              // Copy exactly n bytes; 'false' keeps the streams open here —
              // they are closed explicitly just below.
              IOUtils.copyBytes(dfsin, out, n, b, false);
            }
            dfsin.close();
            dfsin = null;
            client.close();
            client = null;
          } finally {
            // Non-null only if an exception skipped the explicit close above.
            IOUtils.cleanup(LOG, dfsin);
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Excerpt (truncated by the source page): OPEN handler variant that hands
    // the open stream to an OpenEntity and returns a JAX-RS Response, leaving
    // stream/client lifetime to the entity writer.
    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      // Per-request DFSClient; cleaned up here only on the failure path —
      // on success, ownership passes to the OpenEntity.
      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
      DFSDataInputStream in = null;
      try {
        in = new DFSClient.DFSDataInputStream(
            dfsclient.open(fullpath, b, true));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
     
      // Bytes to serve: the explicit length if given, otherwise everything
      // visible from the seek offset to the end of the file.
      final long n = length.getValue() != null? length.getValue()
        : in.getVisibleLength() - offset.getValue();
      // Respond with a raw octet stream; OpenEntity carries the stream,
      // byte count, and the client to close when streaming finishes.
      return Response.ok(new OpenEntity(in, n, dfsclient)).type(
          MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GETFILECHECKSUM:
    {
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Excerpt (method signature not shown): test helper that opens a file as
    // a test user and checks visible length plus readable content.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
                                 new String[] {"supergroup"});
   
    // Obtain a FileSystem bound to the test user's credentials.
    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
   
    final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);

    //Check visible length: at least 'expectedsize' bytes must be readable.
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read? The file is expected to contain bytes 0,1,2,... in order.
    // NOTE(review): the statement below is missing a trailing semicolon in
    // this excerpt — likely a scraping artifact; it would not compile as-is.
    for(int i = 0; i < expectedsize; i++) {
      Assert.assertEquals((byte)i, (byte)in.read())
    }

    in.close();
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Tail of a cleanup method (signature not shown): remove the test tree
    // and drop the cached file list.
    fs.delete(root, true);
    files = null;
  }
 
  // Returns the ExtendedBlock backing the first block of 'path'. Reading one
  // byte forces the client to locate/open the first block before querying it.
  // NOTE(review): excerpt is truncated — the closing brace is missing and the
  // stream 'in' is apparently never closed; confirm against the full source.
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    DFSDataInputStream in =
      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
 
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Tail of a cleanup method (signature not shown): remove the test tree
    // and drop the cached file list.
    fs.delete(root, true);
    files = null;
  }
 
  // Returns the ExtendedBlock backing the first block of 'path'. Reading one
  // byte forces the client to locate/open the first block before querying it.
  // NOTE(review): excerpt is truncated — the closing brace is missing and the
  // stream 'in' is apparently never closed; confirm against the full source.
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    DFSDataInputStream in =
      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
 
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Excerpt (method signature not shown): test helper that opens a file as
    // a test user and checks visible length plus readable content.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
                                 new String[] {"supergroup"});
   
    // Obtain a FileSystem bound to the test user's credentials.
    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
   
    final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);

    //Check visible length: at least 'expectedsize' bytes must be readable.
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read? The file is expected to contain bytes 0,1,2,... in order.
    // NOTE(review): the statement below is missing a trailing semicolon in
    // this excerpt — likely a scraping artifact; it would not compile as-is.
    for(int i = 0; i < expectedsize; i++) {
      Assert.assertEquals((byte)i, (byte)in.read())
    }

    in.close();
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Excerpt (truncated by the source page): test that an unclosed file's
    // visible length survives a NameNode restart, and that open() fails while
    // the NameNode sits in safe mode with no DataNodes registered.
    final Configuration conf = new Configuration();
    // Small block size (512B) so a 1030-byte file spans multiple blocks.
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);

    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    DFSDataInputStream in = null;
    FSDataOutputStream out = null;
    DistributedFileSystem dfs = null;
    try {
      Path path = new Path(MiniDFSCluster.getBaseDir().getPath(), "test");
      dfs = (DistributedFileSystem) cluster.getFileSystem();
      out = dfs.create(path);
      int fileLength = 1030;
      out.write(new byte[fileLength]);
      // sync() flushes to DataNodes without closing the file, so the file is
      // still under construction across the NameNode restart below.
      out.sync();
      cluster.restartNameNode();
      cluster.waitActive();
      in = (DFSDataInputStream) dfs.open(path, 1024);
      // Verify the length when we just restart NN. DNs will register
      // immediately.
      Assert.assertEquals(fileLength, in.getVisibleLength());
      // Now restart the NN with no DataNodes: it must stay in safe mode.
      cluster.shutdownDataNodes();
      cluster.restartNameNode(false);
      // This is just for ensuring NN started.
      verifyNNIsInSafeMode(dfs);

      try {
        // Opening a file while the NN is in safe mode must fail.
        in = (DFSDataInputStream) dfs.open(path);
        Assert.fail("Expected IOException");
      } catch (IOException e) {
        Assert.assertTrue(e.getLocalizedMessage().indexOf(
            "Name node is in safe mode") >= 0);
      }

    } finally {
      if (null != in) {
        in.close();
      }
      if (null != dfs) {
        // Mark the client dead so shutdown doesn't block on lease renewal.
        dfs.dfs.clientRunning = false;
      }
      cluster.shutdown();
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Tail of a cleanup method (signature not shown): remove the test tree
    // and drop the cached file list.
    fs.delete(root, true);
    files = null;
  }
 
  // Returns the Block backing the first block of 'path' (older API variant
  // returning Block rather than ExtendedBlock). Reading one byte forces the
  // client to locate/open the first block before querying it.
  // NOTE(review): excerpt is truncated — the closing brace is missing and the
  // stream 'in' is apparently never closed; confirm against the full source.
  public static Block getFirstBlock(FileSystem fs, Path path) throws IOException {
    DFSDataInputStream in =
      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
 
View Full Code Here

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    // Tail of a cleanup method (signature not shown): remove the test tree
    // and drop the cached file list.
    fs.delete(root, true);
    files = null;
  }
 
  // Returns the ExtendedBlock backing the first block of 'path'. Reading one
  // byte forces the client to locate/open the first block before querying it.
  // NOTE(review): excerpt is truncated — the closing brace is missing and the
  // stream 'in' is apparently never closed; confirm against the full source.
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    DFSDataInputStream in =
      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
 
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle, Inc. Contact coftware#gmail.com.