Examples of HdfsDataOutputStream


Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
      LOG.info("No opened stream for fileId:" + fileHandle.getFileId());

      String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
      HdfsDataOutputStream fos = null;
      Nfs3FileAttributes latestAttr = null;
      try {
        int bufferSize = config.getInt(
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
       
        fos = dfsClient.append(fileIdPath, bufferSize, null, null);

        latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
      } catch (RemoteException e) {
        IOException io = e.unwrapRemoteException();
        if (io instanceof AlreadyBeingCreatedException) {
          LOG.warn("Can't append file:" + fileIdPath
              + ". Possibly the file is being closed. Drop the request:"
              + request + ", wait for the client to retry...");
          return;
        }
        throw e;
      } catch (IOException e) {
        LOG.error("Can't apapend to file:" + fileIdPath, e);
        if (fos != null) {
          fos.close();
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
            preOpAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
            fileWcc, count, request.getStableHow(),
            Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
            new XDR(), xid, new VerifierNone()), xid);
        return;
      }

      // Add open stream
      String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
          NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
      openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
          + fileHandle.getFileId(), dfsClient, iug, aixCompatMode);

      if (!addOpenFileStream(fileHandle, openFileCtx)) {
        LOG.info("Can't add new stream. Close it. Tell client to retry.");
        try {
          fos.close();
        } catch (IOException e) {
          LOG.error("Can't close stream for fileId:" + handle.getFileId(), e);
        }
        // Notify client to retry
        WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
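
The snippet above is from the HDFS NFS3 gateway write path: when no stream is cached for the file handle, it opens an append stream through DFSClient and maps failures onto NFS3 error responses. For orientation, here is a minimal user-level sketch of the same append-then-flush pattern. It assumes a reachable cluster behind fs.defaultFS; the path and class name are hypothetical.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

    public class AppendSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes fs.defaultFS points at HDFS
        try (FileSystem fs = FileSystem.get(conf)) {
          Path path = new Path("/tmp/append-example.dat"); // hypothetical path
          if (!fs.exists(path)) {
            fs.create(path).close(); // append requires an existing file
          }
          FSDataOutputStream out = fs.append(path);
          try {
            // On HDFS the returned stream is backed by HdfsDataOutputStream.
            if (out instanceof HdfsDataOutputStream) {
              HdfsDataOutputStream hout = (HdfsDataOutputStream) out;
              System.out.println("replication: " + hout.getCurrentBlockReplication());
            }
            out.write("appended line\n".getBytes(StandardCharsets.UTF_8));
            out.hflush(); // make the new bytes visible to readers
          } finally {
            out.close();
          }
        }
      }
    }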

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

    OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
    if (openFileCtx == null) {
      LOG.info("No opened stream for fileId:" + fileHandle.getFileId());

      String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
      HdfsDataOutputStream fos = null;
      Nfs3FileAttributes latestAttr = null;
      try {
        int bufferSize = config.getInt(
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
       
        fos = dfsClient.append(fileIdPath, bufferSize, null, null);

        latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
      } catch (IOException e) {
        LOG.error("Can't apapend to file:" + fileIdPath + ", error:" + e);
        if (fos != null) {
          fos.close();
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
            preOpAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
            fileWcc, count, request.getStableHow(),

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

      LOG.error("Setting file size is not supported when creating file: "
          + fileName + " dir fileId:" + dirHandle.getFileId());
      return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    HdfsDataOutputStream fos = null;
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpObjAttr = null;
    FileHandle fileHandle = null;
    WccData dirWcc = null;
    try {
      preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (preOpDirAttr == null) {
        LOG.error("Can't get path for dirHandle:" + dirHandle);
        return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
      }
     
      if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
        return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null,
            preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
                preOpDirAttr));
      }

      String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
      SetAttr3 setAttr3 = request.getObjAttr();
      assert (setAttr3 != null);
      FsPermission permission = setAttr3.getUpdateFields().contains(
          SetAttrField.MODE) ? new FsPermission((short) setAttr3.getMode())
          : FsPermission.getDefault().applyUMask(umask);
         
      EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ?
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
          EnumSet.of(CreateFlag.CREATE);
     
      fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission,
          flag, false, replication, blockSize, null, bufferSize, null),
          statistics);
     
      if ((createMode == Nfs3Constant.CREATE_UNCHECKED)
          || (createMode == Nfs3Constant.CREATE_GUARDED)) {
        // Set group if it's not specified in the request.
        if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) {
          setAttr3.getUpdateFields().add(SetAttrField.GID);
          setAttr3.setGid(securityHandler.getGid());
        }
        setattrInternal(dfsClient, fileIdPath, setAttr3, false);
      }

      postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
      dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
          dfsClient, dirFileIdPath, iug);
    } catch (IOException e) {
      LOG.error("Exception", e);
      if (fos != null) {
        try {
          fos.close();
        } catch (IOException e1) {
          LOG.error("Can't close stream for dirFileId:" + dirHandle.getFileId()
              + " filename: " + fileName);
        }
      }
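
The CREATE3 handler above maps NFS create modes onto HDFS create flags: any non-exclusive mode gets CREATE plus OVERWRITE, while CREATE_EXCLUSIVE gets CREATE alone. A minimal sketch of the equivalent call through the public FileSystem.create overload follows; the path, permission, and sizes are hypothetical.

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class CreateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes an HDFS default filesystem
        try (FileSystem fs = FileSystem.get(conf)) {
          Path path = new Path("/tmp/create-example.dat"); // hypothetical path
          FsPermission permission = new FsPermission((short) 0644);
          // CREATE|OVERWRITE mirrors the non-exclusive branch above.
          EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
          FSDataOutputStream fos = fs.create(path, permission, flags,
              4096,        // buffer size
              (short) 3,   // replication
              128L << 20,  // block size: 128 MB
              null);       // no progress callback
          fos.write(new byte[] { 1, 2, 3 });
          fos.close();
        }
      }
    }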

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

      fs = cluster.getFileSystem();
      final int nnport = cluster.getNameNodePort();

      // create a new file.
      Path file1 = new Path("/filestatus.dat");
      HdfsDataOutputStream stm = create(fs, file1, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file1);
      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
          stm.getCurrentBlockReplication());

      // write two full blocks.
      writeFile(stm, numBlocks * blockSize);
      stm.hflush();
      assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
          stm.getCurrentBlockReplication());

      // rename file while keeping it open.
      Path fileRenamed = new Path("/filestatusRenamed.dat");
      fs.rename(file1, fileRenamed);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Renamed file " + file1 + " to " +
                         fileRenamed);
      file1 = fileRenamed;

      // create another new file.
      //
      Path file2 = new Path("/filestatus2.dat");
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file2);

      // create yet another new file with full path name.
      // rename it while open
      //
      Path file3 = new Path("/user/home/fullpath.dat");
      FSDataOutputStream stm3 = createFile(fs, file3, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file3);
      Path file4 = new Path("/user/home/fullpath4.dat");
      FSDataOutputStream stm4 = createFile(fs, file4, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file4);

      fs.mkdirs(new Path("/bin"));
      fs.rename(new Path("/user/home"), new Path("/bin"));
      Path file3new = new Path("/bin/home/fullpath.dat");
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Renamed file " + file3 + " to " +
                         file3new);
      Path file4new = new Path("/bin/home/fullpath4.dat");
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Renamed file " + file4 + " to " +
                         file4new);

      // restart cluster with the same namenode port as before.
      // This ensures that leases are persisted in fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(2*MAX_IDLE_TIME);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                               .format(false)
                                               .build();
      cluster.waitActive();

      // restart cluster yet again. This triggers the code to read in
      // persistent leases from fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                                .format(false)
                                                .build();
      cluster.waitActive();
      fs = cluster.getFileSystem();

      // instruct the dfsclient to use a new filename when it requests
      // new blocks for files that were renamed.
      DFSOutputStream dfstream = (DFSOutputStream)
                                                 (stm.getWrappedStream());
      dfstream.setTestFilename(file1.toString());
      dfstream = (DFSOutputStream) (stm3.getWrappedStream());
      dfstream.setTestFilename(file3new.toString());
      dfstream = (DFSOutputStream) (stm4.getWrappedStream());
      dfstream.setTestFilename(file4new.toString());

      // write 1 byte to file.  This should succeed because the
      // namenode should have persisted leases.
      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
      stm.write(buffer);
      stm.close();
      stm2.write(buffer);
      stm2.close();
      stm3.close();
      stm4.close();

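The test above depends on hflush() pushing buffered data to the datanodes before the cluster restarts. That visibility guarantee can be sketched in isolation; the path is hypothetical, and one process stands in for a second reader.

    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushVisibilitySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes an HDFS default filesystem
        try (FileSystem fs = FileSystem.get(conf)) {
          Path path = new Path("/tmp/hflush-example.dat"); // hypothetical path
          FSDataOutputStream out = fs.create(path);
          byte[] payload = "flushed, not yet closed\n".getBytes(StandardCharsets.UTF_8);
          out.write(payload);
          out.hflush(); // new readers can now see these bytes; the file stays open
          try (InputStream in = fs.open(path)) {
            byte[] buf = new byte[payload.length];
            int n = in.read(buf); // reads the flushed bytes while the writer holds the lease
            System.out.println("read " + n + " bytes from an open file");
          }
          out.close();
        }
      }
    }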

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

      dfs = (DistributedFileSystem)cluster.getFileSystem();

      // create a new file.
      final String f = DIR + "foo";
      final Path fpath = new Path(f);
      HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
      out.write("something".getBytes());
      out.hflush();
      int actualRepl = out.getCurrentBlockReplication();
      assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
                 actualRepl == DATANODE_NUM);

      // set the soft and hard limit to be 1 second so that the
      // namenode triggers lease recovery
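
Alongside getCurrentBlockReplication() and hflush() shown above, HdfsDataOutputStream accepts sync flags on hsync. The sketch below (hypothetical path) persists the written bytes and also records the new file length at the NameNode, so getFileStatus reflects it.

    import java.nio.charset.StandardCharsets;
    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    public class HsyncSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes an HDFS default filesystem
        try (FileSystem fs = FileSystem.get(conf)) {
          Path path = new Path("/tmp/hsync-example.dat"); // hypothetical path
          // On HDFS, create() returns an HdfsDataOutputStream, so the cast holds.
          HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(path);
          out.write("durable bytes".getBytes(StandardCharsets.UTF_8));
          // hsync flushes to disk on the datanodes; UPDATE_LENGTH additionally
          // records the new file length at the NameNode.
          out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
          System.out.println("visible length: " + fs.getFileStatus(path).getLen());
          out.close();
        }
      }
    }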

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  public HdfsDataOutputStream append(final String src, final int buffersize,
      final Progressable progress, final FileSystem.Statistics statistics
      ) throws IOException {
    final DFSOutputStream out = append(src, buffersize, progress);
    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
  }
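
Note the third constructor argument, out.getInitialLen(): seeding HdfsDataOutputStream with the initial file length is what makes getPos() on an append stream count from the start of the file rather than from the append point. A quick sketch of that behavior with a hypothetical path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendPosSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes an HDFS default filesystem
        try (FileSystem fs = FileSystem.get(conf)) {
          Path path = new Path("/tmp/append-pos-example.dat"); // hypothetical path
          try (FSDataOutputStream first = fs.create(path)) {
            first.write(new byte[100]); // file now holds 100 bytes
          }
          FSDataOutputStream out = fs.append(path);
          System.out.println("pos at open: " + out.getPos());     // expected: 100
          out.write(new byte[10]);
          System.out.println("pos after write: " + out.getPos()); // expected: 110
          out.close();
        }
      }
    }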

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  protected HdfsDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
    short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
    statistics.incrementWriteOps(1);
    return new HdfsDataOutputStream(dfs.primitiveCreate(
        getPathName(fixRelativePart(f)),
        absolutePermission, flag, true, replication, blockSize,
        progress, bufferSize, checksumOpt),statistics);
   }

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FSDataOutputStream>() {
      @Override
      public FSDataOutputStream doCall(final Path p) throws IOException,
          UnresolvedLinkException {
        return new HdfsDataOutputStream(dfs.create(getPathName(p), permission,
            flag, false, replication, blockSize, progress, bufferSize, null),
            statistics);
      }

      @Override