Package org.apache.hadoop.fs.permission

Examples of org.apache.hadoop.fs.permission.FsPermission
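FsPermission models the POSIX-style user/group/other rwx bits that Hadoop filesystems attach to paths. Before the excerpts below, here is a minimal, self-contained sketch of the constructors and umask handling the examples rely on. It assumes nothing beyond the FsPermission and FsAction classes themselves; the octal values are illustrative, not taken from any particular example.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionBasics {
  public static void main(String[] args) {
    // From an octal short: 0755 -> rwxr-xr-x
    FsPermission fromOctal = new FsPermission((short) 0755);

    // From one FsAction per field: user, group, other
    FsPermission fromActions =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);

    // A umask parsed from its string form, as it appears in configuration
    FsPermission umask = new FsPermission("022");

    // applyUMask clears the masked bits: 0777 masked by 022 gives 0755
    FsPermission masked = new FsPermission((short) 0777).applyUMask(umask);

    System.out.println(fromOctal);   // rwxr-xr-x
    System.out.println(fromActions); // rwxr-xr-x
    System.out.println(masked);      // rwxr-xr-x
  }
}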


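Choosing between an explicitly configured file mode and a reference file's permission, logging (not rethrowing) failures: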
        return false;
      }

      try {
        if (filesMode > 0 && stat.getPermission().toShort() != filesMode) {
          outputFs.setPermission(path, new FsPermission(filesMode));
        } else if (!stat.getPermission().equals(refStat.getPermission())) {
          outputFs.setPermission(path, refStat.getPermission());
        }
      } catch (IOException e) {
        LOG.error("Unable to set the permission for file=" + path, e);
      }


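Resolving the permissions for a snapshot info file from a configured umask key before creating it: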
   * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
   *           failure
   */
  public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
      throws IOException {
    FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
      HConstants.DATA_FILE_UMASK_KEY);
    Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
    try {
      FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
      try {

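Deriving effective file permissions by applying a configured umask to full rwx permissions, with a fallback to the defaults when no mask is set: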
    boolean enablePermissions = conf.getBoolean(
        HConstants.ENABLE_DATA_FILE_UMASK, false);

    if (enablePermissions) {
      try {
        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
        // make sure that we have a mask; if not, go with the default
        String mask = conf.get(permissionConfKey);
        if (mask == null)
          return FsPermission.getDefault();
        // apply the umask
        FsPermission umask = new FsPermission(mask);
        return perm.applyUMask(umask);
      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Incorrect umask attempted to be created: "
                + conf.get(permissionConfKey)

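Looking up the data-file umask, then building a temporary path for the region info file: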
    // ... create but before close.  If we don't successfully close the file,
    // subsequent region reopens will fail the below because create is
    // registered in NN.

    // first check to get the permissions
    FsPermission perms = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);

    // and then create the file
    Path tmpPath = new Path(getTmpDir(regiondir), REGIONINFO_FILE);

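Creating a logs directory and opening it to rwxrwxrwx: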
    private void createTempDir(Cluster cluster, Path coordPath) throws FalconException {
        try {
            FileSystem fs = coordPath.getFileSystem(ClusterHelper.getConfiguration(cluster));
            Path tempDir = new Path(coordPath, "../../logs");
            fs.mkdirs(tempDir);
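            // 511 decimal == 0777 octal, i.e. rwxrwxrwx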
            fs.setPermission(tempDir, new FsPermission((short) 511));
        } catch (Exception e) {
            throw new FalconException("Unable to create temp dir in " + coordPath, e);
        }
    }


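A chmod-style handler that recomputes each permission field and applies the result only if it changed: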
      return mode;
    }

    @Override
    public void run(FileStatus file) throws IOException {
      FsPermission perms = file.getPermission();
      int existing = perms.toShort();
      boolean exeOk = file.isDir() || (existing & 0111) != 0;
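      // The low nine permission bits are three 3-bit fields: user (bits 8-6),
      // group (bits 5-3), and other (bits 2-0).  Shift each field down,
      // apply the requested change, then shift it back into place.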
      int newperms = ( applyChmod(userType, userMode,
                                  (existing>>>6)&7, exeOk) << 6 |
                       applyChmod(groupType, groupMode,
                                  (existing>>>3)&7, exeOk) << 3 |
                       applyChmod(othersType, othersMode, existing&7, exeOk) );

      if (existing != newperms) {
        try {
          getFS().setPermission(file.getPath(),
                                new FsPermission((short)newperms));
        } catch (IOException e) {
          System.err.println(getName() + ": changing permissions of '" +
                             file.getPath() + "':" + e.getMessage());
        }
      }
    }

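Recreating a system directory with a fixed permission set after deleting it: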
        }
        // clean up the system dir, which will only work if hdfs is out of
        // safe mode
        fs.delete(systemDir);
        if (FileSystem.mkdirs(fs, systemDir,
            new FsPermission(SYSTEM_DIR_PERMISSION))) {
          break;
        }
        LOG.error("Mkdirs failed to create " + systemDir);
      } catch (IOException ie) {
        if (ie instanceof RemoteException &&

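Job submission: the staging directory, job jar, split file, and job.xml are each created with explicit FsPermission values: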
    String jobId = jobSubmitClient.getNewJobId();
    Path submitJobDir = new Path(job.getSystemDir(), jobId);
    FileSystem fs = getFs();
    LOG.debug("default FileSystem: " + fs.getUri());
    fs.delete(submitJobDir);   
    FileSystem.mkdirs(fs, submitJobDir, new FsPermission(JOB_DIR_PERMISSION));
    Path submitJobFile = new Path(submitJobDir, "job.xml");
    Path submitJarFile = new Path(submitJobDir, "job.jar");
    Path submitSplitFile = new Path(submitJobDir, "job.split");
       
    // set the timestamps of the archives and files
    URI[] tarchives = DistributedCache.getCacheArchives(job);
    if (tarchives != null) {
      StringBuffer archiveTimestamps =
        new StringBuffer(String.valueOf(DistributedCache.getTimestamp(job, tarchives[0])));
      for (int i = 1; i < tarchives.length; i++) {
        archiveTimestamps.append(",");
        archiveTimestamps.append(String.valueOf(DistributedCache.getTimestamp(job, tarchives[i])));
      }
      DistributedCache.setArchiveTimestamps(job, archiveTimestamps.toString());
    }

    URI[] tfiles = DistributedCache.getCacheFiles(job);
    if (tfiles != null) {
      StringBuffer fileTimestamps =
        new StringBuffer(String.valueOf(DistributedCache.getTimestamp(job, tfiles[0])));
      for (int i = 1; i < tfiles.length; i++) {
        fileTimestamps.append(",");
        fileTimestamps.append(String.valueOf(DistributedCache.getTimestamp(job, tfiles[i])));
      }
      DistributedCache.setFileTimestamps(job, fileTimestamps.toString());
    }
      
    String originalJarPath = job.getJar();
    short replication = (short)job.getInt("mapred.submit.replication", 10);

    if (originalJarPath != null) {           // copy jar to JobTracker's fs
      // use jar name if job is not named.
      if ("".equals(job.getJobName())){
        job.setJobName(new Path(originalJarPath).getName());
      }
      job.setJar(submitJarFile.toString());
      fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);
      fs.setReplication(submitJarFile, replication);
      fs.setPermission(submitJarFile, new FsPermission(JOB_FILE_PERMISSION));
    } else {
      LOG.warn("No job jar file set.  User classes may not be found. "+
               "See JobConf(Class) or JobConf#setJar(String).");
    }

    // Set the user's name and working directory
    job.setUser(ugi.getUserName());
    if (job.getWorkingDirectory() == null) {
      job.setWorkingDirectory(fs.getWorkingDirectory());         
    }

    // Check the input specification
    job.getInputFormat().validateInput(job);

    // Check the output specification
    job.getOutputFormat().checkOutputSpecs(fs, job);

    // Create the splits for the job
    LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile));
    InputSplit[] splits =
      job.getInputFormat().getSplits(job, job.getNumMapTasks());
    // sort the splits into order based on size, so that the biggest
    // go first
    Arrays.sort(splits, new Comparator<InputSplit>() {
      public int compare(InputSplit a, InputSplit b) {
        try {
          long left = a.getLength();
          long right = b.getLength();
          if (left == right) {
            return 0;
          } else if (left < right) {
            return 1;
          } else {
            return -1;
          }
        } catch (IOException ie) {
          throw new RuntimeException("Problem getting input split size",
                                     ie);
        }
      }
    });
    // write the splits to a file for the job tracker
    FSDataOutputStream out = FileSystem.create(fs,
        submitSplitFile, new FsPermission(JOB_FILE_PERMISSION));
    try {
      writeSplitsFile(splits, out);
    } finally {
      out.close();
    }
    job.set("mapred.job.split.file", submitSplitFile.toString());
    job.setNumMapTasks(splits.length);
       
    // Write job file to JobTracker's fs       
    out = FileSystem.create(fs, submitJobFile,
        new FsPermission(JOB_FILE_PERMISSION));

    try {
      job.write(out);
    } finally {
      out.close();
    }

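Defaulting a null permission and applying the client-side umask before opening a DFS output stream: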
                             ) throws IOException {
    checkOpen();
    if (permission == null) {
      permission = FsPermission.getDefault();
    }
    FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
    LOG.debug(src + ": masked=" + masked);
    OutputStream result = new DFSOutputStream(src, masked,
        overwrite, replication, blockSize, progress, buffersize);
    synchronized (pendingCreates) {
      pendingCreates.put(src.toString(), result);
    }
