Examples of FsPermission


Examples of org.apache.hadoop.fs.permission.FsPermission

      replication = (short) fs.getConf().getInt("dfs.replication", 3);
    }
    if (blockSize == -1) {
      blockSize = fs.getConf().getInt("dfs.block.size", 67108864);
    }
    FsPermission fsPermission = FSUtils.getPermission(permission);
    int bufferSize = fs.getConf().getInt("hoop.buffer.size", 4096);
    OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
    IOUtils.copy(is, os);
    os.close();
    return FSUtils.convertPathToHoop(path, HoopServer.get().getBaseUrl()).toUri();
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

   * <code>false</code> otherwise.
   * @throws IOException thrown if an IO error occurred.
   */
  @Override
  public JSONObject execute(FileSystem fs) throws IOException {
    FsPermission fsPermission = FSUtils.getPermission(permission);
    boolean mkdirs = fs.mkdirs(path, fsPermission);
    return FSUtils.toJSON("mkdirs", mkdirs);
  }
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

   * @param str Unix permission symbolic representation.
   * @return the Hadoop permission. If the given string was
   * 'default', it returns <code>FsPermission.getDefault()</code>.
   */
  public static FsPermission getPermission(String str) {
    FsPermission permission;
    if (str.equals(DEFAULT_PERMISSION)) {
      permission = FsPermission.getDefault();
    }
    else {
      //TODO: there is something funky here, it does not detect 'x'
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

  private void testCreate(Path path, boolean override) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    FileSystem fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
                                (short) 2, 100 * 1024 * 1024, null);
    os.write(1);
    os.close();
    fs.close();
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
  }
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

    Path path = new Path((String) json.get("path"));
    boolean isDir = (Boolean) json.get("isDir");
    long len = (Long) json.get("len");
    String owner = (String) json.get("owner");
    String group = (String) json.get("group");
    FsPermission permission = FsPermission.valueOf((String) json.get("permission"));
    long aTime = (Long) json.get("accessTime");
    long mTime = (Long) json.get("modificationTime");
    long blockSize = (Long) json.get("blockSize");
    short replication = (short) (long) (Long) json.get("replication");
    return new FileStatus(len, isDir, replication, blockSize, mTime, aTime, permission, owner, group, path);
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

    Path rootDir = new Path(TEST_ROOT_DIR);
    if(!fs.exists(rootDir)) {
      fs.mkdirs(rootDir);
    }
    fs.setPermission(rootDir,
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    runKillingJobAndValidate(jt, conf);
    runFailingJobAndValidate(jt, conf);
    runSuccessfulJobAndValidate(jt, conf);
  }
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

      }

      // Create the directory and set open permissions so that the TT can
      // access.
      fs.mkdirs(scriptDir);
      fs.setPermission(scriptDir, new FsPermission(FsAction.ALL, FsAction.ALL,
          FsAction.ALL));

     // create shell script
     Random rm = new Random();
      Path scriptPath = new Path(scriptDirName, "_shellScript_" + rm.nextInt()
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

            }
           
            int defaultBufferSize =
              LOGDIR_FS.getConf().getInt("io.file.buffer.size", 4096);
            out = LOGDIR_FS.create(logFile,
                            new FsPermission(HISTORY_FILE_PERMISSION),
                            true,
                            defaultBufferSize,
                            LOGDIR_FS.getDefaultReplication(),
                            jobHistoryBlockSize, null);
            writer = new PrintWriter(out);
            fileManager.addWriter(jobId, writer);

            // cache it ...
            fileManager.setHistoryFile(jobId, logFile);
          }
          if (userLogFile != null) {
            // Get the actual filename as recoverJobHistoryFile() might return
            // a different filename
            userLogDir = userLogFile.getParent().toString();
            userLogFile = new Path(userLogDir, logFileName);
           
            // create output stream for logging
            // in hadoop.job.history.user.location
            fs = userLogFile.getFileSystem(jobConf);
            out = fs.create(userLogFile, true, 4096);
            writer = new PrintWriter(out);
            fileManager.addWriter(jobId, writer);
          }
         
          ArrayList<PrintWriter> writers = fileManager.getWriters(jobId);
          // Log the history meta info
          JobHistory.MetaInfoManager.logMetaInfo(writers);

          String viewJobACL = "*";
          String modifyJobACL = "*";
          if (aclsEnabled) {
            viewJobACL = jobConf.get(JobACL.VIEW_JOB.getAclName(), " ");
            modifyJobACL = jobConf.get(JobACL.MODIFY_JOB.getAclName(), " ");
          }
          //add to writer as well
          JobHistory.log(writers, RecordTypes.Job,
                         new Keys[]{Keys.JOBID, Keys.JOBNAME, Keys.USER,
                                    Keys.SUBMIT_TIME, Keys.JOBCONF,
                                    Keys.VIEW_JOB, Keys.MODIFY_JOB,
                                    Keys.JOB_QUEUE},
                         new String[]{jobId.toString(), jobName, user,
                                      String.valueOf(submitTime) , jobConfPath,
                                      viewJobACL, modifyJobACL,
                                      jobConf.getQueueName()}
                        );
            
        }catch(IOException e){
          LOG.error("Failed creating job history log file, disabling history", e);
          disableHistory = true;
        }
      }
      // Always store job conf on local file system
      String localJobFilePath =  JobInfo.getLocalJobFilePath(jobId);
      File localJobFile = new File(localJobFilePath);
      FileOutputStream jobOut = null;
      try {
        jobOut = new FileOutputStream(localJobFile);
        jobConf.writeXml(jobOut);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Job conf for " + jobId + " stored at "
                    + localJobFile.getAbsolutePath());
        }
      } catch (IOException ioe) {
        LOG.error("Failed to store job conf on the local filesystem ", ioe);
      } finally {
        if (jobOut != null) {
          try {
            jobOut.close();
          } catch (IOException ie) {
            LOG.info("Failed to close the job configuration file "
                       + StringUtils.stringifyException(ie));
          }
        }
      }

      /* Storing the job conf on the log dir */
      Path jobFilePath = null;
      if (LOG_DIR != null) {
        jobFilePath = new Path(LOG_DIR + File.separator +
                               jobUniqueString + "_conf.xml");
        fileManager.setConfFile(jobId, jobFilePath);
      }
      Path userJobFilePath = null;
      if (userLogDir != null) {
        userJobFilePath = new Path(userLogDir + File.separator +
                                   jobUniqueString + "_conf.xml");
      }
      FSDataOutputStream jobFileOut = null;
      try {
        if (LOG_DIR != null) {
          int defaultBufferSize =
              LOGDIR_FS.getConf().getInt("io.file.buffer.size", 4096);
          if (!LOGDIR_FS.exists(jobFilePath)) {
            jobFileOut = LOGDIR_FS.create(jobFilePath,
                                   new FsPermission(HISTORY_FILE_PERMISSION),
                                   true,
                                   defaultBufferSize,
                                   LOGDIR_FS.getDefaultReplication(),
                                   LOGDIR_FS.getDefaultBlockSize(), null);
            jobConf.writeXml(jobFileOut);
View Full Code Here

Examples of org.apache.hadoop.fs.permission.FsPermission

  private void processPermission(DataInputStream in, ImageVisitor v)
      throws IOException {
    v.visitEnclosingElement(ImageElement.PERMISSIONS);
    v.visit(ImageElement.USER_NAME, Text.readString(in));
    v.visit(ImageElement.GROUP_NAME, Text.readString(in));
    FsPermission fsp = new FsPermission(in.readShort());
    v.visit(ImageElement.PERMISSION_STRING, fsp.toString());
    v.leaveEnclosingElement(); // Permissions
  }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.