
Examples of org.apache.hadoop.fs.FSDataOutputStream
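FSDataOutputStream is the stream returned by FileSystem.create() and FileSystem.append(): a DataOutputStream that also tracks the current write position via getPos() and, on Hadoop 2.x and later, exposes hflush()/hsync() for durability. As a minimal sketch of the basic create-write-close cycle (the path here is a placeholder, and the usual org.apache.hadoop.conf and org.apache.hadoop.fs imports are assumed):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.txt");        // placeholder path
    FSDataOutputStream out = fs.create(path, true);  // true = overwrite
    try {
      out.writeBytes("hello, hdfs\n");               // DataOutput API
      out.hflush();                                  // flush to the pipeline (Hadoop 2.x+)
    } finally {
      out.close();
    }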


    // Write a password byte[] to a local file with owner-read-only
    // permission (octal 400), skipping the write if an earlier run
    // already created the file.
    Path localPath = new Path(localPasswordFile);
    if (localFs.isFile(localPath)) {
      LOG.debug("Password file is already created by previous path");
      return;
    }
    FSDataOutputStream out = FileSystem.create(localFs, localPath,
        new FsPermission("400"));
    out.write(password);
    out.close();
  }
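FileSystem.create(FileSystem, Path, FsPermission) is a static convenience that creates the file and then applies the supplied permission; "400" parses as octal, i.e. owner read-only, the usual choice for credential files.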


  // Write new-API (org.apache.hadoop.mapreduce) input splits and their
  // meta-info to the job submission directory.
  public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir,
      Configuration conf, FileSystem fs, T[] splits)
      throws IOException, InterruptedException {
    FSDataOutputStream out = createFile(fs,
        JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
    SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
    out.close();
    writeJobSplitMetaInfo(fs, JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir),
        new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
        info);
  }

 
  // Old-API (org.apache.hadoop.mapred) counterpart of the method above.
  public static void createSplitFiles(Path jobSubmitDir,
      Configuration conf, FileSystem fs,
      org.apache.hadoop.mapred.InputSplit[] splits)
      throws IOException {
    FSDataOutputStream out = createFile(fs,
        JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
    SplitMetaInfo[] info = writeOldSplits(splits, out);
    out.close();
    writeJobSplitMetaInfo(fs, JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir),
        new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
        info);
  }
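The two overloads above handle new-API (org.apache.hadoop.mapreduce) and old-API (org.apache.hadoop.mapred) splits respectively; both delegate to the createFile() and writeJobSplitMetaInfo() helpers shown next.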

  // Create the split file with the job-file permission, raise its
  // replication, and write the split header.
  private static FSDataOutputStream createFile(FileSystem fs, Path splitFile,
      Configuration job) throws IOException {
    FSDataOutputStream out = FileSystem.create(fs, splitFile,
        new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
    int replication = job.getInt("mapred.submit.replication", 10);
    fs.setReplication(splitFile, (short) replication);
    writeSplitHeader(out);
    return out;
  }
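The helper bumps the split file's replication to mapred.submit.replication (default 10, visible in the getInt call above): job files are read by many task hosts at startup, and the extra replicas spread that read load.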

  private static void writeJobSplitMetaInfo(FileSystem fs, Path filename,
      FsPermission p, int splitMetaInfoVersion,
      JobSplit.SplitMetaInfo[] allSplitMetaInfo)
  throws IOException {
    // write the splits meta-info to a file for the job tracker
    FSDataOutputStream out =
      FileSystem.create(fs, filename, p);
    out.write(JobSplit.META_SPLIT_FILE_HEADER);
    WritableUtils.writeVInt(out, splitMetaInfoVersion);
    WritableUtils.writeVInt(out, allSplitMetaInfo.length);
    for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
      splitMetaInfo.write(out);
    }
    out.close();
  }
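For orientation, the layout written above can be read back with the matching DataInput calls. A hypothetical sketch, not taken from Hadoop itself:

    FSDataInputStream in = fs.open(filename);
    try {
      byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
      in.readFully(header);                      // fixed header bytes
      int version = WritableUtils.readVInt(in);  // splitMetaInfoVersion
      int count = WritableUtils.readVInt(in);    // number of splits
      JobSplit.SplitMetaInfo[] infos = new JobSplit.SplitMetaInfo[count];
      for (int i = 0; i < count; i++) {
        infos[i] = new JobSplit.SplitMetaInfo();
        infos[i].readFields(in);                 // Writable counterpart of write()
      }
    } finally {
      in.close();
    }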

      Path tmpfile = new Path(job.get(TMP_DIR_LABEL), relativedst);
      long cbcopied = 0L;
      FSDataInputStream in = null;
      FSDataOutputStream out = null;
      try {
        // open src file
        in = srcstat.getPath().getFileSystem(job).open(srcstat.getPath());
        reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
        // open tmp file
        out = create(tmpfile, reporter, srcstat);
        // copy file buffer-by-buffer, reporting progress after each write
        for (int cbread; (cbread = in.read(buffer)) >= 0; ) {
          out.write(buffer, 0, cbread);
          cbcopied += cbread;
          reporter.setStatus(
              String.format("%.2f ", cbcopied*100.0/srcstat.getLen())
              + absdst + " [ " +
              StringUtils.humanReadableInt(cbcopied) + " / " +
              StringUtils.humanReadableInt(srcstat.getLen()) + " ]");
        }
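Each iteration copies one buffer's worth of bytes and refreshes the task status with a percentage plus human-readable copied/total counts; the steady reporter calls also count as progress, keeping long copies from being killed as idle.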

  // Replace the file's contents with the given string.
  public void writeStr(Path file, String contents) throws IOException {
    if (fs.exists(file)) {
      fs.delete(file, true);
    }

    FSDataOutputStream write = fs.create(file);
    // Use an explicit charset; the no-arg getBytes() depends on the
    // platform default encoding.
    write.write(contents.getBytes(StandardCharsets.UTF_8));
    write.close();
  }
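Note that FileSystem.create(Path) already overwrites by default (it delegates to create(path, true)), so the exists()/delete() check above is redundant, though it does make the intent explicit.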

        Configuration conf = new Configuration();
        HadoopBaseUtils.grabConfiguration(hdpConf, conf);
        conf.setInt("dfs.replication", maxReplicaton);
        FileSystem fs = FileSystem.get(conf);
        // Truncate the old file, recreate it with overwrite enabled, and
        // write each non-empty string as its own line.
        IndexUtils.truncate(fs, new Path(upFolder));
        FSDataOutputStream output = fs.create(new Path(upFolder), true);
        for (String s : listStrs) {
          if (s != null && !s.isEmpty()) {
            output.write((s + "\n").getBytes());
          }
        }
        output.close();
      } catch (Exception e) {
        e.printStackTrace();
      }

      return fqStr;

        // Variant of the previous snippet without the replication override.
        String hdpConf = (String) stormconf.get("hadoop.conf.dir");
        Configuration conf = new Configuration();
        HadoopBaseUtils.grabConfiguration(hdpConf, conf);
        FileSystem fs = FileSystem.get(conf);
        IndexUtils.truncate(fs, new Path(upFolder));
        FSDataOutputStream output = fs.create(new Path(upFolder), true);
        for (String s : listStrs) {
          if (s != null && !s.isEmpty()) {
            output.write((s + "\n").getBytes());
          }
        }
        output.close();
      } catch (Exception e) {
        e.printStackTrace();
      }
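Both variants rebuild the file wholesale (truncate, recreate, write one entry per line); catching Exception and merely printing the stack trace, however, hides write failures from the caller.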

      this.makePath(filepath, conf);

      // Write to a uniquely named temp file, then swap it into place.
      Path randompath = new Path(this.file + "_" + java.util.UUID.randomUUID().toString());
      try {
        FSDataOutputStream writer = fs.create(randompath);

        Reader reader = stream.getReader();
        if (!(reader instanceof BufferedReader)) {
          reader = new BufferedReader(reader);
        }
        BufferedReader br = (BufferedReader) reader;
        String s1 = null;
        while ((s1 = br.readLine()) != null) {
          // strip stray CR/LF characters, then append a single newline
          writer.write((s1.trim().replaceAll("(\r)|(\n)", "") + "\n").getBytes());
        }
        br.close(); // also closes the wrapped reader

        writer.close();
        fs.delete(filepath, true);
        fs.rename(randompath, filepath);
      } catch (Exception e) {
        // on failure, remove the partially written temp file
        fs.delete(randompath, true);
      }
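Writing to a UUID-suffixed temp file and renaming it over the destination is the usual commit pattern on HDFS, where rename() within a filesystem is atomic; note, though, that the delete()/rename() pair itself is not atomic, so a crash between the two calls can leave no destination file at all.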
