Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream$Checker
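
FSDataInputStream is the stream type that FileSystem.open() returns: a DataInputStream subclass that also implements Seekable and PositionedReadable, so callers can jump to an absolute offset or read at a given position without moving the current file pointer. The fragments below are excerpts from the Hadoop codebase showing typical open/seek/read/close patterns. As a minimal, self-contained sketch of the API (the argument path and the 8-byte header read are illustrative, not drawn from any example on this page):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataInputStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path(args[0]);             // an HDFS or local path
    FileSystem fs = path.getFileSystem(conf);
    FSDataInputStream in = fs.open(path);
    try {
      byte[] header = new byte[8];
      in.readFully(0, header);                 // positioned read: the cursor does not move
      in.seek(8);                              // Seekable: jump past the header
      int next = in.read();                    // ordinary stream read, now at offset 8
      System.out.println("pos=" + in.getPos() + " next=" + next);
    } finally {
      in.close();                              // the Hadoop code of this era uses try/finally
    }
  }
}

The first example appears to be the JobTracker's CompletedJobStatusStore replaying a persisted job-info file: status, profile, and counters are read in order before the task-completion events are extracted: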


  public TaskCompletionEvent[] readJobTaskCompletionEvents(JobID jobId,
                                                           int fromEventId,
                                                           int maxEvents) {
    TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY;
    if (active) {
      try {
        FSDataInputStream dataIn = getJobInfoFile(jobId);
        if (dataIn != null) {
          readJobStatus(dataIn);
          readJobProfile(dataIn);
          readCounters(dataIn);
          events = readEvents(dataIn, fromEventId, maxEvents);
          dataIn.close();
        }
      } catch (IOException ex) {
        LOG.warn("Could not read [" + jobId + "] job events : " + ex, ex);
      }
    }
    return events;
  }


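Next, what appears to be MapTask.getSplitDetails(): it seeks to the recorded offset in the split file, reads the serialized class name, builds a deserializer for that class, and charges the bytes consumed to the SPLIT_RAW_BYTES counter: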
  @SuppressWarnings("unchecked")
  private <T> T getSplitDetails(Path file, long offset)
      throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    FSDataInputStream inFile = fs.open(file);
    inFile.seek(offset);
    String className = Text.readString(inFile);
    Class<T> cls;
    try {
      cls = (Class<T>) conf.getClassByName(className);
    } catch (ClassNotFoundException ce) {
      IOException wrap = new IOException("Split class " + className +
                                          " not found");
      wrap.initCause(ce);
      throw wrap;
    }
    SerializationFactory factory = new SerializationFactory(conf);
    Deserializer<T> deserializer =
      (Deserializer<T>) factory.getDeserializer(cls);
    deserializer.open(inFile);
    T split = deserializer.deserialize(null);
    long pos = inFile.getPos();
    getCounters().findCounter(
         Task.Counter.SPLIT_RAW_BYTES).increment(pos - offset);
    inFile.close();
    return split;
  }
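
This helper (it resembles the Rumen trace tools) asks a CompressionCodecFactory whether the path's extension implies a codec; plain files are wrapped in a LineReader directly, while compressed ones get a pooled decompressor first: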

  private LineReader maybeUncompressedPath(Path p)
      throws FileNotFoundException, IOException {
    CompressionCodecFactory codecs = new CompressionCodecFactory(getConf());
    inputCodec = codecs.getCodec(p);
    FileSystem fs = p.getFileSystem(getConf());
    FSDataInputStream fileIn = fs.open(p);

    if (inputCodec == null) {
      return new LineReader(fileIn, getConf());
    } else {
      inputDecompressor = CodecPool.getDecompressor(inputCodec);
      return new LineReader(inputCodec.createInputStream(fileIn, inputDecompressor),
                            getConf());
    }
  }
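
The following fragment appears to come from HadoopArchives: each source file is opened and its bytes copied into the shared part file of the archive, while an index line records the file's name, offset, and length: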

        reporter.progress();
      }
      else {
        FileSystem srcFs = srcPath.getFileSystem(conf);
        FileStatus srcStatus = srcFs.getFileStatus(srcPath);
        FSDataInputStream input = srcFs.open(srcStatus.getPath());
        reporter.setStatus("Copying file " + srcStatus.getPath() +
            " to archive.");
        copyData(srcStatus.getPath(), input, partStream, reporter);
        towrite = relPath.toString() + " file " + partname + " " + startPos
            + " " + srcStatus.getLen() + " ";
        // ... (remainder of the copy branch truncated in this excerpt)
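
This test excerpt (apparently from TestDistributedFileSystem) checks that a read-only client never starts the DFSClient lease checker, which only writers need for lease renewal: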

      {
        DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
        assertFalse(dfs.dfs.isLeaseCheckerStarted());

        //open and check the file
        FSDataInputStream in = dfs.open(filepath);
        assertFalse(dfs.dfs.isLeaseCheckerStarted());
        assertEquals(millis, in.readLong());
        assertFalse(dfs.dfs.isLeaseCheckerStarted());
        in.close();
        assertFalse(dfs.dfs.isLeaseCheckerStarted());
        dfs.close();
      }
    }
    finally {
      if (cluster != null) { cluster.shutdown(); }
    }
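
A small test helper that slurps a fixed-length file into memory: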

  void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
    byte[] arr = new byte[fileLen];
    FSDataInputStream in = fs.open(path);
    in.readFully(arr);   // blocks until all fileLen bytes have arrived
    in.close();
  }
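
Here a client locates the JobTracker by reading its address out of a file in the filesystem, sleeping and retrying while the lease on that file is invalid: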

            Thread.sleep(2000);
          } catch (InterruptedException e) {}
        } else {
          // Lease is valid; test and return the JobTracker in the jtPath file
          try {
            FSDataInputStream in = fs.open(jtPath);
            BufferedReader br = new BufferedReader(new InputStreamReader(in));
            String jtAddress = br.readLine().trim();
            in.close();
            // Try to connect to the JT listed in jtAddress
            JobConf clientConf = new JobConf(conf);
            clientConf.set("mapred.job.tracker", jtAddress);
            JobClient client = new JobClient();
            client.init(clientConf);
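
This fragment (likely from Rumen's JSON input handling) opens the file directly when no compression codec matches; otherwise it wraps the FSDataInputStream in a codec input stream before handing the result to Jackson: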

    InputStream input;
    if (codec == null) {
      input = fs.open(path);
      decompressor = null;
    } else {
      FSDataInputStream fsdis = fs.open(path);
      decompressor = CodecPool.getDecompressor(codec);
      input = codec.createInputStream(fsdis, decompressor);
    }
    jsonParser = mapper.getJsonFactory().createJsonParser(input);
  }
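
A DataNode block-scanner test: half a block is written and synced, read back with a positioned readFully, and the scanner's block map is then inspected: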

    int writeSize = blockSize / 2;
    out.write(new byte[writeSize]);
    out.sync();
   
    FSDataInputStream in = fileSystem.open(file1);
   
    byte[] buf = new byte[4096];
    in.readFully(0, buf);
    in.close();

    waitForBlocks(fileSystem, file1, 1, writeSize);
   
    int blockMapSize = cluster.getDataNodes().get(0).blockScanner.blockMap.size();
    assertTrue(
        // ... (assertion truncated in this excerpt)
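
Finally, what appears to be the DistCp copy mapper: the source file is streamed into a temporary file buffer by buffer, with byte counters and a percentage status updated along the way: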

        return;
      }

      Path tmpfile = new Path(job.get(TMP_DIR_LABEL), relativedst);
      long cbcopied = 0L;
      FSDataInputStream in = null;
      FSDataOutputStream out = null;
      try {
        // open src file
        in = srcstat.getPath().getFileSystem(job).open(srcstat.getPath());
        reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
        // open tmp file
        out = create(tmpfile, reporter, srcstat);
        // copy file
        for(int cbread; (cbread = in.read(buffer)) >= 0; ) {
          out.write(buffer, 0, cbread);
          cbcopied += cbread;
          reporter.setStatus(
              String.format("%.2f ", cbcopied*100.0/srcstat.getLen())
              + absdst + " [ " +
              // ... (status message truncated in this excerpt)
