Package org.apache.flink.core.fs

Examples of org.apache.flink.core.fs.FileStatus
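
FileStatus is Flink's interface describing the metadata of a file or directory: its path, length in bytes, block size, replication, modification and access times, and whether it is a directory. Instances come from a FileSystem, via getFileStatus(Path) for a single path or listStatus(Path) for a directory's contents, as the examples below show. As a minimal, self-contained sketch (class name and path are illustrative, not taken from the examples):

  import java.io.IOException;
  import org.apache.flink.core.fs.FileStatus;
  import org.apache.flink.core.fs.FileSystem;
  import org.apache.flink.core.fs.Path;

  public class FileStatusExample {
    public static void main(String[] args) throws IOException {
      // an illustrative local path
      Path path = new Path("file:///tmp/example.txt");
      FileSystem fs = path.getFileSystem();

      // query the metadata that FileStatus exposes
      FileStatus status = fs.getFileStatus(path);
      System.out.println("length (bytes):    " + status.getLen());
      System.out.println("block size:        " + status.getBlockSize());
      System.out.println("replication:       " + status.getReplication());
      System.out.println("modification time: " + status.getModificationTime());
      System.out.println("is directory:      " + status.isDir());
    }
  }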


 
  protected FileBaseStatistics getFileStats(FileBaseStatistics cachedStats, Path filePath, FileSystem fs,
      ArrayList<FileStatus> files) throws IOException {
   
    // get the file info and check whether the cached statistics are still valid.
    final FileStatus file = fs.getFileStatus(filePath);
    long latestModTime = file.getModificationTime();

    // enumerate all files and check their modification time stamp.
    if (file.isDir()) {
      FileStatus[] fss = fs.listStatus(filePath);
      files.ensureCapacity(fss.length);
     
      for (FileStatus s : fss) {
        if (!s.isDir()) {
          // ...
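
The latestModTime gathered above is what validates the cached statistics. A hedged sketch of that check, assuming FileBaseStatistics exposes the modification time it was computed against (which its role as a cache entry suggests):

  // reuse the cached statistics only if nothing has been modified since
  // they were computed; otherwise fall through and recompute them
  if (cachedStats != null && latestModTime <= cachedStats.getLastModificationTime()) {
    return cachedStats;
  }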


    // get all the files that are involved in the splits
    List<FileStatus> files = new ArrayList<FileStatus>();
    long totalLength = 0;

    final FileSystem fs = path.getFileSystem();
    final FileStatus pathFile = fs.getFileStatus(path);

    if (!acceptFile(pathFile)) {
      throw new IOException("The given file does not pass the file-filter");
    }
    if (pathFile.isDir()) {
      // input is directory. list all contained files
      final FileStatus[] dir = fs.listStatus(path);
      for (int i = 0; i < dir.length; i++) {
        if (!dir[i].isDir() && acceptFile(dir[i])) {
          files.add(dir[i]);
          totalLength += dir[i].getLen();
          // as soon as there is one deflate file in the directory, we cannot split it
          testForUnsplittable(dir[i]);
        }
      }
    } else {
      testForUnsplittable(pathFile);
     
      files.add(pathFile);
      totalLength += pathFile.getLen();
    }
    // if the format is unsplittable, generate one input split per file
    if (unsplittable) {
      int splitNum = 0;
      for (final FileStatus file : files) {
        // ...
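
The acceptFile(pathFile) call above filters out files that should not become input. A minimal sketch of such a filter, assuming the common convention of skipping hidden files and underscore-prefixed marker files such as _SUCCESS (the real method's rules may differ):

  // accept only regular data files; hidden and marker files are skipped
  protected boolean acceptFile(FileStatus fileStatus) {
    final String name = fileStatus.getPath().getName();
    return !name.startsWith("_") && !name.startsWith(".");
  }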

    inputFormat.setFilePath(normalizedPath);
    inputFormat.setOpenTimeout(0);
    inputFormat.configure(configuration);

    final FileSystem fs = FileSystem.get(normalizedPath.toUri());
    FileStatus fileStatus = fs.getFileStatus(normalizedPath);

    BlockLocation[] blocks = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
    inputFormat.open(new FileInputSplit(0, new Path(path), 0, fileStatus.getLen(), blocks[0].getHosts()));
    return inputFormat;
  }
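
getFileBlockLocations returns the blocks covering the requested byte range, each with its offset, length, and replica hosts; the snippet above only uses the hosts of the first block. A short sketch that walks all blocks instead, reusing fs and normalizedPath from above (java.util.Arrays is assumed imported, inside a method that may throw IOException):

  // print where each block of the file physically lives
  FileStatus status = fs.getFileStatus(normalizedPath);
  BlockLocation[] allBlocks = fs.getFileBlockLocations(status, 0, status.getLen());
  for (BlockLocation block : allBlocks) {
    System.out.println("offset=" + block.getOffset()
        + " length=" + block.getLength()
        + " hosts=" + Arrays.toString(block.getHosts()));
  }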

  @SuppressWarnings("unchecked")
  public static <T, F extends FileInputFormat<T>> List<F> openAllInputs(
      Class<F> inputFormatClass, String path, Configuration configuration) throws IOException {
    Path nephelePath = new Path(path);
    FileSystem fs = nephelePath.getFileSystem();
    FileStatus fileStatus = fs.getFileStatus(nephelePath);
    if (!fileStatus.isDir()) {
      return Arrays.asList(openInput(inputFormatClass, path, configuration));
    }
    FileStatus[] list = fs.listStatus(nephelePath);
    List<F> formats = new ArrayList<F>();
    for (int index = 0; index < list.length; index++) {
      // ...
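
The listing cuts off inside this loop; presumably each directory entry is opened through the openInput helper shown earlier. A hedged sketch of the likely shape (skipping sub-directories is an assumption):

  for (int index = 0; index < list.length; index++) {
    if (!list[index].isDir()) {
      formats.add(openInput(inputFormatClass, list[index].getPath().toString(), configuration));
    }
  }
  return formats;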

        fail("Cannot find entry " + bucketName + " in directory " + S3_BASE_URI);
      }

      // Check the concrete directory file status
      try {
        final FileStatus directoryFileStatus = fs.getFileStatus(bucketPath);
        assertTrue(directoryFileStatus.isDir());
        assertEquals(0L, directoryFileStatus.getAccessTime());
        assertTrue(directoryFileStatus.getModificationTime() > 0L);

      } catch (FileNotFoundException e) {
        fail(e.getMessage());
      }

      final OutputStream os = fs.create(file, true);
      generateTestData(os, SMALL_FILE_SIZE);
      os.close();

      final FileStatus fileStatus = fs.getFileStatus(file);
      assertNotNull(fileStatus);

      BlockLocation[] blockLocations = fs.getFileBlockLocations(fileStatus, 0, SMALL_FILE_SIZE + 1);
      assertNull(blockLocations);
      // ...

    for (org.apache.hadoop.fs.Path hadoopPath : hadoopFilePaths) {
     
      final Path filePath = new Path(hadoopPath.toUri());
      final FileSystem fs = FileSystem.get(filePath.toUri());
     
      final FileStatus file = fs.getFileStatus(filePath);
      latestModTime = Math.max(latestModTime, file.getModificationTime());
     
      // enumerate all files and check their modification time stamp.
      if (file.isDir()) {
        FileStatus[] fss = fs.listStatus(filePath);
        files.ensureCapacity(files.size() + fss.length);
       
        for (FileStatus s : fss) {
          if (!s.isDir()) {
            // ...


      assertFalse(lfs.exists(pathtotmpdir));
      tempdir.mkdirs();

      // check that the local file system now recognizes the directory
      assertTrue(lfs.exists(pathtotmpdir));
      final FileStatus localstatus1 = lfs.getFileStatus(pathtotmpdir);

      // check that lfs reports it as a directory
      assertTrue(localstatus1.isDir());

      // get status for files in this (empty) directory..
      final FileStatus[] statusforfiles = lfs.listStatus(pathtotmpdir);

      // the directory is empty, so the listing must be of length zero
      assertEquals(0, statusforfiles.length);
      // ...

      int samplesTaken = 0;

      // take the samples
      while (samplesTaken < numSamples && fileNum < allFiles.size()) {
        // make a split for the sample and use it to read a record
        FileStatus file = allFiles.get(fileNum);
        FileInputSplit split = new FileInputSplit(0, file.getPath(), offset, file.getLen() - offset, null);

        // we open the split, read one line, and take its length
        try {
          open(split);
          if (readLine()) {
            totalNumBytes += this.currLen + this.delimiter.length;
            samplesTaken++;
          }
        } finally {
          // close the file stream, do not release the buffers
          super.close();
        }

        offset += stepSize;

        // skip to the next file, if necessary
        while (fileNum < allFiles.size() && offset >= (file = allFiles.get(fileNum)).getLen()) {
          offset -= file.getLen();
          fileNum++;
        }
      }
     
      // we have the width, store it
      // ...
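
With the samples collected, the average record width is simply the sampled byte count divided by the number of samples. A hedged sketch of the step the last comment announces (the zero-sample guard is an assumption):

  // average record width in bytes; -1 signals that no sample could be taken
  final float avgWidth = samplesTaken == 0 ? -1.0f : ((float) totalNumBytes) / samplesTaken;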
