Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileStatus
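
The snippets below all revolve around a handful of FileStatus accessors (getLen(), isDir(), getPermission(), getReplication(), getModificationTime(), and so on). As a quick orientation, here is a minimal, self-contained sketch of the typical lookup pattern; the class name and the default path are placeholders, not taken from any of the projects quoted below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusExample {

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Placeholder path; point it at any file that already exists.
    Path p = new Path(args.length > 0 ? args[0] : "/tmp/example.txt");

    // getFileStatus() throws FileNotFoundException if the path does not exist.
    FileStatus status = fs.getFileStatus(p);
    System.out.println("path:        " + status.getPath());
    System.out.println("directory?   " + status.isDir());        // isDirectory() in newer releases
    System.out.println("length:      " + status.getLen());
    System.out.println("replication: " + status.getReplication());
    System.out.println("block size:  " + status.getBlockSize());
    System.out.println("permission:  " + status.getPermission());
    System.out.println("owner/group: " + status.getOwner() + "/" + status.getGroup());
    System.out.println("modified:    " + status.getModificationTime());
    System.out.println("accessed:    " + status.getAccessTime());

    // listStatus() returns one FileStatus per entry in a directory.
    for (FileStatus entry : fs.listStatus(p.getParent())) {
      System.out.println("sibling: " + entry.getPath().getName());
    }
  }
}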


  public void touchz(String... uris) {
    for (String uri : uris) {
      try {
        Path src = new Path(uri);
        FileSystem srcFs = getFS(src);
        FileStatus st;
        if (srcFs.exists(src)) {
          st = srcFs.getFileStatus(src);
          if (st.isDir()) {
            // TODO: handle this
            throw new IllegalArgumentException(src + " is a directory");
          }
          else if (st.getLen() != 0)
            throw new IllegalArgumentException(src + " must be a zero-length file");
        }
        else {
          IOUtils.closeStream(srcFs.create(src));
        }
      }
      catch (IOException e) {
        // Closing logic is an assumption (the original excerpt is truncated here):
        // report the failure and continue with the next URI.
        System.err.println("touchz: " + e.getLocalizedMessage());
      }
    }
  }


        final Store<ByteArray, byte[], byte[]> store =
                new BdbStorageConfiguration(new VoldemortConfig(new Props(new File(serverPropsFile))))
                        .getStore(TestUtils.makeStoreDefinition(storeName),
                                  TestUtils.makeSingleNodeRoutingStrategy());

        final AtomicInteger obsoletes = new AtomicInteger(0);

        Path jsonFilePath = new Path(jsonDataFile);
        FileStatus jsonFileStatus = jsonFilePath.getFileSystem(new Configuration())
                                                .listStatus(jsonFilePath)[0];
        final SequenceFileRecordReader<BytesWritable, BytesWritable> reader =
                new SequenceFileRecordReader<BytesWritable, BytesWritable>(
                        new Configuration(),
                        new FileSplit(jsonFilePath, 0, jsonFileStatus.getLen(), (String[]) null));

        PerformanceTest readWriteTest = new PerformanceTest() {

            @Override
            // ... (remainder of the anonymous PerformanceTest implementation elided)

        final Store<ByteArray, byte[], byte[]> store =
                new MysqlStorageConfiguration(new VoldemortConfig(new Props(new File(serverPropsFile))))
                        .getStore(TestUtils.makeStoreDefinition(storeName),
                                  TestUtils.makeSingleNodeRoutingStrategy());

        final AtomicInteger obsoletes = new AtomicInteger(0);

        Path jsonFilePath = new Path(jsonDataFile);
        FileStatus jsonFileStatus = jsonFilePath.getFileSystem(new Configuration())
                                                .listStatus(jsonFilePath)[0];
        final SequenceFileRecordReader<BytesWritable, BytesWritable> reader =
                new SequenceFileRecordReader<BytesWritable, BytesWritable>(
                        new Configuration(),
                        new FileSplit(jsonFilePath, 0, jsonFileStatus.getLen(), (String[]) null));

        PerformanceTest readWriteTest = new PerformanceTest() {

            @Override
            // ... (remainder of the anonymous PerformanceTest implementation elided)

            // ... (preceding try block elided)
            te.printStackTrace();
            logger.error("Error thrown while trying to get Hadoop filesystem");
            System.exit(-1);
        }

        FileStatus status = fs.listStatus(p)[0];
        long size = status.getLen();
        HdfsFetcher fetcher = new HdfsFetcher(null,
                                              maxBytesPerSec,
                                              VoldemortConfig.REPORTING_INTERVAL_BYTES,
                                              VoldemortConfig.DEFAULT_BUFFER_SIZE,
                                              0,
                                              // ... (remaining HdfsFetcher constructor arguments elided)

  @Test
  public void testAlterTableDMl() throws HIHOException, IOException {
    Configuration conf = mock(Configuration.class);
    Path path = mock(Path.class);
    FileStatus status1 = mock(FileStatus.class);
    Path path1 = mock(Path.class);
    when(path1.getName()).thenReturn("part-xxxxx");
    when(status1.getPath()).thenReturn(path1);
    FileStatus status2 = mock(FileStatus.class);
    Path path2 = mock(Path.class);
    when(path2.getName()).thenReturn("part-yyyyy");
    when(status2.getPath()).thenReturn(path2);
    FileSystem fs = mock(FileSystem.class);
    when(fs.listStatus(path)).thenReturn(
        new FileStatus[] { status1, status2 });
    when(path.getFileSystem(conf)).thenReturn(fs);
    when(conf.get(HIHOConf.EXTERNAL_TABLE_DML))
        // ... (the stubbed return value and the remaining test body are elided)

   * @return a Map object (JSON friendly) with the file status.
   * @throws IOException thrown if an IO error occurred.
   */
  @Override
  public Map execute(FileSystem fs) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    return FSUtils.fileStatusToJSON(status, HoopServer.get().getBaseUrl());
  }
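
The FSUtils.fileStatusToJSON helper used above is part of Hoop and is not shown in this excerpt. As a rough illustration of the idea only (not Hoop's actual implementation, and with made-up key names), a FileStatus can be flattened into a JSON-friendly Map along these lines:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.fs.FileStatus;

// Hypothetical helper, not Hoop's FSUtils: flatten a FileStatus into plain values
// that any JSON library can serialize. The key names are illustrative only.
public final class FileStatusJson {

  public static Map<String, Object> toMap(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", status.getPath().toUri().getPath());
    json.put("isDir", status.isDir());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", status.getPermission().toString());
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", (int) status.getReplication());
    return json;
  }
}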

    // (setup elided: the file was created with the replication factor, block
    // size and permission that the assertions below expect)
    os.write(1);
    os.close();
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status = fs.getFileStatus(path);
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
    Assert.assertEquals(status.getPermission(), permission);
    InputStream is = fs.open(path);
    Assert.assertEquals(is.read(), 1);
    is.close();
    fs.close();
  }

    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
    fs.close();

    Assert.assertEquals(status2.getPermission(), status1.getPermission());
    Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
    Assert.assertEquals(status2.getReplication(), status1.getReplication());
    Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
    Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
    Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
    Assert.assertEquals(status2.getOwner(), status1.getOwner());
    Assert.assertEquals(status2.getGroup(), status1.getGroup());
    Assert.assertEquals(status2.getLen(), status1.getLen());

    FileStatus[] stati = fs.listStatus(path.getParent());
    Assert.assertEquals(stati.length, 1);
    Assert.assertEquals(stati[0].getPath().getName(), path.getName());
  }

    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    long at = status1.getAccessTime();
    long mt = status1.getModificationTime();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    fs.setTimes(path, mt + 10, at + 20);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    status1 = fs.getFileStatus(path);
    fs.close();
    long atNew = status1.getAccessTime();
    long mtNew = status1.getModificationTime();
    Assert.assertEquals(mtNew, mt + 10);
    Assert.assertEquals(atNew, at + 20);
  }

    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
  }
