Examples of FileStatus
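org.apache.hadoop.fs.FileStatus is the metadata record Hadoop returns for a path: its Path, length, directory flag, owner, group, permissions and timestamps. The examples below all obtain it through FileSystem.getFileStatus() or listStatus() and then branch on isDir() or getLen(). As a minimal, self-contained sketch of that surface (the path /tmp/example is hypothetical and assumed to exist on the default filesystem):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusSketch {

    public static void main(String[] args) throws IOException {
        // Resolve the FileSystem from the (hypothetical) path; assumes default config.
        Path path = new Path("/tmp/example");
        FileSystem fs = path.getFileSystem(new Configuration());

        // FileStatus bundles the metadata for a single path.
        FileStatus status = fs.getFileStatus(path);
        System.out.println("path:      " + status.getPath());
        System.out.println("directory: " + status.isDir());   // isDirectory() in newer APIs
        System.out.println("length:    " + status.getLen());
        System.out.println("owner:     " + status.getOwner());

        // listStatus() returns one FileStatus per child entry.
        if (status.isDir()) {
            for (FileStatus child : fs.listStatus(path)) {
                System.out.println(child.getPath().getName() + " -> " + child.getLen() + " bytes");
            }
        }
    }
}

In newer Hadoop releases isDir() is deprecated in favour of isDirectory(), but the older name still appears throughout the excerpts below.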


Examples of org.apache.hadoop.fs.FileStatus

        if (srcs.length > 1 && !isDstDir) {
          throw new IllegalArgumentException("When moving multiple files, destination should be a directory.");
        }
        for (Path s : srcs) {
          if (!srcFs.rename(s, dstPath)) {
            FileStatus srcFstatus = null;
            FileStatus dstFstatus = null;
            try {
              srcFstatus = srcFs.getFileStatus(s);
            } catch (FileNotFoundException e) {
              // ignore
            }
            try {
              dstFstatus = dstFs.getFileStatus(dstPath);
            } catch (IOException e) {
              // ignore
            }
            if ((srcFstatus != null) && (dstFstatus != null)) {
              if (srcFstatus.isDir() && !dstFstatus.isDir()) {
                throw new IllegalArgumentException("cannot overwrite non directory " + dstPath
                    + " with directory " + s);
              }
            }
            throw new HadoopException("Failed to rename " + s + " to " + dstPath);

Examples of org.apache.hadoop.fs.FileStatus

      try {
        Path src = new Path(uri);
        FileSystem srcFs = getFS(src);

        for (Path p : FileUtil.stat2Paths(srcFs.globStatus(src), src)) {
          FileStatus status = srcFs.getFileStatus(p);
          if (status.isDir() && !recursive) {
            throw new IllegalStateException("Cannot remove directory \"" + src
                + "\", if recursive deletion was not specified");
          }
          if (!skipTrash) {
            try {

Examples of org.apache.hadoop.fs.FileStatus

        boolean waitUntilDone = (secondsToWait == 0);
        long timeLeft = TimeUnit.SECONDS.toMillis(secondsToWait);

        for (Path path : waitList) {
          FileSystem srcFs = getFS(path);
          FileStatus status = srcFs.getFileStatus(path);
          long len = status.getLen();

          boolean done = false;

          while (!done) {
            BlockLocation[] locations = srcFs.getFileBlockLocations(status, 0, len);

Examples of org.apache.hadoop.fs.FileStatus

        throw new HadoopException("Cannot set replication for " + src);
      }
    }
    else {
      if (recursive) {
        FileStatus[] items = srcFs.listStatus(src);
        if (!ObjectUtils.isEmpty(items)) {
          for (FileStatus status : items) {
            setrep(replication, recursive, srcFs, status.getPath(), waitList);
          }
        }

Examples of org.apache.hadoop.fs.FileStatus

  public void touchz(String... uris) {
    for (String uri : uris) {
      try {
        Path src = new Path(uri);
        FileSystem srcFs = getFS(src);
        FileStatus st;
        if (srcFs.exists(src)) {
          st = srcFs.getFileStatus(src);
          if (st.isDir()) {
            // TODO: handle this
            throw new IllegalArgumentException(src + " is a directory");
          }
          else if (st.getLen() != 0) {
            throw new IllegalArgumentException(src + " must be a zero-length file");
          }
        }
        else {
          IOUtils.closeStream(srcFs.create(src));
        }

Examples of org.apache.hadoop.fs.FileStatus

        final Store<ByteArray, byte[], byte[]> store =
            new BdbStorageConfiguration(new VoldemortConfig(new Props(new File(serverPropsFile))))
                .getStore(TestUtils.makeStoreDefinition(storeName),
                          TestUtils.makeSingleNodeRoutingStrategy());

        final AtomicInteger obsoletes = new AtomicInteger(0);

        Path jsonFilePath = new Path(jsonDataFile);
        FileStatus jsonFileStatus = jsonFilePath.getFileSystem(new Configuration())
                                                .listStatus(jsonFilePath)[0];
        final SequenceFileRecordReader<BytesWritable, BytesWritable> reader =
            new SequenceFileRecordReader<BytesWritable, BytesWritable>(
                new Configuration(),
                new FileSplit(jsonFilePath, 0, jsonFileStatus.getLen(), (String[]) null));

        PerformanceTest readWriteTest = new PerformanceTest() {

            @Override

Examples of org.apache.hadoop.fs.FileStatus

        final Store<ByteArray, byte[], byte[]> store =
            new MysqlStorageConfiguration(new VoldemortConfig(new Props(new File(serverPropsFile))))
                .getStore(TestUtils.makeStoreDefinition(storeName),
                          TestUtils.makeSingleNodeRoutingStrategy());

        final AtomicInteger obsoletes = new AtomicInteger(0);

        Path jsonFilePath = new Path(jsonDataFile);
        FileStatus jsonFileStatus = jsonFilePath.getFileSystem(new Configuration())
                                                .listStatus(jsonFilePath)[0];
        final SequenceFileRecordReader<BytesWritable, BytesWritable> reader =
            new SequenceFileRecordReader<BytesWritable, BytesWritable>(
                new Configuration(),
                new FileSplit(jsonFilePath, 0, jsonFileStatus.getLen(), (String[]) null));

        PerformanceTest readWriteTest = new PerformanceTest() {

            @Override

Examples of org.apache.hadoop.fs.FileStatus

            te.printStackTrace();
            logger.error("Error thrown while trying to get Hadoop filesystem");
            System.exit(-1);
        }

        FileStatus status = fs.listStatus(p)[0];
        long size = status.getLen();
        HdfsFetcher fetcher = new HdfsFetcher(null,
                                              maxBytesPerSec,
                                              VoldemortConfig.REPORTING_INTERVAL_BYTES,
                                              VoldemortConfig.DEFAULT_BUFFER_SIZE,
                                              0,

Examples of org.apache.hadoop.fs.FileStatus

  @Test
  public void testAlterTableDMl() throws HIHOException, IOException {
    Configuration conf = mock(Configuration.class);
    Path path = mock(Path.class);
    FileStatus status1 = mock(FileStatus.class);
    Path path1 = mock(Path.class);
    when(path1.getName()).thenReturn("part-xxxxx");
    when(status1.getPath()).thenReturn(path1);
    FileStatus status2 = mock(FileStatus.class);
    Path path2 = mock(Path.class);
    when(path2.getName()).thenReturn("part-yyyyy");
    when(status2.getPath()).thenReturn(path2);
    FileSystem fs = mock(FileSystem.class);
    when(fs.listStatus(path)).thenReturn(
        new FileStatus[] { status1, status2 });
    when(path.getFileSystem(conf)).thenReturn(fs);
    when(conf.get(HIHOConf.EXTERNAL_TABLE_DML))

Examples of org.apache.hadoop.fs.FileStatus

   * @return a Map object (JSON friendly) with the file status.
   * @throws IOException thrown if an IO error occurred.
   */
  @Override
  public Map execute(FileSystem fs) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    return FSUtils.fileStatusToJSON(status, HoopServer.get().getBaseUrl());
  }
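
FSUtils.fileStatusToJSON() above is Hoop's own helper and its exact output format is not shown in this excerpt. As a rough, hypothetical sketch of what flattening a FileStatus into a JSON-friendly Map can look like (field names here are assumptions, not Hoop's actual schema):

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.fs.FileStatus;

public class FileStatusJson {

    // Hypothetical helper: flattens a FileStatus into a JSON-friendly Map.
    // Field names are illustrative only, not Hoop's actual JSON schema.
    public static Map<String, Object> toMap(FileStatus status) {
        Map<String, Object> json = new LinkedHashMap<String, Object>();
        json.put("path", status.getPath().toUri().getPath());
        json.put("isDir", status.isDir());
        json.put("len", status.getLen());
        json.put("owner", status.getOwner());
        json.put("group", status.getGroup());
        json.put("permission", status.getPermission().toString());
        json.put("modificationTime", status.getModificationTime());
        return json;
    }
}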