Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSOutputStream
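FSOutputStream is the raw output stream returned by FileSystem.createRaw(...) in older Hadoop releases; current releases return an FSDataOutputStream from FileSystem.create(...) instead. The snippets below all follow the same pattern: obtain the stream from createRaw, write a buffer, and close it. Here is a minimal, self-contained sketch of that pattern, assuming such an older release where createRaw and FileSystem.getLocal(...) are available; the class name, path, seed, and block size are illustrative only.

  import java.io.IOException;
  import java.util.Random;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class FSOutputStreamSketch {
    public static void main(String[] args) throws IOException {
      // Local file system for illustration; any FileSystem that implements
      // createRaw (e.g. the S3 or DFS implementations used below) works the same way.
      FileSystem fs = FileSystem.getLocal(new Configuration());
      Path file = new Path("/tmp/fsoutputstream-sketch");   // illustrative path
      long blockSize = 1024L * 1024L;                       // illustrative block size

      // Fill a one-block buffer with deterministic pseudo-random bytes,
      // as the test snippets on this page do.
      byte[] buffer = new byte[(int) blockSize];
      new Random(1L).nextBytes(buffer);

      // overwrite = true, replication = 1
      FSOutputStream out = fs.createRaw(file, true, (short) 1, blockSize);
      out.write(buffer);
      out.close();

      System.out.println("wrote " + fs.getLength(file) + " bytes to " + file);
    }
  }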



  private void writeFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
    // create and write a file that contains three blocks of data
    FSOutputStream stm = fileSys.createRaw(name, true, (short)repl,
        (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }


  private void writeReadAndDelete(int len) throws IOException {
    Path path = new Path("/test/hadoop/file");
   
    s3FileSystem.mkdirs(path.getParent());

    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, len);
    out.close();

    assertTrue("Exists", s3FileSystem.exists(path));
   
    assertEquals("Block size", Math.min(len, BLOCK_SIZE), s3FileSystem.getBlockSize(path));

      fail("Should throw IOException.");
    } catch (IOException e) {
      // Expected
    }
   
    FSOutputStream out = s3FileSystem.createRaw(path, true, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE / 2);
    out.close();
   
    assertTrue("Exists", s3FileSystem.exists(path));
    assertEquals("Length", BLOCK_SIZE / 2, s3FileSystem.getLength(path));
   
  }
View Full Code Here

    assertEquals("Source exists", srcExists, s3FileSystem.exists(src));
    assertEquals("Destination exists", dstExists, s3FileSystem.exists(dst));
  }

  private void createEmptyFile(Path path) throws IOException {
    // Note: despite the method name, this writes one full block of data.
    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE);
    out.close();
  }


    // as tasks with side effect may write to locations not set in jobconf
    // as output path.
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists : " + f);
    }
    FSOutputStream stream =
      baseFS.createRaw(setupFile(f, overwrite), overwrite, replication, blockSize);
    finalNameToFileInfo.get(f).setOpenFileStream(stream);
    return stream;
  }

      Progressable progress)
      throws IOException {
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists : " + f);
    }
    FSOutputStream stream =
      baseFS.createRaw(setupFile(f, overwrite), overwrite, replication,
          blockSize, progress);
    finalNameToFileInfo.get(f).setOpenFileStream(stream);
    return stream;
  }
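The two create fragments above share the same guard-and-delegate shape: refuse to clobber an existing file unless overwrite was requested, then hand the actual creation to the wrapped file system. The stripped-down sketch below shows only that shape; guardedCreate is a hypothetical name, and the originals' bookkeeping through setupFile(...) and finalNameToFileInfo is deliberately left out.

  // Sketch of the guard-and-delegate pattern only; not the original wrapper class.
  private FSOutputStream guardedCreate(FileSystem baseFS, Path f, boolean overwrite,
                                       short replication, long blockSize)
      throws IOException {
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists : " + f);
    }
    // Delegate to the wrapped file system's raw create.
    return baseFS.createRaw(f, overwrite, replication, blockSize);
  }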


  public void testOverwrite() throws IOException {
    Path path = new Path("/test/hadoop/file");
   
    s3FileSystem.mkdirs(path.getParent());

    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE);
    out.close();
   
    assertTrue("Exists", s3FileSystem.exists(path));
    assertEquals("Length", BLOCK_SIZE, s3FileSystem.getLength(path));
   
    try {
      s3FileSystem.createRaw(path, false, (short) 1, 128);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // Expected
    }
   
    out = s3FileSystem.createRaw(path, true, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE / 2);
    out.close();
   
    assertTrue("Exists", s3FileSystem.exists(path));
    assertEquals("Length", BLOCK_SIZE / 2, s3FileSystem.getLength(path));
   
  }

   
    Path path = new Path("/test/hadoop/file");
   
    s3FileSystem.mkdirs(path.getParent());

    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, len);
    out.close();

    assertTrue("Exists", s3FileSystem.exists(path));

    Path newPath = new Path("/test/hadoop/newfile");
    s3FileSystem.rename(path, newPath);
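The fragment above stops right after the rename call. As a usage sketch only (not the original test's assertions), verifying a rename with this API would typically look like the following; path, newPath, len, and s3FileSystem are the names already in scope in the fragment.

    // Hypothetical follow-up checks after s3FileSystem.rename(path, newPath).
    assertFalse("Source still exists after rename", s3FileSystem.exists(path));
    assertTrue("Destination exists after rename", s3FileSystem.exists(newPath));
    assertEquals("Length preserved by rename", len, s3FileSystem.getLength(newPath));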
