Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.LocalFileSystem
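
A minimal, self-contained sketch (not taken from the excerpts below) of the two usual ways to obtain a LocalFileSystem: through FileSystem.getLocal(conf), which returns the checksummed local implementation, or by wrapping a RawLocalFileSystem directly, as the first excerpt does. The class name and file path here are illustrative only.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.LocalFileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RawLocalFileSystem;

  public class LocalFileSystemExample {
    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();

      // Preferred: the factory returns the checksummed local file system.
      LocalFileSystem lfs = FileSystem.getLocal(conf);

      // Alternative: wrap a raw (non-checksummed) local file system explicitly.
      RawLocalFileSystem raw = new RawLocalFileSystem();
      raw.setConf(conf);
      LocalFileSystem wrapped = new LocalFileSystem(raw);

      // Write a small file, report its length, then clean up (example path only).
      Path path = new Path("/tmp/localfs-example.txt");
      try (FSDataOutputStream out = lfs.create(path, true)) {
        out.writeUTF("hello");
      }
      System.out.println("length = " + lfs.getFileStatus(path).getLen());
      lfs.delete(path, false);
    }
  }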


  public static void main(String[] argv) throws IOException {
    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", 64 * 1024);   // 64 KB I/O buffers

    // Wrap a raw local file system in the checksumming LocalFileSystem.
    RawLocalFileSystem rlfs = new RawLocalFileSystem();
    rlfs.setConf(conf);
    LocalFileSystem lfs = new LocalFileSystem(rlfs);

    Path path = new Path("/Users/ryan/rfile.big.txt");
    long start = System.currentTimeMillis();
    SimpleBlockCache cache = new SimpleBlockCache();
    //LruBlockCache cache = new LruBlockCache();


    List<SoftReference<FaultyOutputStream>> outStreams =
      new ArrayList<SoftReference<FaultyOutputStream>>();
    private long faultPos = 200;

    public FaultyFileSystem() {
      // Delegate to a plain LocalFileSystem; faults are injected by this wrapper.
      super(new LocalFileSystem());
      System.err.println("Creating faulty!");
    }

  /**
   * Returns the octal permission string of the temporary directory, used as a
   * proxy for the current umask; returns null if it cannot be determined.
   */
  private String getCurrentUmask(String tmpDir, Configuration config) throws IOException {
    try {
      LocalFileSystem localFS = FileSystem.getLocal(config);
      return Integer.toOctalString(localFS.getFileStatus(new Path(getTmpDir())).getPermission().toShort());
    } catch (Exception e) {
      return null;
    }
  }
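
As a follow-up to the helper above: the octal string it returns can be compared with what Hadoop's configured umask predicts. A hedged sketch, assuming org.apache.hadoop.fs.permission.FsPermission is imported; the method name and the /tmp path are illustrative only.

  // Sketch: FsPermission.getUMask(conf) reads the configured umask and
  // applyUMask(...) applies it to a base mode; both are standard Hadoop APIs.
  static void printUmaskCheck(Configuration conf) throws IOException {
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission umask = FsPermission.getUMask(conf);                          // e.g. 022
    FsPermission expected = new FsPermission((short) 0777).applyUMask(umask);  // e.g. 755
    FsPermission actual = localFS.getFileStatus(new Path("/tmp")).getPermission();
    System.out.println("expected=" + Integer.toOctalString(expected.toShort())
        + " actual=" + Integer.toOctalString(actual.toShort()));
  }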

    }
   
    // do the deletion, after releasing the global lock
    for (CacheStatus lcacheStatus : deleteList) {
      synchronized (lcacheStatus) {
        LocalFileSystem localFS = FileSystem.getLocal(conf);
        Path potentialDeletee = lcacheStatus.localizedLoadPath;

        deleteLocalPath(asyncDiskService, localFS, potentialDeletee);

        // Update the maps baseDirSize and baseDirNumberSubDir

   * no directory from this directory list can be created.
   * @throws IOException
   */
  public static DataNode makeInstance(String[] dataDirs, Configuration conf,
      SecureResources resources) throws IOException {
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    ArrayList<File> dirs = new ArrayList<File>();
    FsPermission dataDirPermission =
      new FsPermission(conf.get(DATA_DIR_PERMISSION_KEY,
                                DEFAULT_DATA_DIR_PERMISSION));
    for (String dir : dataDirs) {
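
The loop body above is truncated. Independent of what the original does there, here is a hedged sketch of a common way to perform the same kind of check: probe each configured directory with Hadoop's DiskChecker and keep only the usable ones. The method name and error handling are illustrative, and the usual java.io, java.util and Hadoop imports are assumed.

  static ArrayList<File> usableDataDirs(String[] dataDirs, LocalFileSystem localFS,
      FsPermission permission) {
    ArrayList<File> dirs = new ArrayList<File>();
    for (String dir : dataDirs) {
      try {
        // Verifies the directory is usable with the expected permission (creating it if needed).
        DiskChecker.checkDir(localFS, new Path(dir), permission);
        dirs.add(new File(dir));
      } catch (IOException e) {
        // DiskErrorException extends IOException, so unusable directories land here.
        System.err.println("Invalid directory in data dir list: " + dir + " - " + e.getMessage());
      }
    }
    return dirs;
  }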

    public void io_fragment() throws Exception {
        File file = folder.newFile();
        // Reuse the fresh temporary path, but let the format create the file itself.
        Assume.assumeThat(file.delete() || !file.exists(), is(true));

        ParquetFileFormat<MockSimple> format = format(MockSimple.class);
        LocalFileSystem fs = FileSystem.getLocal(format.getConf());
        ModelOutput<MockSimple> output = format.createOutput(
                MockSimple.class,
                fs, new Path(file.toURI()),
                new Counter());
        try {
            output.write(new MockSimple(100, "Hello, world!"));
        } finally {
            output.close();
        }
        assertThat(file.exists(), is(true));

        FileStatus stat = fs.getFileStatus(new Path(file.toURI()));
        List<DirectInputFragment> fragments = format.computeInputFragments(new StripedDataFormat.InputContext(
                MockSimple.class,
                Arrays.asList(stat), fs,
                -1L, -1L,
                false, false));

    }

    private <T> File save(ParquetFileFormat<T> format, List<T> values) throws IOException, InterruptedException {
        File file = folder.newFile();
        Assume.assumeThat(file.delete() || !file.exists(), is(true));
        LocalFileSystem fs = FileSystem.getLocal(format.getConf());
        ModelOutput<T> output = format.createOutput(
                format.getSupportedType(),
                fs, new Path(file.toURI()),
                new Counter());
        try {

        assertThat(file.exists(), is(true));
        return file;
    }

    private <T> List<T> load(ParquetFileFormat<T> format, File file) throws IOException, InterruptedException {
        LocalFileSystem fs = FileSystem.getLocal(format.getConf());
        ModelInput<T> input = format.createInput(
                format.getSupportedType(),
                fs, new Path(file.toURI()),
                0, file.length(),
                new Counter());

    public void io_fragment() throws Exception {
        File file = folder.newFile();
        Assume.assumeThat(file.delete() || !file.exists(), is(true));

        OrcFileFormat<MockSimple> format = format(MockSimple.class);
        LocalFileSystem fs = FileSystem.getLocal(format.getConf());
        ModelOutput<MockSimple> output = format.createOutput(
                MockSimple.class,
                fs, new Path(file.toURI()),
                new Counter());
        try {
            output.write(new MockSimple(100, "Hello, world!"));
        } finally {
            output.close();
        }
        assertThat(file.exists(), is(true));

        FileStatus stat = fs.getFileStatus(new Path(file.toURI()));
        List<DirectInputFragment> fragments = format.computeInputFragments(new StripedDataFormat.InputContext(
                MockSimple.class,
                Arrays.asList(stat), fs,
                -1L, -1L,
                false, false));

    }

    private <T> File save(OrcFileFormat<T> format, List<T> values) throws IOException, InterruptedException {
        File file = folder.newFile();
        Assume.assumeThat(file.delete() || !file.exists(), is(true));
        LocalFileSystem fs = FileSystem.getLocal(format.getConf());
        ModelOutput<T> output = format.createOutput(
                format.getSupportedType(),
                fs, new Path(file.toURI()),
                new Counter());
        try {
