Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataOutputStream



  private void createTestData(Path path) throws IOException {
    FileSystem fs = FileSystem.get(getYarnCluster().getConfiguration());
    FSDataOutputStream out = fs.create(path);
    for (int i = 0; i < 300; i++) {
      out.writeBytes("line" + i + "\n");
    }
    out.close();
    assertTrue(fs.exists(path));
    assertThat(fs.getFileStatus(path).getLen(), greaterThan(0L));
  }
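The same helper can be written with try-with-resources so the stream is closed even when a write throws; a minimal sketch, assuming the same getYarnCluster() test harness and JUnit/Hamcrest asserts as above:

  private void createTestData(Path path) throws IOException {
    FileSystem fs = FileSystem.get(getYarnCluster().getConfiguration());
    // try-with-resources closes the stream even if writeBytes() throws
    try (FSDataOutputStream out = fs.create(path)) {
      for (int i = 0; i < 300; i++) {
        // note: writeBytes() keeps only the low byte of each char (fine for ASCII)
        out.writeBytes("line" + i + "\n");
      }
    }
    assertTrue(fs.exists(path));
    assertThat(fs.getFileStatus(path).getLen(), greaterThan(0L));
  }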



  private void createFile(Configuration configuration, String path) throws Exception {
    FileSystem fs = FileSystem.get(configuration);
    FSDataOutputStream out = fs.create(new Path(path));
    out.close();
    assertThat(fs.exists(new Path(path)), is(true));
  }
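For an empty marker file like this, FileSystem also offers createNewFile(Path), which returns false instead of throwing when the path already exists; a minimal sketch under the same configuration:

  private void createFile(Configuration configuration, String path) throws Exception {
    FileSystem fs = FileSystem.get(configuration);
    // createNewFile() writes a zero-length file; false means it already existed
    assertTrue(fs.createNewFile(new Path(path)));
    assertThat(fs.exists(new Path(path)), is(true));
  }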


  public void createTextFileInHDFS(String inputData, String filePath,
      String nameOfFile) throws IOException {
    FileSystem fs = getFileSystem();
    FSDataOutputStream out = null;
    Path inputFile = new Path(filePath + "/" + nameOfFile);
    try {
      out = fs.create(inputFile, false);
      byte[] data = inputData.getBytes();
      out.write(data, 0, data.length);
      out.close();
      out = null;
      // Check that the input file exists. Note: getHomeDirectory() resolves
      // relative paths, so this only matches the file written above when
      // filePath is relative.
      Path inputPath = new Path(fs.getHomeDirectory(), filePath + "/"
          + nameOfFile);
      assertTrue(fs.exists(inputPath));
    } finally {
      if (out != null) {
        out.close();
      }
    }
  }
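Two details of this helper are easy to tighten: getBytes() with no argument uses the platform default charset, and the existence check resolves filePath against the home directory, which matches the file actually written only when filePath is relative. A sketch that pins the charset and checks the path it wrote, assuming the same getFileSystem() helper and Java 7+ (java.nio.charset.StandardCharsets):

  public void createTextFileInHDFS(String inputData, String filePath,
      String nameOfFile) throws IOException {
    FileSystem fs = getFileSystem();
    Path inputFile = new Path(filePath + "/" + nameOfFile);
    // overwrite = false, matching the original: fail if the file already exists
    try (FSDataOutputStream out = fs.create(inputFile, false)) {
      out.write(inputData.getBytes(StandardCharsets.UTF_8));
    }
    assertTrue(fs.exists(inputFile));
  }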

                Path checkSumIndexFile = new Path(nodeDir, fileNamePrefix + ".index.checksum");
                Path checkSumValueFile = new Path(nodeDir, fileNamePrefix + ".data.checksum");

                if(outputFs.exists(checkSumIndexFile)) {
                    outputFs.delete(checkSumIndexFile, true);
                }
                FSDataOutputStream output = outputFs.create(checkSumIndexFile);
                outputFs.setPermission(checkSumIndexFile,
                                       new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
                output.write(this.checkSumDigestIndex.getCheckSum());
                output.close();

                if(outputFs.exists(checkSumValueFile)) {
                    outputFs.delete(checkSumValueFile, true);
                }
                output = outputFs.create(checkSumValueFile);
                outputFs.setPermission(checkSumValueFile,
                                       new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
                output.write(this.checkSumDigestValue.getCheckSum());
                output.close();
            } else {
                throw new RuntimeException("Failed to open checksum digest for node " + nodeId
                                           + " ( partition - " + this.partitionId + ", chunk - "
                                           + chunkId + " )");
            }

                    Path checkSumIndexFile = new Path(nodeDir, chunkFileName + ".index.checksum");
                    Path checkSumValueFile = new Path(nodeDir, chunkFileName + ".data.checksum");

                    if(outputFs.exists(checkSumIndexFile)) {
                        outputFs.delete(checkSumIndexFile, true);
                    }
                    FSDataOutputStream output = outputFs.create(checkSumIndexFile);
                    outputFs.setPermission(checkSumIndexFile,
                                           new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
                    output.write(this.checkSumDigestIndex[chunkId].getCheckSum());
                    output.close();

                    if(outputFs.exists(checkSumValueFile)) {
                        outputFs.delete(checkSumValueFile, true);
                    }
                    output = outputFs.create(checkSumValueFile);
                    outputFs.setPermission(checkSumValueFile,
                                           new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
                    output.write(this.checkSumDigestValue[chunkId].getCheckSum());
                    output.close();
                } else {
                    throw new RuntimeException("Failed to open checksum digest for node " + nodeId
                                               + " ( partition - " + this.partitionId
                                               + ", chunk - " + chunkId + " )");
                }
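The two snippets above repeat the same exists/delete/create/setPermission/write/close sequence four times; a small helper keeps each call site to one line. A hedged sketch, reusing outputFs, FsPermission, and HadoopStoreBuilder.HADOOP_FILE_PERMISSION from the snippets:

  private static void writeChecksumFile(FileSystem fs, Path file, byte[] checkSum)
      throws IOException {
    if (fs.exists(file)) {
      fs.delete(file, true);
    }
    // create() overwrites by default; restrict permissions before writing
    try (FSDataOutputStream output = fs.create(file)) {
      fs.setPermission(file, new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
      output.write(checkSum);
    }
  }

Each call site then collapses to, e.g., writeChecksumFile(outputFs, checkSumIndexFile, this.checkSumDigestIndex.getCheckSum());.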


                // Write metadata
                Path metadataPath = new Path(nodePath, ".metadata");
                FSDataOutputStream metadataStream = outputFs.create(metadataPath);
                outputFs.setPermission(metadataPath, new FsPermission(HADOOP_FILE_PERMISSION));
                logger.info("Setting permission to 755 for " + metadataPath);
                metadataStream.write(metadata.toJsonString().getBytes());
                metadataStream.flush();
                metadataStream.close();

            }

        } catch(Exception e) {
            logger.error("Error in Store builder", e);
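A reader that opens .metadata while the job is writing it can see a partial file. A common guard is to write to a temporary name, force the bytes out, and rename into place, since rename is atomic on HDFS. A sketch using the snippet's own outputFs, nodePath, and metadata, assuming a Hadoop 2+ client where FSDataOutputStream exposes hsync(); the .metadata.tmp name is hypothetical:

Path tmpPath = new Path(nodePath, ".metadata.tmp");  // hypothetical temp name
try (FSDataOutputStream out = outputFs.create(tmpPath, true)) {
    out.write(metadata.toJsonString().getBytes(StandardCharsets.UTF_8));
    out.hsync();  // flush to the datanodes' disks before publishing
}
// atomic on HDFS: readers see the old file or the complete new one
outputFs.rename(tmpPath, new Path(nodePath, ".metadata"));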

    if (isCompressed) {
      // the compression codec was chosen earlier in this method (elided above)
      ext = codec.getDefaultExtension();
    }

    Path file = getDefaultWorkFile(context, ext);
    FileSystem fs = file.getFileSystem(conf);
    FSDataOutputStream fileOut = fs.create(file, false);
    DataOutputStream ostream = fileOut;

    if (isCompressed) {
      ostream = new DataOutputStream(codec.createOutputStream(fileOut));
    }
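The snippet above is the tail of a record-writer setup inside an OutputFormat; the same pattern works standalone. A minimal sketch that writes a gzip-compressed text file, assuming GzipCodec (org.apache.hadoop.io.compress) and ReflectionUtils (org.apache.hadoop.util) from hadoop-common; the /tmp path is hypothetical:

Configuration conf = new Configuration();
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
Path file = new Path("/tmp/example" + codec.getDefaultExtension());  // hypothetical path
FileSystem fs = file.getFileSystem(conf);
// the codec wraps the raw FSDataOutputStream; closing the outer stream
// finishes the gzip trailer and closes the file
try (DataOutputStream ostream =
    new DataOutputStream(codec.createOutputStream(fs.create(file, false)))) {
  ostream.writeBytes("hello, compressed world\n");
}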

  private void writeFile(FileSystem fileSys, Path name, int repl,
                         int fileSize, int blockSize)
    throws IOException {
    // create and write a file that contains three blocks of data
    FSDataOutputStream stm = fileSys.create(name, true,
                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                            (short)repl, (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }
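On Hadoop 2.9 and later, the positional create() overload can be replaced with the FSDataOutputStreamBuilder returned by FileSystem.createFile(), which names each option; a hedged sketch of the equivalent write, reusing the test's seed field:

  private void writeFile(FileSystem fileSys, Path name, int repl,
                         int fileSize, int blockSize) throws IOException {
    byte[] buffer = new byte[fileSize];
    new Random(seed).nextBytes(buffer);
    try (FSDataOutputStream stm = fileSys.createFile(name)
        .overwrite(true)
        .replication((short) repl)
        .blockSize(blockSize)
        .build()) {
      stm.write(buffer);
    }
  }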

  /*
   * creates a file but does not close it
   */
  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    return fileSys.create(name, true,
                          fileSys.getConf().getInt("io.file.buffer.size", 4096),
                          (short) repl, (long) blockSize);
  }

    initBuffer(bufferSize);

    // create a new file.
    FileSystem fs = p.getFileSystem(conf);
    FSDataOutputStream stm = createFile(fs, p, 1);
    System.out.println("Created file simpleFlush.dat");

    // TODO move this bit to another test case
    // There have been a couple issues with flushing empty buffers, so do
    // some empty flushes first.
    stm.sync();
    stm.sync();
    stm.write(1);
    stm.sync();
    stm.sync();

    CountDownLatch countdown = new CountDownLatch(1);
    ArrayList<Thread> threads = new ArrayList<Thread>();
    AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
    for (int i = 0; i < numThreads; i++) {
      Thread t = new AppendTestUtil.WriterThread(stm, toWrite, thrown, countdown, numWrites);
      threads.add(t);
      t.start();
    }

    // Start all the threads at the same time for maximum raciness!
    countdown.countDown();
   
    for (Thread t : threads) {
      t.join();
    }
    if (thrown.get() != null) {
      throw new RuntimeException("Deferred", thrown.get());
    }
    stm.close();
    System.out.println("Closed file.");
  }
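The stm.sync() calls above are the pre-2.x API; on Hadoop 2+ they are deprecated in favor of hflush(), which makes written bytes visible to new readers, and hsync(), which additionally forces them to disk on the datanodes. A minimal sketch of the replacement calls, reusing fs and p from the test:

try (FSDataOutputStream stm = fs.create(p)) {
  stm.write(1);
  stm.hflush();  // visible to readers that open the file now
  stm.hsync();   // additionally durable on the datanodes' disks
}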
