Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSOutputStream


        byte[] buffer = new byte[bufferSize];
        UTF8 testFileName = null;
        for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
          testFileName = new UTF8("/f" + iFileNumber);
          testfilesList.add(testFileName);
          FSOutputStream nos = dfsClient.create(testFileName, false);
          try {
            for (long nBytesWritten = 0L;
                 nBytesWritten < nBytes;
                 nBytesWritten += buffer.length) {
              if ((nBytesWritten + buffer.length) > nBytes) {
                // calculate byte count needed to exactly hit nBytes in length
                //  to keep randomDataGenerator in sync during the verify step
                int pb = (int) (nBytes - nBytesWritten);
                byte[] bufferPartial = new byte[pb];
                randomDataGenerator.nextBytes(bufferPartial);
                nos.write(bufferPartial);
              } else {
                randomDataGenerator.nextBytes(buffer);
                nos.write(buffer);
              }
            }
          } finally {
            nos.flush();
            nos.close();
          }
        }

        //
        // No need to wait for blocks to be replicated because replication
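The writer's partial-buffer logic exists for the verify pass: java.util.Random consumes state per nextBytes call, so a second Random seeded the same way can regenerate the expected bytes only if the verify step requests chunks of exactly the same sizes the writer produced. A minimal sketch of that verify step, assuming the same dfsClient, testfilesList, and bufferSize as above; randomSeed is a hypothetical name for whatever seeded randomDataGenerator, and dfsClient.open(UTF8) is assumed to return an FSInputStream as in this era's DFSClient:

        // sketch only: regenerate the writer's byte stream with an identically
        // seeded Random and compare it against what DFS stored
        Random verifyGenerator = new Random(randomSeed);
        byte[] actual = new byte[bufferSize];
        for (int iFile = 0; iFile < testfilesList.size(); iFile++) {
          UTF8 fileName = (UTF8) testfilesList.get(iFile);
          FSInputStream nis = dfsClient.open(fileName);
          try {
            int nRead;
            while ((nRead = readFully(nis, actual)) > 0) {
              // request exactly as many bytes as the writer generated per chunk,
              // so the generator stays in sync across the final partial buffer
              byte[] expected = new byte[nRead];
              verifyGenerator.nextBytes(expected);
              for (int i = 0; i < nRead; i++) {
                if (actual[i] != expected[i]) {
                  throw new IOException("mismatch in " + fileName + " at byte " + i);
                }
              }
            }
          } finally {
            nis.close();
          }
        }

where readFully is a small helper that fills the buffer as far as the stream allows:

        // fill buf from the stream as far as possible; returns 0 at end of file
        static int readFully(InputStream in, byte[] buf) throws IOException {
          int total = 0;
          while (total < buf.length) {
            int n = in.read(buf, total, buf.length - total);
            if (n < 0) break;
            total += n;
          }
          return total;
        }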


    //
    //           write filesize of data to file
    //
    byte[] buffer = new byte[BUFFER_SIZE];
    UTF8 testFileName = new UTF8(filename); // hardcode filename
    FSOutputStream nos = dfsClient.create(testFileName, false);
    try {
      for (long nBytesWritten = 0L;
                nBytesWritten < fileSize;
                nBytesWritten += buffer.length) {
        if ((nBytesWritten + buffer.length) > fileSize) {
          // size the final partial buffer to land exactly on fileSize
          int pb = (int) (fileSize - nBytesWritten);
          byte[] bufferPartial = new byte[pb];
          for (int i = 0; i < pb; i++) {
            bufferPartial[i] = 'a';
          }
          nos.write(bufferPartial);
        } else {
          for (int i = 0; i < buffer.length; i++) {
            buffer[i] = 'a';
          }
          nos.write(buffer);
        }
      }
    } finally {
      nos.flush();
      nos.close();
    }
  }
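Since the payload here is a constant byte, the fill only needs to happen once, and the partial-buffer branch collapses into a length computation. A compact equivalent sketch using java.util.Arrays.fill:

    // fill once up front; every write reuses the same constant contents
    Arrays.fill(buffer, (byte) 'a');
    for (long nBytesWritten = 0L; nBytesWritten < fileSize; nBytesWritten += buffer.length) {
      int toWrite = (int) Math.min(buffer.length, fileSize - nBytesWritten);
      nos.write(buffer, 0, toWrite);
    }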

        System.err.println(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      FSOutputStream fos = null;
      for (int i = 0; i < blocks.length; i++) {
        LocatedBlock lblock = blocks[i];
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(new UTF8(target.toString() + "/" + chain), true);
          if (fos != null) chain++;
        }
        if (fos == null) {
          System.err.println(errmsg + ": could not store chain " + chain);
          // perhaps we should bail out here...
          // return;
          continue;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          System.err.println(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      System.err.println("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(new UTF8(file.getPath()));
    } catch (Exception e) {
      e.printStackTrace();
      System.err.println(errmsg + ": " + e.getMessage());
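The "pity it's not abstracted" comment refers to the byte shuttling copyBlock must do itself. Stripped of the datanode handshake, the core of such a copy is a bounded stream-to-stream loop; a minimal sketch, where blockIn is an assumed InputStream already positioned at the start of the block:

      // copy exactly blockLen bytes from blockIn into the current chain file
      static void copyBlockBytes(InputStream blockIn, OutputStream fos, long blockLen)
          throws IOException {
        byte[] buf = new byte[4096];
        long remaining = blockLen;
        while (remaining > 0) {
          int n = blockIn.read(buf, 0, (int) Math.min(buf.length, remaining));
          if (n < 0) {
            throw new IOException("premature end of block: " + remaining + " bytes missing");
          }
          fos.write(buf, 0, n);
          remaining -= n;
        }
      }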

    // as tasks with side effect may write to locations not set in jobconf
    // as output path.
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists: " + f);
    }
    FSOutputStream stream =
      baseFS.createRaw(setupFile(f, overwrite), overwrite, replication, blockSize);
    finalNameToFileInfo.get(f).setOpenFileStream(stream);
    return stream;
  }

      Progressable progress)
      throws IOException {
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists: " + f);
    }
    FSOutputStream stream =
      baseFS.createRaw(setupFile(f, overwrite), overwrite, replication,
          blockSize, progress);
    finalNameToFileInfo.get(f).setOpenFileStream(stream);
    return stream;
  }
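Both overloads follow the same pattern: reject an existing file unless overwrite is set, delegate to baseFS.createRaw on the path remapped by setupFile, and register the open stream in finalNameToFileInfo so it can be tracked until the task commits. A hypothetical caller (sideEffectFS and the path are illustrative names only, not from the original code):

    // create a side-effect output through the wrapping filesystem;
    // the wrapper records the open stream for later tracking
    Path sideFile = new Path("/task/extra/part-00000");
    FSOutputStream out = sideEffectFS.createRaw(sideFile, false, (short) 3, 64L * 1024 * 1024);
    try {
      out.write("side-effect data".getBytes());
    } finally {
      out.close();
    }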

     *
     * @return the number of exceptions caught
     */
    static int createWrite() {
      int exceptions = 0;
      FSOutputStream out = null;
      boolean success = false;
      for (int index = 0; index < numFiles; index++) {
        do { // create file until it succeeds
          try {
            out = fileSys.createRaw(
                new Path(taskDir, "" + index), false, (short) 1, bytesPerBlock);
            success = true;
          } catch (IOException ioe) { success = false; exceptions++; }
        } while (!success);
        long toBeWritten = bytesPerFile;
        while (toBeWritten > 0) {
          int nbytes = (int) Math.min(buffer.length, toBeWritten);
          toBeWritten -= nbytes;
          try { // only try once
            out.write(buffer, 0, nbytes);
          } catch (IOException ioe) {
            exceptions++;
          }
        }
        do { // close file until it succeeds
          try {
            out.close();
            success = true;
          } catch (IOException ioe) { success=false; exceptions++; }
        } while (!success);
      }
      return exceptions;
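The create and close loops above share one idiom: retry the operation until it succeeds, counting each IOException so the stress test can report how much flakiness it absorbed. The same idiom extracted as a sketch (the IOAction interface is an invention for illustration, not part of the original code):

      // retry an I/O action until it succeeds; returns the number of failures
      interface IOAction {
        void run() throws IOException;
      }

      static int retryUntilSuccess(IOAction action) {
        int failures = 0;
        while (true) {
          try {
            action.run();
            return failures;
          } catch (IOException ioe) {
            failures++;
          }
        }
      }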

  Random myrand = new Random();

  private void writeFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
    // create and write a file that contains three blocks of data
    FSOutputStream stm = fileSys.createRaw(name, true, (short)repl,
        (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }

  private void writeReadAndDelete(int len) throws IOException {
    Path path = new Path("/test/hadoop/file");
   
    s3FileSystem.mkdirs(path.getParent());

    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, len);
    out.close();

    assertTrue("Exists", s3FileSystem.exists(path));
   
    assertEquals("Block size", Math.min(len, BLOCK_SIZE), s3FileSystem.getBlockSize(path));
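The method name promises a read step after these assertions; a hedged sketch of what that read-back could look like, assuming openRaw is the read counterpart of createRaw in this FileSystem API:

    // read the file back and check both its length and its contents
    FSInputStream in = s3FileSystem.openRaw(path);
    try {
      byte[] buf = new byte[len];
      int total = 0;
      while (total < len) {
        int n = in.read(buf, total, len - total);
        if (n < 0) break;
        total += n;
      }
      assertEquals("Read length", len, total);
      for (int i = 0; i < len; i++) {
        assertEquals("Position " + i, data[i], buf[i]);
      }
    } finally {
      in.close();
    }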

      fail("Should throw IOException.");
    } catch (IOException e) {
      // Expected
    }
   
    FSOutputStream out = s3FileSystem.createRaw(path, true, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE / 2);
    out.close();
   
    assertTrue("Exists", s3FileSystem.exists(path));
    assertEquals("Length", BLOCK_SIZE / 2, s3FileSystem.getLength(path));
   
  }

      assertEquals("Position " + i, data[i], buf[i]);
    }
  }

  private void createEmptyFile(Path path) throws IOException {
    // note: despite the name, this writes one full block of test data
    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE);
    out.close();
  }