Examples of FSOutputStream
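
The snippets on this page all follow the same basic pattern: obtain an FSOutputStream from a DFSClient or a FileSystem, write bytes into it, then flush and close the stream in a finally block. As a minimal sketch of that pattern (assuming an already-initialized dfsClient from the same old Hadoop API these examples use; the path is a placeholder):

    // Minimal usage sketch; dfsClient and the path are placeholders.
    FSOutputStream out = dfsClient.create(new UTF8("/example"), true);
    try {
      out.write("hello".getBytes());
    } finally {
      out.flush();  // push any buffered bytes out
      out.close();  // finish the file
    }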


Examples of org.apache.hadoop.fs.FSOutputStream

        byte[] buffer = new byte[bufferSize];
        UTF8 testFileName = null;
        for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
          testFileName = new UTF8("/f" + iFileNumber);
          testfilesList.add(testFileName);
          FSOutputStream nos = dfsClient.create(testFileName, false);
          try {
            for (long nBytesWritten = 0L;
                 nBytesWritten < nBytes;
                 nBytesWritten += buffer.length) {
              if ((nBytesWritten + buffer.length) > nBytes) {
                // calculate byte count needed to exactly hit nBytes in length
                //  to keep randomDataGenerator in sync during the verify step
                int pb = (int) (nBytes - nBytesWritten);
                byte[] bufferPartial = new byte[pb];
                randomDataGenerator.nextBytes(bufferPartial);
                nos.write(bufferPartial);
              } else {
                randomDataGenerator.nextBytes(buffer);
                nos.write(buffer);
              }
            }
          } finally {
            nos.flush();
            nos.close();
          }
        }

        //
        // No need to wait for blocks to be replicated because replication

Examples of org.apache.hadoop.fs.FSOutputStream

        System.err.println(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      FSOutputStream fos = null;
      for (int i = 0; i < blocks.length; i++) {
        LocatedBlock lblock = blocks[i];
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(new UTF8(target.toString() + "/" + chain), true);
          if (fos != null) chain++;
        }
        if (fos == null) {
          System.err.println(errmsg + ": could not store chain " + chain);
          // perhaps we should bail out here...
          // return;
          continue;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          System.err.println(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      System.err.println("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(new UTF8(file.getPath()));
    } catch (Exception e) {
      e.printStackTrace();
      System.err.println(errmsg + ": " + e.getMessage());
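
Each time a chain ends or a block copy fails, the example flushes, closes, and nulls out fos so that the next block starts a fresh chain file. A small hypothetical helper (closeChain is not part of the original code) makes that recurring step explicit:

    // Hypothetical helper: flush and close the current chain stream.
    // Returns null so the caller can write: fos = closeChain(fos);
    private static FSOutputStream closeChain(FSOutputStream fos) throws IOException {
      if (fos != null) {
        fos.flush();
        fos.close();
      }
      return null;
    }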

Examples of org.apache.hadoop.fs.FSOutputStream

    //
    //           write filesize of data to file
    //
    byte[] buffer = new byte[BUFFER_SIZE];
    UTF8 testFileName = new UTF8(filename); // file name supplied by the caller
    FSOutputStream nos = dfsClient.create(testFileName, false);
    try {
      for (long nBytesWritten = 0L;
                nBytesWritten < fileSize;
                nBytesWritten += buffer.length) {
        if ((nBytesWritten + buffer.length) > fileSize) {
          // write only the remaining bytes so the file is exactly fileSize long
          int pb = (int) (fileSize - nBytesWritten);
          byte[] bufferPartial = new byte[pb];
          for (int i = 0; i < pb; i++) {
            bufferPartial[i] = 'a';
          }
          nos.write(bufferPartial);
        } else {
          for (int i = 0; i < buffer.length; i++) {
            buffer[i] = 'a';
          }
          nos.write(buffer);
        }
      }
    } finally {
      nos.flush();
      nos.close();
    }
  }


Examples of org.apache.hadoop.fs.FSOutputStream

  static final int numDatanodes = 4;

  private void writeFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
    // create and write a file that contains three blocks of data
    FSOutputStream stm = fileSys.createRaw(name, true, (short)repl,
        (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }

Examples of org.apache.hadoop.fs.FSOutputStream

  static final int blockSize = 1;
  static final int fileSize = 20;

  private void writeFile(FileSystem fileSys, Path name) throws IOException {
    // create and write a file of fileSize bytes; with blockSize 1 it spans many one-byte blocks
    FSOutputStream stm = fileSys.createRaw(name, true, (short)1,
        (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }
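
Both writeFile variants fill the file with bytes from a Random seeded with a shared seed, so a test can verify the contents by regenerating the same sequence. A sketch of that read-back check (checkFile is not in the original; openRaw is assumed to be the companion raw-open call of the same old FileSystem API):

    // Hypothetical verification step: regenerate the expected bytes
    // from the shared seed and compare them with what was written.
    private void checkFile(FileSystem fileSys, Path name) throws IOException {
      byte[] expected = new byte[fileSize];
      new Random(seed).nextBytes(expected);
      byte[] actual = new byte[fileSize];
      FSInputStream stm = fileSys.openRaw(name);
      try {
        int off = 0;
        while (off < actual.length) {
          int n = stm.read(actual, off, actual.length - off);
          if (n < 0) {
            throw new IOException("Unexpected end of file " + name);
          }
          off += n;
        }
      } finally {
        stm.close();
      }
      if (!java.util.Arrays.equals(expected, actual)) {
        throw new IOException("Contents of " + name + " do not match expected data");
      }
    }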

Examples of org.apache.hadoop.fs.FSOutputStream

    // as tasks with side effect may write to locations not set in jobconf
    // as output path.
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists : " + f);
    }
    FSOutputStream stream =
      baseFS.createRaw(setupFile(f, overwrite), overwrite, replication, blockSize);
    finalNameToFileInfo.get(f).setOpenFileStream(stream);
    return stream;
  }

Examples of org.apache.hadoop.fs.FSOutputStream

      Progressable progress)
      throws IOException {
    if (baseFS.exists(f) && !overwrite) {
      throw new IOException("Error creating file - already exists : " + f);
    }
    FSOutputStream stream =
      baseFS.createRaw(setupFile(f, overwrite), overwrite, replication,
          blockSize, progress);
    finalNameToFileInfo.get(f).setOpenFileStream(stream);
    return stream;
  }
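
In both overloads the wrapper refuses to clobber an existing file unless overwrite is set, delegates the actual creation to baseFS.createRaw, and records the returned FSOutputStream in finalNameToFileInfo, presumably so the open side-effect file can be tracked until the task commits.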

Examples of org.apache.hadoop.fs.FSOutputStream

        LOG.warn(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      FSOutputStream fos = null;
      for (int i = 0; i < blocks.length; i++) {
        LocatedBlock lblock = blocks[i];
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(new UTF8(target.toString() + "/" + chain), true);
          if (fos != null) chain++;
        }
        if (fos == null) {
          LOG.warn(errmsg + ": could not store chain " + chain);
          // perhaps we should bail out here...
          // return;
          continue;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          LOG.warn(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(new UTF8(file.getPath()));
    } catch (Exception e) {
      e.printStackTrace();
      LOG.warn(errmsg + ": " + e.getMessage());