Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSOutputStream
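
These snippets come from early Hadoop, where the DFS client's create() calls returned an org.apache.hadoop.fs.FSOutputStream (later releases route the same call paths through FSDataOutputStream). All of the examples share one discipline: create the stream, write, then flush and close in a finally block. Below is a minimal sketch of that pattern, assuming the DFSClient.create(UTF8, boolean) signature seen throughout the snippets; the writeSmallFile helper and the org.apache.hadoop.dfs package location are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.dfs.DFSClient; // assumed package location in early Hadoop
import org.apache.hadoop.fs.FSOutputStream;
import org.apache.hadoop.io.UTF8;

class FSOutputStreamSketch {
  /** Write a payload and always flush/close, mirroring the examples below. */
  static void writeSmallFile(DFSClient dfsClient, String name, byte[] payload)
      throws IOException {
    FSOutputStream out = dfsClient.create(new UTF8(name), true); // true = overwrite
    try {
      out.write(payload);
    } finally {
      out.flush();
      out.close();
    }
  }
}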


        LOG.warn(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      FSOutputStream fos = null;
      for (int i = 0; i < blocks.length; i++) {
        LocatedBlock lblock = blocks[i];
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(new UTF8(target.toString() + "/" + chain), true);
          if (fos != null) chain++;
        }
        if (fos == null) {
          LOG.warn(errmsg + ": could not store chain " + chain);
          // perhaps we should bail out here...
          // return;
          continue;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          LOG.warn(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(new UTF8(file.getPath()));
    } catch (Exception e) {
      e.printStackTrace();
      LOG.warn(errmsg + ": " + e.getMessage());
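
The copyBlock call above is where the bytes actually move; as the inline comment laments, the read path is not abstracted into a reusable stream copy. A hedged sketch of what such an abstraction could look like (a hypothetical helper, not the real copyBlock, which also handles datanode connections and checksumming):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

class BlockCopySketch {
  /** Generic read/write pump: copies everything from in to out. */
  static void pump(InputStream in, OutputStream out) throws IOException {
    byte[] buf = new byte[8192];
    int n;
    while ((n = in.read(buf)) != -1) {
      out.write(buf, 0, n);
    }
  }
}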


    //
    //           write filesize of data to file
    //
    byte[] buffer = new byte[BUFFER_SIZE];
    UTF8 testFileName = new UTF8(filename); // hardcode filename
    FSOutputStream nos = dfsClient.create(testFileName, false);
    try {
      for (long nBytesWritten = 0L;
                nBytesWritten < fileSize;
                nBytesWritten += buffer.length) {
        if ((nBytesWritten + buffer.length) > fileSize) {
          int pb = (int) (fileSize - nBytesWritten);
          byte[] bufferPartial = new byte[pb];
          for (int i = 0; i < pb; i++) {
            bufferPartial[i] = 'a';
          }
          nos.write(bufferPartial); // write only the partial tail, not the full buffer
        } else {
          for (int i = 0; i < buffer.length; i++) {
            buffer[i] = 'a';
          }
          nos.write(buffer);
        }
      }
    } finally {
      nos.flush();
      nos.close();
    }
  }
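
The fill-and-write loops above can be collapsed by pre-filling one buffer and using the three-argument write for the tail. A sketch under the assumption that FSOutputStream supports the standard OutputStream.write(byte[], int, int) overload (writeAs is a hypothetical helper):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.FSOutputStream;

class FillWriteSketch {
  /** Writes exactly fileSize bytes of 'a' from a single pre-filled buffer. */
  static void writeAs(FSOutputStream nos, long fileSize, int bufferSize)
      throws IOException {
    byte[] buffer = new byte[bufferSize];
    Arrays.fill(buffer, (byte) 'a');
    for (long remaining = fileSize; remaining > 0; ) {
      int n = (int) Math.min(buffer.length, remaining);
      nos.write(buffer, 0, n); // the length argument handles the partial tail
      remaining -= n;
    }
  }
}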

        byte[] buffer = new byte[bufferSize];
        UTF8 testFileName = null;
        for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
          testFileName = new UTF8("/f" + iFileNumber);
          testfilesList.add(testFileName);
          FSOutputStream nos = dfsClient.create(testFileName, false);
          try {
            for (long nBytesWritten = 0L;
                 nBytesWritten < nBytes;
                 nBytesWritten += buffer.length) {
              if ((nBytesWritten + buffer.length) > nBytes) {
                // calculate byte count needed to exactly hit nBytes in length
                //  to keep randomDataGenerator in sync during the verify step
                int pb = (int) (nBytes - nBytesWritten);
                byte[] bufferPartial = new byte[pb];
                randomDataGenerator.nextBytes(bufferPartial);
                nos.write(bufferPartial);
              } else {
                randomDataGenerator.nextBytes(buffer);
                nos.write(buffer);
              }
            }
          } finally {
            nos.flush();
            nos.close();
          }
        }

        //
        // No need to wait for blocks to be replicated because replication
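
The comment in the tail branch explains that the exact-length partial buffer keeps randomDataGenerator in sync for a later verify step. A hedged sketch of what that verification could look like, re-seeding a java.util.Random with the seed used for writing (verifyFile and its InputStream argument are hypothetical):

import java.io.IOException;
import java.io.InputStream;
import java.util.Random;

class VerifySketch {
  /** Regenerates the expected bytes from the same seed and compares them
   *  to what is read back; false on the first mismatch or short file. */
  static boolean verifyFile(InputStream in, long nBytes, long seed, int bufferSize)
      throws IOException {
    Random gen = new Random(seed);
    byte[] actual = new byte[bufferSize];
    for (long remaining = nBytes; remaining > 0; ) {
      int n = (int) Math.min(bufferSize, remaining);
      byte[] expected = new byte[n]; // exact-length tail keeps gen in sync
      gen.nextBytes(expected);
      int off = 0;
      while (off < n) { // readFully-style loop so short reads do not desync us
        int r = in.read(actual, off, n - off);
        if (r < 0) return false;
        off += r;
      }
      for (int i = 0; i < n; i++) {
        if (actual[i] != expected[i]) return false;
      }
      remaining -= n;
    }
    return true;
  }
}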


        System.err.println(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      FSOutputStream fos = null;
      for (int i = 0; i < blocks.length; i++) {
        LocatedBlock lblock = blocks[i];
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(new UTF8(target.toString() + "/" + chain), true);
          if (fos != null) chain++;
        }
        if (fos == null) {
          System.err.println(errmsg + ": could not store chain " + chain);
          // perhaps we should bail out here...
          // return;
          continue;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          System.err.println(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      System.err.println("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(new UTF8(file.getPath()));
    } catch (Exception e) {
      e.printStackTrace();
      System.err.println(errmsg + ": " + e.getMessage());

