Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.UTF8
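
org.apache.hadoop.io.UTF8 is a WritableComparable that stores a string in UTF-8 encoded form and serializes it through the standard Writable contract: write(DataOutput) and readFields(DataInput). The class is deprecated in favor of org.apache.hadoop.io.Text, but it appears throughout early HDFS and MapReduce code, as the excerpts below show.

A minimal, self-contained sketch of the serialization round-trip the excerpts rely on (the class and stream setup here are illustrative, not taken from the excerpts):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.UTF8;

public class UTF8RoundTrip {
  public static void main(String[] args) throws IOException {
    // serialize a UTF8 value
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    new UTF8("/user/example/path").write(out);
    out.flush();

    // deserialize it back with readFields, exactly as the excerpts do
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    UTF8 name = new UTF8();
    name.readFields(in);
    System.out.println(name);   // prints /user/example/path
  }
}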


        // loading a file system image: reject unsupported versions, then read per-file info
          throw new IncorrectVersionException(imgVersion, "file system image");

        // read file info
        short replication = (short) conf.getInt("dfs.replication", 3);
        for (int i = 0; i < numFiles; i++) {
          UTF8 name = new UTF8();
          name.readFields(in);
          // version 0 does not support per-file replication; other versions store it
          if (imgVersion < 0) {
            replication = in.readShort();
            replication = FSEditLog.adjustReplication(replication, conf);
          }


                  FSDirectory.INode root,
                  DataOutputStream out) throws IOException {
    // saving an image: write each node's full path as a UTF8, then its replication
    String fullName = "";
    if (root.getParent() != null) {
      fullName = parentPrefix + "/" + root.getLocalName();
      new UTF8(fullName).write(out);
      out.writeShort(root.getReplication());
      if (root.isDir()) {
        out.writeInt(0);
      } else {
        int nrBlocks = root.getBlocks().length;

        // fsck repair: copy a corrupted file's blocks into chained files under /lost+found
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(new UTF8(target.toString() + "/" + chain), true);
          if (fos != null) chain++;
        }
        if (fos == null) {
          LOG.warn(errmsg + ": could not store chain " + chain);
          // perhaps we should bail out here...
          // return;
          continue;
        }

        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          LOG.warn(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(new UTF8(file.getPath()));
    } catch (Exception e) {
      e.printStackTrace();
      LOG.warn(errmsg + ": " + e.getMessage());
    }
  }

  }
 
  private void lostFoundInit(DFSClient dfs) {
    lfInited = true;
    try {
      UTF8 lfName = new UTF8("/lost+found");
      // check that /lost+found exists
      if (!dfs.exists(lfName)) {
        lfInitedOk = dfs.mkdirs(lfName);
        lostFound = lfName;
      } else if (!dfs.isDirectory(lfName)) {
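
The two fsck excerpts above also show the old DFSClient surface: file operations such as exists, mkdirs, isDirectory, create, open, and delete took UTF8 paths directly, rather than Path or String. A condensed fragment of that pattern, assuming an already-connected DFSClient named dfs (the path is hypothetical):

    UTF8 path = new UTF8("/lost+found/example");
    if (!dfs.exists(path)) {
      OutputStream out = dfs.create(path, true);   // second argument: overwrite
      out.close();
    }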

        //
        //           write nBytes of data using randomDataGenerator to numFiles
        //
        ArrayList<UTF8> testfilesList = new ArrayList<UTF8>();
        byte[] buffer = new byte[bufferSize];
        UTF8 testFileName = null;
        for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
          testFileName = new UTF8("/f" + iFileNumber);
          testfilesList.add(testFileName);
          OutputStream nos = dfsClient.create(testFileName, false);
          try {
            for (long nBytesWritten = 0L;
                 nBytesWritten < nBytes;

  // old mapred API: emit the processed line as a UTF8 key with an empty UTF8 value
  public static class Map extends MapReduceBase implements Mapper {
    public void map(WritableComparable key, Writable value,
                    OutputCollector output, Reporter reporter) throws IOException {
      String line = value.toString();
      output.collect(new UTF8(process(line)), new UTF8(""));
    }

  // old mapred API: pass each value through, re-wrapped as a UTF8
  public static class Reduce extends MapReduceBase implements Reducer {
    public void reduce(WritableComparable key, Iterator values,
                       OutputCollector output, Reporter reporter) throws IOException {
      while (values.hasNext()) {
        output.collect(key, new UTF8(values.next().toString()));
      }
    }
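
These Map and Reduce classes use the old org.apache.hadoop.mapred API; UTF8 can serve as a key or value type there because it implements WritableComparable. A sketch of how such classes might be wired into a job (the driver class, job name, and paths are hypothetical, and the sketch assumes the Map and Reduce classes above are in scope):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class UTF8JobDriver {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(UTF8JobDriver.class);
    conf.setJobName("utf8-example");        // hypothetical job name

    conf.setMapperClass(Map.class);         // the Map class shown above
    conf.setReducerClass(Reduce.class);     // the Reduce class shown above

    // UTF8 implements WritableComparable, so it works as both key and value type
    conf.setOutputKeyClass(UTF8.class);
    conf.setOutputValueClass(UTF8.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    JobClient.runJob(conf);
  }
}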

  CRC32 overallChecksum = new CRC32();
 
  private void verifyDir(DFSClient client, String dir)
                                           throws IOException {
   
    DFSFileInfo[] fileArr = client.listPaths(new UTF8(dir));
    TreeMap<String, Boolean> fileMap = new TreeMap<String, Boolean>();
   
    for(DFSFileInfo file : fileArr) {
      String path = file.getPath();
      fileMap.put(path, Boolean.valueOf(file.isDir()));
    }
   
    for(Iterator<String> it = fileMap.keySet().iterator(); it.hasNext();) {
      String path = it.next();
      boolean isDir = fileMap.get(path);
     
      overallChecksum.update(path.getBytes());
     
      if ( isDir ) {
        verifyDir(client, path);
      } else {
        // this is not a directory. Checksum the file data.
        CRC32 fileCRC = new CRC32();
        FSInputStream in = client.open(new UTF8(path));
        byte[] buf = new byte[4096];
        int nRead = 0;
        while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) {
          fileCRC.update(buf, 0, nRead);
        }
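
A closing note: UTF8 is deprecated, and org.apache.hadoop.io.Text is its replacement. Text follows the same write/readFields contract, so migrating code like the excerpts above is usually mechanical. A minimal fragment (stream setup omitted, mirroring the round-trip sketch at the top):

    Text path = new Text("/user/example/path");
    path.write(out);                // same Writable contract as UTF8

    Text copy = new Text();
    copy.readFields(in);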
