Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.UTF8$TempArrays


 
  /**
   * Add a file-deletion record to the edit log.
   */
  void logDelete(String src, long timestamp) {
    UTF8[] info = new UTF8[] {
      new UTF8(src),
      FSEditLog.toLogLong(timestamp)};
    logEdit(OP_DELETE, new ArrayWritable(UTF8.class, info), null);
  }
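
A record like the one above is an ordinary Writable, so it can be serialized and restored through Hadoop's in-memory buffers. A minimal round-trip sketch (standalone, not from the edit-log code itself; the class name and values are made up):

import java.io.IOException;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;

public class Utf8ArrayRoundTrip {
  public static void main(String[] args) throws IOException {
    UTF8[] info = new UTF8[] { new UTF8("/some/path"), new UTF8("1234567890") };
    ArrayWritable record = new ArrayWritable(UTF8.class, info);

    DataOutputBuffer out = new DataOutputBuffer();
    record.write(out);                        // serialize the whole array

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    ArrayWritable restored = new ArrayWritable(UTF8.class);
    restored.readFields(in);                  // restore it

    Writable[] values = restored.get();
    System.out.println(values[0] + " " + values[1]);  // /some/path 1234567890
  }
}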


 
  /** Encode a replication factor as its decimal string, wrapped in a UTF8. */
  static UTF8 toLogReplication(short replication) {
    return new UTF8(Short.toString(replication));
  }

  /** Decode a replication factor previously written by toLogReplication. */
  static short fromLogReplication(UTF8 replication) {
    return Short.parseShort(replication.toString());
  }

  /** Encode a long (e.g. a timestamp) as its decimal string, wrapped in a UTF8. */
  static UTF8 toLogLong(long timestamp) {
    return new UTF8(Long.toString(timestamp));
  }
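
Since each helper just wraps the number's decimal string in a UTF8, encoding followed by decoding is the identity. A standalone sketch of the same round trip (the helpers are package-private, so this inlines their one-line bodies):

import org.apache.hadoop.io.UTF8;

public class LogFieldRoundTrip {
  public static void main(String[] args) {
    // same as toLogReplication((short) 3)
    UTF8 encoded = new UTF8(Short.toString((short) 3));
    // same as fromLogReplication(encoded)
    short decoded = Short.parseShort(encoded.toString());
    System.out.println(decoded);  // 3

    // same as toLogLong(1234567890L)
    UTF8 ts = new UTF8(Long.toString(1234567890L));
    System.out.println(Long.parseLong(ts.toString()));  // 1234567890
  }
}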

  private void createFile(String filename, long fileSize) throws IOException {
    // write fileSize bytes of random data to the file
    byte[] buffer = new byte[BUFFER_SIZE];
    UTF8 testFileName = new UTF8(filename);
    OutputStream nos = dfsClient.create(testFileName.toString(), false);
    try {
      for (long nBytesWritten = 0L;
           nBytesWritten < fileSize;
           nBytesWritten += buffer.length) {
        if ((nBytesWritten + buffer.length) > fileSize) {
          // write only the bytes still needed to reach exactly fileSize
          int pb = (int) (fileSize - nBytesWritten);
          byte[] bufferPartial = new byte[pb];
          randomDataGenerator.nextBytes(bufferPartial);
          nos.write(bufferPartial);
        } else {
          randomDataGenerator.nextBytes(buffer);
          nos.write(buffer);
        }
      }
    } finally {
      nos.flush();
      nos.close();
    }
  }

        //
        //           write nBytes of random data to each of numFiles files
        //
        ArrayList<UTF8> testfilesList = new ArrayList<UTF8>();
        byte[] buffer = new byte[bufferSize];
        UTF8 testFileName = null;
        for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
          testFileName = new UTF8("/f" + iFileNumber);
          testfilesList.add(testFileName);
          OutputStream nos = dfsClient.create(testFileName.toString(), false);
          try {
            for (long nBytesWritten = 0L;
                 nBytesWritten < nBytes;
                 nBytesWritten += buffer.length) {
              if ((nBytesWritten + buffer.length) > nBytes) {
                // calculate byte count needed to exactly hit nBytes in length
                //  to keep randomDataGenerator in sync during the verify step
                int pb = (int) (nBytes - nBytesWritten);
                byte[] bufferPartial = new byte[pb];
                randomDataGenerator.nextBytes(bufferPartial);
                nos.write(bufferPartial);
              } else {
                randomDataGenerator.nextBytes(buffer);
                nos.write(buffer);
              }
            }
          } finally {
            nos.flush();
            nos.close();
          }
        }

        //
        // No need to wait for blocks to be replicated because replication
        //  is supposed to be complete when the file is closed.
        //

        //
        //                     take one datanode down
        iDatanodeClosed =
          currentTestCycleNumber % listOfDataNodeDaemons.size();
        DataNode dn = (DataNode) listOfDataNodeDaemons.get(iDatanodeClosed);
        msg("shutdown datanode daemon " + iDatanodeClosed +
            " dn=" + dn.data);
        try {
          dn.shutdown();
        } catch (Exception e) {
          msg("ignoring datanode shutdown exception=" + e);
        }

        //
        //          verify, against a "rewound" randomDataGenerator,
        //               that all of the data is intact
        long lastLong = randomDataGenerator.nextLong();
        randomDataGenerator = makeRandomDataGenerator(); // restart (make new) PRNG
        ListIterator<UTF8> li = testfilesList.listIterator();
        while (li.hasNext()) {
          testFileName = li.next();
          FSInputStream nis = dfsClient.open(testFileName.toString());
          byte[] bufferGolden = new byte[bufferSize];
          int m = 42; // any value != -1 so the read loop below is entered
          try {
            while (m != -1) {
              m = nis.read(buffer);
              if (m == buffer.length) {
                randomDataGenerator.nextBytes(bufferGolden);
                assertBytesEqual(buffer, bufferGolden, buffer.length);
              } else if (m > 0) {
                byte[] bufferGoldenPartial = new byte[m];
                randomDataGenerator.nextBytes(bufferGoldenPartial);
                assertBytesEqual(buffer, bufferGoldenPartial, bufferGoldenPartial.length);
              }
            }
          } finally {
            nis.close();
          }
        }
        // verify last randomDataGenerator rand val to ensure last file length was checked
        long lastLongAgain = randomDataGenerator.nextLong();
        assertEquals(lastLong, lastLongAgain);
        msg("Finished validating all file contents");

        //
        //                    now delete all the created files
        msg("Delete all random test files under DFS via remaining datanodes");
        li = testfilesList.listIterator();
        while (li.hasNext()) {
          testFileName = li.next();
          assertTrue(dfsClient.delete(testFileName.toString()));
        }

        //
        //                   wait for delete to be propagated
        //                  (unlike writing files, delete is lazy)
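
The verification pass above relies on a standard trick: a pseudo-random generator reconstructed with the same seed replays exactly the same byte sequence, so the verifier can regenerate the "golden" data instead of storing it. A minimal illustration (the seed value here is made up; the test builds its generator via makeRandomDataGenerator()):

import java.util.Arrays;
import java.util.Random;

public class RewoundPrngDemo {
  public static void main(String[] args) {
    long seed = 42L;  // hypothetical; any fixed seed works
    Random writeGen = new Random(seed);
    byte[] written = new byte[16];
    writeGen.nextBytes(written);          // the data as written

    Random verifyGen = new Random(seed);  // "rewind" = fresh PRNG, same seed
    byte[] expected = new byte[16];
    verifyGen.nextBytes(expected);        // regenerated golden data

    System.out.println(Arrays.equals(written, expected));  // true
  }
}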

    assertTrue(Arrays.equals(stringResults, new String[]{"foo","bar"}));

    stringResults = proxy.echo((String[])null);
    assertTrue(Arrays.equals(stringResults, null));

    UTF8 utf8Result = (UTF8)proxy.echo(new UTF8("hello world"));
    assertEquals(utf8Result, new UTF8("hello world"));

    utf8Result = (UTF8)proxy.echo((UTF8)null);
    assertEquals(utf8Result, null);

    int intResult = proxy.add(1, 2);
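
The assertEquals calls above pass because UTF8 compares by content: two independently constructed instances holding the same string are equal. A tiny standalone illustration (class name is made up):

import org.apache.hadoop.io.UTF8;

public class Utf8EqualityDemo {
  public static void main(String[] args) {
    UTF8 a = new UTF8("hello world");
    UTF8 b = new UTF8("hello world");
    System.out.println(a.equals(b));          // true: equality by content
    System.out.println(a.compareTo(b) == 0);  // true: ordered by encoded bytes
  }
}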


      this.reporter = reporter;
    }

    public void run() {
      try {
        try {
          UTF8 EMPTY = new UTF8("");
          UTF8 key = new UTF8();
          UTF8 val = new UTF8();
          // 3/4 Tool to Hadoop
          while ((answer = clientIn_.readLine()) != null) {
            // 4/4 Hadoop out
            splitKeyVal(answer, key, val);
            output.collect(key, val);
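
splitKeyVal is defined elsewhere in the streaming code; judging by how it is called here, it splits one line of the tool's output into a key and a value, presumably at a tab character. A plausible sketch of that contract (this body is an assumption, not the actual Hadoop source):

  // hypothetical reconstruction: split at the first tab; if there is no
  // tab, the whole line becomes the key and the value is left empty
  void splitKeyVal(String line, UTF8 key, UTF8 val) {
    int tab = line.indexOf('\t');
    if (tab == -1) {
      key.set(line);
      val.set("");
    } else {
      key.set(line.substring(0, tab));
      val.set(line.substring(tab + 1));
    }
  }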

      needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);

      // read file info
      short replication = FSNamesystem.getFSNamesystem().getDefaultReplication();
      for (int i = 0; i < numFiles; i++) {
        UTF8 name = new UTF8();
        long modificationTime = 0;
        long blockSize = 0;
        name.readFields(in);
        // version 0 does not support per-file replication; newer
        // (more negative) image versions store it explicitly
        if (imgVersion < 0) {
          replication = in.readShort();
          replication = FSEditLog.adjustReplication(replication);
        }
        if (imgVersion <= -5) {
          modificationTime = in.readLong();
        }
        if (imgVersion <= -8) {
          blockSize = in.readLong();
        }
        int numBlocks = in.readInt();
        Block blocks[] = null;

        // for older versions, a blocklist of size 0
        // indicates a directory.
        if ((-9 <= imgVersion && numBlocks > 0) ||
            (imgVersion < -9 && numBlocks >= 0)) {
          blocks = new Block[numBlocks];
          for (int j = 0; j < numBlocks; j++) {
            blocks[j] = new Block();
            blocks[j].readFields(in);
          }
        }
        // Older versions of HDFS do not store the block size in the inode.
        // If the file has more than one block, use the size of the
        // first block as the block size; otherwise use the larger of the
        // default block size and the single block's size.
        //
        if (-8 <= imgVersion && blockSize == 0) {
          if (numBlocks > 1) {
            blockSize = blocks[0].getNumBytes();
          } else {
            long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
            blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
          }
        }
        PermissionStatus permissions = fsNamesys.getUpgradePermission();
        if (imgVersion <= -11) {
          permissions = PermissionStatus.read(in);
        }
        fsDir.unprotectedAddFile(name.toString(), permissions,
            blocks, replication, modificationTime, blockSize);
      }
     
      // load datanode info
      this.loadDatanodes(imgVersion, in);

                                INode inode,
                                DataOutputStream out) throws IOException {
    String fullName = "";
    if (inode.getParent() != null) {
      fullName = parentPrefix + "/" + inode.getLocalName();
      new UTF8(fullName).write(out);
      if (!inode.isDirectory()) {  // write file inode
        INodeFile fileINode = (INodeFile)inode;
        out.writeShort(fileINode.getReplication());
        out.writeLong(inode.getModificationTime());
        out.writeLong(fileINode.getPreferredBlockSize());
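
The new UTF8(fullName).write(out) call above is the save-side twin of the name.readFields(in) call in the image-loading snippet earlier: UTF8 writes its length followed by the encoded bytes, so the loader restores the identical path. A standalone sketch of that pairing (class name and path are made up):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.UTF8;

public class PathWriteReadDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    new UTF8("/dir/file").write(out);   // save side

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    UTF8 name = new UTF8();
    name.readFields(in);                // load side
    System.out.println(name);           // /dir/file
  }
}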
