Examples of ArrayWritable


Examples of org.apache.hadoop.io.ArrayWritable
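All of the snippets below rely on the same contract: ArrayWritable serializes only the element count followed by each element's own serialization, so the reading side must be constructed with the same value class before readFields() is called. A minimal standalone round trip, shown here for orientation and not taken from any of the projects below:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class ArrayWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    ArrayWritable written = new ArrayWritable(Text.class,
        new Writable[] { new Text("a"), new Text("b") });

    DataOutputBuffer out = new DataOutputBuffer();
    written.write(out);                      // element count, then each Text

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    ArrayWritable read = new ArrayWritable(Text.class); // value class must match
    read.readFields(in);
    for (String s : read.toStrings()) {      // prints "a" then "b"
      System.out.println(s);
    }
  }
}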


  @Test
  public void testMD5HashForArrayWritableKey() throws IOException {
    ArrayWritable key = new ArrayWritable(new String[] { "abc123" });
    MD5Hash md5HashKey1 = HashUtility.getMD5Hash(key);
    MD5Hash md5HashKey2 = HashUtility.getMD5Hash(key);
    assertEquals(md5HashKey1, md5HashKey2);
  }
View Full Code Here
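HashUtility here is project code whose source is not part of the snippet. A helper with that signature could be built on Hadoop's own MD5Hash; the following is a plausible sketch, not the project's actual implementation:

import java.io.IOException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Writable;

public final class HashUtility {
  public static MD5Hash getMD5Hash(Writable key) throws IOException {
    DataOutputBuffer buf = new DataOutputBuffer();
    key.write(buf);                          // serialize the Writable to bytes
    return MD5Hash.digest(buf.getData(), 0, buf.getLength());
  }
}

Writable serialization is deterministic, which is why the test can assert that two hashes of the same ArrayWritable key are equal.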

Examples of org.apache.hadoop.io.ArrayWritable

  public void testWritableIO() throws IOException {
    Writable[] vectorValues = new Writable[] {
      new Text("test1"), new Text("test2"), new Text("test3")
    };
    ArrayWritable vector = new ArrayWritable(Text.class, vectorValues);
    MapWritable map = new MapWritable();
    map.put(new Text("one"), new VIntWritable(1));
    map.put(new Text("two"), new VLongWritable(2));
    Writable[] writables = new Writable[] {
      new BytesWritable(new byte[] { 1, 2, 3, 4 }),
      new ByteWritable((byte) 123), new BooleanWritable(true),
      new VIntWritable(12345), new VLongWritable(123456789L),
      new FloatWritable((float) 1.2), new DoubleWritable(1.234),
      new Text("random string"),
      new ObjectWritable("test")
    };
    TypedBytesWritable tbw = new TypedBytesWritable();
    tbw.setValue("typed bytes text");
    RecRecord1 r1 = new RecRecord1();
    r1.setBoolVal(true);
    r1.setByteVal((byte) 0x66);
    r1.setFloatVal(3.145F);
    r1.setDoubleVal(1.5234);
    r1.setIntVal(-4567);
    r1.setLongVal(-2367L);
    r1.setStringVal("random text");
    r1.setBufferVal(new Buffer());
    r1.setVectorVal(new ArrayList<String>());
    r1.setMapVal(new TreeMap<String, String>());
    RecRecord0 r0 = new RecRecord0();
    r0.setStringVal("other random text");
    r1.setRecordVal(r0);

    FileOutputStream ostream = new FileOutputStream(tmpfile);
    DataOutputStream dostream = new DataOutputStream(ostream);
    TypedBytesWritableOutput out = new TypedBytesWritableOutput(dostream);
    for (Writable w : writables) {
      out.write(w);
    }
    out.write(tbw);
    out.write(vector);
    out.write(map);
    out.write(r1);
    dostream.close();
    ostream.close();

    FileInputStream istream = new FileInputStream(tmpfile);
    DataInputStream distream = new DataInputStream(istream);

    TypedBytesWritableInput in = new TypedBytesWritableInput(distream);
    for (Writable w : writables) {
      assertEquals(w.toString(), in.read().toString());
    }

    assertEquals(tbw.getValue().toString(), in.read().toString());

    assertEquals(ArrayWritable.class, in.readType());
    ArrayWritable aw = in.readArray();
    Writable[] writables1 = vector.get(), writables2 = aw.get();
    assertEquals(writables1.length, writables2.length);
    for (int i = 0; i < writables1.length; i++) {
      assertEquals(((Text) writables1[i]).toString(),
        ((TypedBytesWritable) writables2[i]).getValue());
    }
View Full Code Here
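Note what the final loop asserts: typed bytes is a language-neutral encoding that does not record Writable classes, so although the vector was written with Text elements, readArray() hands back TypedBytesWritable elements whose getValue() recovers the original strings. readType() identifies only the container type (ArrayWritable), not the element type.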

Examples of org.apache.hadoop.io.ArrayWritable

      DeprecatedUTF8 info[] = new DeprecatedUTF8[srcs.length];
      int idx = 0;
      for(int i=0; i<srcs.length; i++) {
        info[idx++] = new DeprecatedUTF8(srcs[i]);
      }
      new ArrayWritable(DeprecatedUTF8.class, info).write(out);

      FSImageSerialization.writeLong(timestamp, out);
     
      // rpc ids
      writeRpcIds(rpcClientId, rpcCallId, out);
View Full Code Here
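The matching read path falls outside this snippet; mirroring the write would look along these lines (a sketch, with in being the corresponding DataInput):

ArrayWritable info = new ArrayWritable(DeprecatedUTF8.class);
info.readFields(in);            // reads the count, then each DeprecatedUTF8
Writable[] srcs = info.get();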

Examples of org.apache.hadoop.io.ArrayWritable

      FSImageSerialization.writeString(path, out);
      FSImageSerialization.writeShort(replication, out);
      FSImageSerialization.writeLong(mtime, out);
      FSImageSerialization.writeLong(atime, out);
      FSImageSerialization.writeLong(blockSize, out);
      new ArrayWritable(Block.class, blocks).write(out);
      permissions.write(out);

      if (this.opCode == OP_ADD) {
        AclEditLogUtil.write(aclEntries, out);
        XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
View Full Code Here
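Unlike the older UTF8-array encodings in the loader snippets below, this writer serializes each scalar field (path, replication, mtime, atime, blockSize) individually through FSImageSerialization and reserves ArrayWritable for the variable-length block list.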

Examples of org.apache.hadoop.io.ArrayWritable

          }
          numEdits++;
          switch (opcode) {
          case OP_ADD: {
            UTF8 name = new UTF8();
            ArrayWritable aw = null;
            Writable writables[];
            // version 0 does not support per file replication
            if (logVersion >= 0)
              name.readFields(in); // read name only
            else { // other versions do
              // get name and replication
              aw = new ArrayWritable(UTF8.class);
              aw.readFields(in);
              writables = aw.get();
              if (writables.length != 2)
                throw new IOException("Incorrect data fortmat. "
                                      + "Name & replication pair expected");
              name = (UTF8) writables[0];
              replication = Short.parseShort(
                                             ((UTF8)writables[1]).toString());
              replication = adjustReplication(replication);
            }
            // get blocks
            aw = new ArrayWritable(Block.class);
            aw.readFields(in);
            writables = aw.get();
            Block blocks[] = new Block[writables.length];
            System.arraycopy(writables, 0, blocks, 0, blocks.length);
            // add to the file tree
            fsDir.unprotectedAddFile(name, blocks, replication);
            break;
View Full Code Here
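The System.arraycopy into Block[] is safe because ArrayWritable.readFields instantiates every element from the value class given to the constructor (Block.class here), so each entry of writables is guaranteed to be a Block.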

Examples of org.apache.hadoop.io.ArrayWritable

  void logCreateFile(FSDirectory.INode newNode) {
    UTF8 nameReplicationPair[] = new UTF8[] {
      new UTF8(newNode.computeName()),
      FSEditLog.toLogReplication(newNode.getReplication())};
    logEdit(OP_ADD,
            new ArrayWritable(UTF8.class, nameReplicationPair),
            new ArrayWritable(Block.class, newNode.getBlocks()));
  }
View Full Code Here
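This writer is the mirror of the loader above: the two-element UTF8 array (name, replication) is exactly what the writables.length != 2 check in the OP_ADD branch expects.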

Examples of org.apache.hadoop.io.ArrayWritable

          }
          numEdits++;
          switch (opcode) {
          case OP_ADD: {
            UTF8 name = new UTF8();
            ArrayWritable aw = null;
            Writable writables[];
            // version 0 does not support per file replication
            if (logVersion >= 0)
              name.readFields(in); // read name only
            else { // other versions do
              // get name and replication
              aw = new ArrayWritable(UTF8.class);
              aw.readFields(in);
              writables = aw.get();
              if (-4 <= logVersion && writables.length != 2 ||
                  -7 <= logVersion && logVersion < -4 && writables.length != 3 ||
                  logVersion < -7 && writables.length != 4) {
                  throw new IOException("Incorrect data format."  +
                                        " logVersion is " + logVersion +
                                        " but writables.length is " +
                                        writables.length + ". ");
              }
              name = (UTF8) writables[0];
              replication = Short.parseShort(
                                             ((UTF8)writables[1]).toString());
              replication = adjustReplication(replication);
              if (logVersion < -4) {
                mtime = Long.parseLong(((UTF8)writables[2]).toString());
              }
              if (logVersion < -7) {
                blockSize = Long.parseLong(((UTF8)writables[3]).toString());
              }
            }
            // get blocks
            aw = new ArrayWritable(Block.class);
            aw.readFields(in);
            writables = aw.get();
            Block blocks[] = new Block[writables.length];
            System.arraycopy(writables, 0, blocks, 0, blocks.length);

            // Older versions of HDFS do not store the block size in the inode.
            // If the file has more than one block, use the size of the
            // first block as the blocksize. Otherwise use the larger of the
            // default block size and the single block's size.
            if (-8 <= logVersion && blockSize == 0) {
              if (blocks.length > 1) {
                blockSize = blocks[0].getNumBytes();
              } else {
                long first = ((blocks.length == 1)? blocks[0].getNumBytes(): 0);
                blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
              }
            }

            // add to the file tree
            fsDir.unprotectedAddFile(name.toString(), blocks, replication,
                                     mtime, blockSize);
            break;
          }
          case OP_SET_REPLICATION: {
            UTF8 src = new UTF8();
            UTF8 repl = new UTF8();
            src.readFields(in);
            repl.readFields(in);
            replication = adjustReplication(fromLogReplication(repl));
            fsDir.unprotectedSetReplication(src.toString(),
                                            replication,
                                            null);
            break;
          }
          case OP_RENAME: {
            UTF8 src = null;
            UTF8 dst = null;
            if (logVersion >= -4) {
              src = new UTF8();
              dst = new UTF8();
              src.readFields(in);
              dst.readFields(in);
            } else {
              ArrayWritable aw = null;
              Writable writables[];
              aw = new ArrayWritable(UTF8.class);
              aw.readFields(in);
              writables = aw.get();
              if (writables.length != 3) {
                throw new IOException("Incorrect data format. "
                                      + "Mkdir operation.");
              }
              src = (UTF8) writables[0];
              dst = (UTF8) writables[1];
              timestamp = Long.parseLong(((UTF8)writables[2]).toString());
            }
            fsDir.unprotectedRenameTo(src.toString(), dst.toString(), timestamp);
            break;
          }
          case OP_DELETE: {
            UTF8 src = null;
            if (logVersion >= -4) {
              src = new UTF8();
              src.readFields(in);
            } else {
              ArrayWritable aw = null;
              Writable writables[];
              aw = new ArrayWritable(UTF8.class);
              aw.readFields(in);
              writables = aw.get();
              if (writables.length != 2) {
                throw new IOException("Incorrect data format. "
                                      + "delete operation.");
              }
              src = (UTF8) writables[0];
              timestamp = Long.parseLong(((UTF8)writables[1]).toString());
            }
            fsDir.unprotectedDelete(src.toString(), timestamp);
            break;
          }
          case OP_MKDIR: {
            UTF8 src = null;
            if (logVersion >= -4) {
              src = new UTF8();
              src.readFields(in);
            } else {
              ArrayWritable aw = null;
              Writable writables[];
              aw = new ArrayWritable(UTF8.class);
              aw.readFields(in);
              writables = aw.get();
              if (writables.length != 2) {
                throw new IOException("Incorrect data format. "
                                      + "Mkdir operation.");
              }
              src = (UTF8) writables[0];
View Full Code Here
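The chained length checks at the top of OP_ADD encode the format's history: two fields (name, replication) for versions down to -4, three (adding mtime) down to -7, and four (adding blockSize) below that. The four-field layout is what the newer logging code in the next snippet writes.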

Examples of org.apache.hadoop.io.ArrayWritable

    UTF8 nameReplicationPair[] = new UTF8[] {
      new UTF8(path),
      FSEditLog.toLogReplication(newNode.getReplication()),
      FSEditLog.toLogLong(newNode.getModificationTime()),
      FSEditLog.toLogLong(newNode.getPreferredBlockSize())};
    logEdit(OP_ADD,
            new ArrayWritable(UTF8.class, nameReplicationPair),
            new ArrayWritable(Block.class, newNode.getBlocks()));
  }
View Full Code Here

Examples of org.apache.hadoop.io.ArrayWritable

  void logMkDir(String path, INode newNode) {
    UTF8 info[] = new UTF8[] {
      new UTF8(path),
      FSEditLog.toLogLong(newNode.getModificationTime())
    };
    logEdit(OP_MKDIR, new ArrayWritable(UTF8.class, info), null);
  }
View Full Code Here

Examples of org.apache.hadoop.io.ArrayWritable

  void logRename(String src, String dst, long timestamp) {
    UTF8 info[] = new UTF8[] {
      new UTF8(src),
      new UTF8(dst),
      FSEditLog.toLogLong(timestamp)};
    logEdit(OP_RENAME, new ArrayWritable(UTF8.class, info), null);
  }
View Full Code Here
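logMkDir and logRename are the write-side counterparts of the OP_MKDIR and OP_RENAME branches in the loader above: the two- and three-element UTF8 arrays they build are exactly what those branches' writables.length checks validate.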