Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.DataInputBuffer
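All of the excerpts below share one pattern: serialized bytes are staged in an in-memory buffer with reset(...) and then consumed through the DataInput interface. As a primer, here is a minimal self-contained round trip; the class name and printed values are illustrative, but the buffer calls are the real org.apache.hadoop.io API.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class DataInputBufferRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize two values into an in-memory output buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeInt(42);
    out.writeUTF("hello");

    // Re-point a DataInputBuffer at the same backing array; no bytes are copied.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    System.out.println(in.readInt()); // 42
    System.out.println(in.readUTF()); // hello
  }
}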


  /**
   * Reads the index descriptors, if any, stored in the given column
   * descriptor's values map.
   *
   * @param columnDescriptor the column descriptor to read from
   * @return the index descriptors keyed by qualifier name
   * @throws IOException if an error occurs while reading the index descriptors
   */
  public static Map<ImmutableBytesWritable, IdxIndexDescriptor> getIndexDescriptors(HColumnDescriptor columnDescriptor) throws IOException {
    Map<ImmutableBytesWritable, ImmutableBytesWritable> values = columnDescriptor.getValues();
    if (hasIndexDescriptors(columnDescriptor)) {
      DataInputBuffer in = new DataInputBuffer();
      byte[] bytes = values.get(INDEX_DESCRIPTORS).get();
      in.reset(bytes, bytes.length);

      int size = in.readInt();
      Map<ImmutableBytesWritable, IdxIndexDescriptor> indexDescriptors =
          new HashMap<ImmutableBytesWritable, IdxIndexDescriptor>(size);

      for (int i = 0; i < size; i++) {
        // Truncated in the original excerpt. The full source reads each
        // descriptor back from the buffer, roughly as follows (the
        // getQualifierName() accessor is an assumption):
        IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor();
        indexDescriptor.readFields(in);
        indexDescriptors.put(
            new ImmutableBytesWritable(indexDescriptor.getQualifierName()),
            indexDescriptor);
      }
      return indexDescriptors;


    // Extracting an index Expression attached to a Scan: directly for an
    // IdxScan, otherwise deserialized from the scan's attribute map.
    // (The opening lines are reconstructed; the original excerpt begins
    // mid-method.)
    if (scan instanceof IdxScan) {
      return ((IdxScan) scan).getExpression();
    }

    Map<ImmutableBytesWritable,ImmutableBytesWritable> values = scan.getValues();
    if (values.containsKey(EXPRESSION)) {
      DataInputBuffer in = new DataInputBuffer();
      byte[] bytes = values.get(EXPRESSION).get();
      in.reset(bytes, bytes.length);

      return WritableHelper.readInstanceNullable(in, Expression.class);
    } else {
      return null;
    }
  }

    // From a serialization round-trip test: an HRegionInfo is written with
    // toDelimitedByteArray() and parsed back through a DataInputBuffer.
    assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
    assertEquals(hri, deserializedHri);

    //test toDelimitedByteArray()
    hrib = hri.toDelimitedByteArray();
    DataInputBuffer buf = new DataInputBuffer();
    try {
      buf.reset(hrib, hrib.length);
      deserializedHri = HRegionInfo.parseFrom(buf);
      assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
      assertEquals(hri, deserializedHri);
    } finally {
      buf.close();
    }
  }

      // Setting up a SequenceFile reader: key/value instances are created by
      // reflection, and a DataInputBuffer/DataOutputBuffer pair is allocated
      // for staging raw record bytes.
      r = new SequenceFile.Reader(getFS(), f.getPath(), getConf());
      key = ReflectionUtils.newInstance(r.getKeyClass().asSubclass(WritableComparable.class),
                                        getConf());
      val = ReflectionUtils.newInstance(r.getValueClass().asSubclass(Writable.class),
                                        getConf());
      inbuf = new DataInputBuffer();
      outbuf = new DataOutputBuffer();
    }
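The excerpt above only allocates the buffer pair. A sketch of how such a pair is typically used downstream; the surrounding tool's loop is not shown in the excerpt, so the Text key here is an assumption:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class BufferPairStaging {
  public static void main(String[] args) throws Exception {
    DataOutputBuffer outbuf = new DataOutputBuffer();
    DataInputBuffer inbuf = new DataInputBuffer();

    // Serialize a Writable into outbuf, then re-read it through inbuf.
    Text key = new Text("row-0001");
    key.write(outbuf);
    inbuf.reset(outbuf.getData(), outbuf.getLength());

    Text copy = new Text();
    copy.readFields(inbuf);
    System.out.println(copy); // row-0001

    // reset() empties the output buffer so the next record starts at zero.
    outbuf.reset();
  }
}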

    // Comparator used when merging sorted segments: each segment exposes its
    // current key as a DataInputBuffer, and only the bytes between position
    // and length are compared.
    @SuppressWarnings("unchecked")
    protected boolean lessThan(Object a, Object b) {
      DataInputBuffer key1 = ((Segment<K, V>)a).getKey();
      DataInputBuffer key2 = ((Segment<K, V>)b).getKey();
      int s1 = key1.getPosition();
      int l1 = key1.getLength() - s1;
      int s2 = key2.getPosition();
      int l2 = key2.getLength() - s2;

      return comparator.compare(key1.getData(), s1, l1, key2.getData(), s2, l2) < 0;
    }
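The position/length arithmetic above is the standard way to address the valid slice of a DataInputBuffer, since getLength() returns the end offset of the data rather than its size. A self-contained sketch of the same convention, using the real WritableComparator.compareBytes helper (the buffer contents are illustrative):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableComparator;

public class RawSliceCompare {
  public static void main(String[] args) throws Exception {
    // Two length-prefixed strings back to back in one array.
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeUTF("apple");
    int split = out.getLength(); // end of the first key's bytes
    out.writeUTF("banana");

    // Two buffers over the same backing array, each limited to one key.
    DataInputBuffer k1 = new DataInputBuffer();
    k1.reset(out.getData(), 0, split);
    DataInputBuffer k2 = new DataInputBuffer();
    k2.reset(out.getData(), split, out.getLength() - split);

    // Same slicing as lessThan() above: compare only position..length.
    int s1 = k1.getPosition(), l1 = k1.getLength() - s1;
    int s2 = k2.getPosition(), l2 = k2.getLength() - s2;
    int cmp = WritableComparator.compareBytes(k1.getData(), s1, l1,
                                              k2.getData(), s2, l2);
    System.out.println(cmp < 0); // true: the first slice sorts before the second
  }
}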

            // Map-side spill path: each record's key bytes are wrapped in a
            // DataInputBuffer and appended straight to the IFile writer.
            long segmentStart = out.getPos();
            writer = new Writer<K, V>(job, out, keyClass, valClass, codec,
                                      spilledRecordsCounter);
            if (combinerRunner == null) {
              // spill directly
              DataInputBuffer key = new DataInputBuffer();
              while (spindex < endPosition &&
                  kvindices[kvoffsets[spindex % kvoffsets.length]
                            + PARTITION] == i) {
                final int kvoff = kvoffsets[spindex % kvoffsets.length];
                getVBytesForOffset(kvoff, value);
                key.reset(kvbuffer, kvindices[kvoff + KEYSTART],
                          (kvindices[kvoff + VALSTART] -
                           kvindices[kvoff + KEYSTART]));
                writer.append(key, value);
                ++spindex;
              }


        // TODO: Remove this after a 'fix' for HADOOP-3647
        if (mapOutputLength > 0) {
          DataInputBuffer dib = new DataInputBuffer();
          dib.reset(shuffleData, 0, shuffleData.length);
          LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() + " -> (" +
                   WritableUtils.readVInt(dib) + ", " +
                   WritableUtils.readVInt(dib) + ") from " +
                   mapOutputLoc.getHost());
        }
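The debug block above peeks at the first record's two vint headers in the fetched shuffle data. A self-contained version of that peek, with illustrative values standing in for real map output:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class VIntPeek {
  public static void main(String[] args) throws Exception {
    // Stand-in for shuffle data: two vint length headers.
    DataOutputBuffer out = new DataOutputBuffer();
    WritableUtils.writeVInt(out, 300);  // key length, say
    WritableUtils.writeVInt(out, 1200); // value length, say

    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(out.getData(), 0, out.getLength());
    System.out.println(WritableUtils.readVInt(dib)); // 300
    System.out.println(WritableUtils.readVInt(dib)); // 1200
  }
}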

      // A raw key/value iterator: next() re-points the caller's
      // DataInputBuffers at the underlying iterator's byte ranges,
      // so no record bytes are copied.
      public boolean next(DataInputBuffer key, DataInputBuffer value)
          throws IOException {
        if (kvIter.next()) {
          final DataInputBuffer kb = kvIter.getKey();
          final DataInputBuffer vb = kvIter.getValue();
          final int kp = kb.getPosition();
          final int klen = kb.getLength() - kp;
          key.reset(kb.getData(), kp, klen);
          final int vp = vb.getPosition();
          final int vlen = vb.getLength() - vp;
          value.reset(vb.getData(), vp, vlen);
          bytesRead += klen + vlen;
          return true;
        }
        return false;
      }

    } catch (ClassNotFoundException exp) {
      // (Reconstructed catch opening; the original excerpt begins mid-block.)
      IOException wrap = new IOException("Split class " + splitClass +
                                         " not found");
      wrap.initCause(exp);
      throw wrap;
    }
    // Old-API split: the InputSplit is rehydrated from its serialized bytes
    // via Writable.readFields.
    DataInputBuffer splitBuffer = new DataInputBuffer();
    splitBuffer.reset(split.getBytes(), 0, split.getLength());
    inputSplit.readFields(splitBuffer);
   
    updateJobWithSplit(job, inputSplit);
    reporter.setInputSplit(inputSplit);

    // New-API split: the InputSplit is rebuilt through the
    // SerializationFactory rather than Writable.readFields.
    org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE> inputFormat =
      (org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>)
        ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job);
    // rebuild the input split
    org.apache.hadoop.mapreduce.InputSplit split = null;
    DataInputBuffer splitBuffer = new DataInputBuffer();
    splitBuffer.reset(rawSplit.getBytes(), 0, rawSplit.getLength());
    SerializationFactory factory = new SerializationFactory(job);
    Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>
      deserializer =
        (Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>)
        factory.getDeserializer(job.getClassByName(splitClass));
    // Continuation (truncated in the original excerpt): open the deserializer
    // over the buffer and materialize the split.
    deserializer.open(splitBuffer);
    split = deserializer.deserialize(null);
