Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.WritableComparable


    Path pin = new Path(conf.getInputPaths()[0], fn);
    FileStatus in = pin.getFileSystem(conf).getFileStatus(pin);
    RecordReader rr = inf.getRecordReader(
        new FileSplit(pin, 0, in.getLen(), conf), conf, Reporter.NULL);
    try {
      // Key and value instances are created once and reused for every record.
      WritableComparable key = rr.createKey();
      Writable val = rr.createValue();
      Date start = new Date();
      // Drain every record in the split; only the elapsed time matters here.
      while (rr.next(key, val));
      Date end = new Date();
      // Return the read time in milliseconds.
      return end.getTime() - start.getTime();
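
The key returned by RecordReader.createKey() above is only required to implement WritableComparable. For reference, a minimal sketch of such a key type follows; the class name and field are hypothetical and not part of the snippet above.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

// Hypothetical key type: serializable via write()/readFields() and ordered
// via compareTo(), which is all the framework needs to sort keys.
public class LongKey implements WritableComparable<LongKey> {
  private long value;

  public LongKey() {}                          // no-arg constructor for reflection

  public LongKey(long value) { this.value = value; }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(value);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    value = in.readLong();
  }

  @Override
  public int compareTo(LongKey other) {
    return value < other.value ? -1 : (value == other.value ? 0 : 1);
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof LongKey && ((LongKey) o).value == value;
  }

  @Override
  public int hashCode() {
    // Keep hash partitioning consistent with equals().
    return (int) (value ^ (value >>> 32));
  }
}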


      if (top) {
        super.finalKey(key);
      } else {
        reset();
        Writable value = new ImmutableBytesWritable();
        WritableComparable k = super.getClosest(midkey, value, true);
        // Copy k into the caller's key: serialize k, then deserialize the
        // bytes into the passed-in key instance.
        ByteArrayOutputStream byteout = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(byteout);
        k.write(out);
        ByteArrayInputStream bytein =
          new ByteArrayInputStream(byteout.toByteArray());
        DataInputStream in = new DataInputStream(bytein);
        key.readFields(in);
      }
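
The else branch above copies the key closest to midkey into the caller's key by serializing it and reading the bytes back (a later snippet on this page uses Writables.copyWritable for the same job). Below is a minimal sketch of that round trip using Hadoop's reusable buffers; the class and method names are hypothetical.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

// Hypothetical utility: copies the state of src into dst by serializing with
// write() and deserializing with readFields().
public final class WritableCopier {
  private WritableCopier() {}

  public static void copyState(Writable src, Writable dst) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    src.write(out);                               // serialize source fields
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());     // wrap the serialized bytes
    dst.readFields(in);                           // overwrite destination fields
  }
}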

    @SuppressWarnings("unchecked")
    @Override
    public synchronized WritableComparable getClosest(WritableComparable key,
      Writable val)
    throws IOException {
      WritableComparable closest = null;
      if (top) {
        // If top, the lowest possible key is midkey.  Do not have to check
        // what comes back from super getClosest.  Will return exact match or
        // greater.
        closest = (key.compareTo(this.midkey) < 0)?
          this.midkey: super.getClosest(key, val);
      } else {
        // We're serving bottom of the file.
        if (key.compareTo(this.midkey) < 0) {
          // Check key is within range for bottom.
          closest = super.getClosest(key, val);
          // midkey was made against largest store file at time of split. Smaller
          // store files could have anything in them.  Check return value is
          // not beyond the midkey (getClosest returns exact match or next
          // after).
          if (closest != null && closest.compareTo(this.midkey) >= 0) {
            // Don't let this value out.
            closest = null;
          }
        }
        // Else, key is >= midkey, so leave closest as null.
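
The range checks above compare deserialized keys with WritableComparable.compareTo(). For illustration, the sketch below compares keys both as objects and on their serialized bytes through a registered WritableComparator; the Text keys are an assumption, not taken from the code above.

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;

public class CompareKeys {
  public static void main(String[] args) throws Exception {
    Text a = new Text("apple");
    Text b = new Text("banana");

    // Object-level comparison, as in key.compareTo(this.midkey) above.
    System.out.println(a.compareTo(b) < 0);                    // true

    // Serialize both keys, then compare the raw bytes with the comparator
    // registered for Text.
    DataOutputBuffer bufA = new DataOutputBuffer();
    a.write(bufA);
    DataOutputBuffer bufB = new DataOutputBuffer();
    b.write(bufB);
    WritableComparator cmp = WritableComparator.get(Text.class);
    int result = cmp.compare(bufA.getData(), 0, bufA.getLength(),
                             bufB.getData(), 0, bufB.getLength());
    System.out.println(result < 0);                            // true
  }
}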

      if (firstNextCall) {
        firstNextCall = false;
        if (this.top) {
          // Seek to midkey.  Midkey may not exist in this file.  That should be
          // fine.  Then we'll either be positioned at end or start of file.
          WritableComparable nearest = getClosest(midkey, val);
          // Now copy the mid key into the passed key.
          if (nearest != null) {
            Writables.copyWritable(nearest, key);
            return true;
          }

   * using two HalfMapFiles.
   * @throws Exception
   */
  public void testBasicHalfMapFile() throws Exception {
    Path p = writeMapFile(getName());
    WritableComparable midkey = getMidkey(p);
    checkHalfMapFile(p, midkey);
  }

    MapFile.Reader reader =
      new MapFile.Reader(this.fs, p.toString(), this.conf);
    HStoreKey key = new HStoreKey();
    ImmutableBytesWritable value = new ImmutableBytesWritable();
    reader.next(key, value);
    String firstKey = key.toString();
    WritableComparable midkey = reader.midKey();
    reader.finalKey(key);
    LOG.info("First key " + firstKey + ", midkey " + midkey.toString()
        + ", last key " + key.toString());
    reader.close();
    return midkey;
  }
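
The getMidkey() helper above exercises MapFile.Reader.next(), midKey() and finalKey(). The self-contained sketch below writes a small MapFile and calls the same methods; the directory name and the Text/IntWritable key and value types are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

public class MapFileMidKeyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String dir = "/tmp/mapfile-demo";                // hypothetical location

    // MapFile keys must be WritableComparables appended in sorted order.
    MapFile.Writer writer =
      new MapFile.Writer(conf, fs, dir, Text.class, IntWritable.class);
    for (int i = 0; i < 100; i++) {
      writer.append(new Text(String.format("row%03d", i)), new IntWritable(i));
    }
    writer.close();

    MapFile.Reader reader = new MapFile.Reader(fs, dir, conf);
    Text key = new Text();
    IntWritable value = new IntWritable();
    reader.next(key, value);                         // first entry
    String firstKey = key.toString();
    WritableComparable midkey = reader.midKey();     // key near the middle of the index
    reader.finalKey(key);                            // last key in the file
    System.out.println("first=" + firstKey + ", mid=" + midkey + ", last=" + key);
    reader.close();
  }
}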

      assertTrue(Bytes.equals(value.get(), ((HStoreKey) midkey).getRow()));

      // Next test using a midkey that does not exist in the file.
      // First, do a key that is < than first key. Ensure splits behave
      // properly.
      WritableComparable badkey = new HStoreKey("   ");
      bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
          this.conf, HStoreFile.Range.bottom, badkey, null);
      // When badkey is < than the bottom, should return no values.
      assertFalse(bottom.next(key, value));
      // Now read from the top.

        try {
            setupMapPipe(properties, reporter);

            // allocate key & value instances that are re-used for all entries
            WritableComparable key = input.createKey();
            Tuple value = input.createValue();
            while (input.next(key, value)) {
                evalPipe.add(value);
            }
        } finally {
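
In the loop above, the key and value objects returned by createKey()/createValue() are mutated in place by every call to input.next(). Anything that must outlive an iteration therefore needs to be copied first; a minimal sketch of that, with hypothetical class and method names, follows.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.RecordReader;

public class KeepAllKeys {
  @SuppressWarnings("unchecked")
  public static List<WritableComparable> readKeys(RecordReader input, Configuration conf)
      throws IOException {
    WritableComparable key = (WritableComparable) input.createKey();
    Writable value = (Writable) input.createValue();
    List<WritableComparable> keys = new ArrayList<WritableComparable>();
    while (input.next(key, value)) {
      // Clone through Writable serialization; without this every list entry
      // would point at the same, repeatedly overwritten key object.
      keys.add(WritableUtils.clone(key, conf));
    }
    return keys;
  }
}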

   
    public void run(RecordReader input, OutputCollector output,
            Reporter reporter)
    throws IOException {
        try {
            WritableComparable key = input.createKey();
            Writable value = input.createValue();
            while (input.next(key, value)) {
                doArchive(value.toString(), output, new ARCReporter(reporter));
            }
        } finally {

        }
      }

      for (int i = 0; i < arrSize; i++) {
        int indexDocNo = in.readInt();              // read indexDocNo
        WritableComparable sortValue = null;
        try {
          sortValue = (WritableComparable) sortClass.newInstance();
        } catch (Exception e) {
          throw new IOException(e.toString());
        }
        sortValue.readFields(in);                   // read sortValue
        String dedupValue = Text.readString(in);    // read dedupValue

        hits[i] = new Hit(indexDocNo, sortValue, dedupValue);
      }
    }
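
The deserialization above instantiates the key class reflectively with sortClass.newInstance() and then fills it with readFields(). Hadoop's ReflectionUtils supports the same pattern with constructor caching and Configuration injection; the sketch below is a hypothetical helper, not part of the code above.

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.util.ReflectionUtils;

public final class WritableReflection {
  private WritableReflection() {}

  public static WritableComparable readSortValue(
      Class<? extends WritableComparable> sortClass,
      Configuration conf, DataInput in) throws IOException {
    // Create an empty instance, then populate its fields from the stream.
    WritableComparable sortValue = ReflectionUtils.newInstance(sortClass, conf);
    sortValue.readFields(in);
    return sortValue;
  }
}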
