Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.DataInputBuffer$Buffer
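DataInputBuffer is a reusable, in-memory DataInput backed by a byte array: you point it at a serialized record with reset(bytes, start, length) and then read the record back through a Writable's readFields or a serialization-framework Deserializer. (The $Buffer inner class is the ByteArrayInputStream subclass that backs it.) The fragments below show that pattern as it occurs across the Hadoop sources.

First, a minimal, self-contained sketch of the round trip. This is not from the Hadoop sources; hadoop-common is assumed on the classpath, and the class name and Text payload are illustrative only.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class DataInputBufferExample {
  public static void main(String[] args) throws IOException {
    // Serialize a Writable into an in-memory output buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    new Text("hello").write(out);

    // Point a DataInputBuffer at the serialized bytes and read them back.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());

    Text roundTripped = new Text();
    roundTripped.readFields(in);
    System.out.println(roundTripped); // prints: hello
  }
}

The first collected fragment appears to be from the old-API map task setup: the tail of a catch block that wraps a missing split class in an IOException, followed by the reset-and-readFields pattern that rebuilds the InputSplit from its serialized bytes.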


      IOException wrap = new IOException("Split class " + splitClass +
                                         " not found");
      wrap.initCause(exp);
      throw wrap;
    }
    DataInputBuffer splitBuffer = new DataInputBuffer();
    splitBuffer.reset(split.getBytes(), 0, split.getLength());
    inputSplit.readFields(splitBuffer);
   
    updateJobWithSplit(job, inputSplit);
    reporter.setInputSplit(inputSplit);
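The new-API (org.apache.hadoop.mapreduce) counterpart: the raw split bytes again go into a DataInputBuffer, but deserialization is delegated to whatever Deserializer the SerializationFactory resolves for the split class, rather than calling readFields directly.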


    org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE> inputFormat =
      (org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>)
        ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job);
    // rebuild the input split
    org.apache.hadoop.mapreduce.InputSplit split = null;
    DataInputBuffer splitBuffer = new DataInputBuffer();
    splitBuffer.reset(rawSplit.getBytes(), 0, rawSplit.getLength());
    SerializationFactory factory = new SerializationFactory(job);
    Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>
      deserializer =
        (Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>)
        factory.getDeserializer(job.getClassByName(splitClass));
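From a reduce-side values iterator: in.next() advances the underlying merged input, keyIn (a DataInputBuffer field) is reset over the raw bytes of the next key, the key is deserialized, and the comparator decides whether it still belongs to the current key group.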

    /**
     * read the next key
     */
    private void readNextKey() throws IOException {
      more = in.next();
      if (more) {
        DataInputBuffer nextKeyBytes = in.getKey();
        keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength());
        nextKey = keyDeserializer.deserialize(nextKey);
        hasNext = key != null && (comparator.compare(key, nextKey) == 0);
      } else {
        hasNext = false;
      }
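The companion for values: valueIn is reset over the raw value bytes and the value is deserialized in place, reusing the existing value object.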

    /**
     * Read the next value
     * @throws IOException
     */
    private void readNextValue() throws IOException {
      DataInputBuffer nextValueBytes = in.getValue();
      valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength());
      value = valDeserializer.deserialize(value);
    }
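A generic helper that deserializes an object from a byte array. The two-argument reset(byte[], int) overload treats the entire array as the record before delegating to a stream-based overload of deserialize.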

  /**
   * Deserializes the object in the given byte array using one of the
   * available Hadoop serializations.
   * @throws IOException
   * @throws ClassNotFoundException */
  public static <T> T deserialize(Configuration conf, byte[] in,
      T obj) throws IOException, ClassNotFoundException {
    DataInputBuffer buffer = new DataInputBuffer();
    buffer.reset(in, in.length);
    return deserialize(conf, buffer, obj);
  }
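A diagnostic block flagged for removal once HADOOP-3647 is addressed: it resets a DataInputBuffer over the fetched shuffle data and logs the first record's two leading vints, which should be the first record's key and value lengths.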

          );
        }

        // TODO: Remove this after a 'fix' for HADOOP-3647
        if (mapOutputLength > 0) {
          DataInputBuffer dib = new DataInputBuffer();
          dib.reset(shuffleData, 0, shuffleData.length);
          LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() + " -> (" +
                   WritableUtils.readVInt(dib) + ", " +
                   WritableUtils.readVInt(dib) + ") from " +
                   mapOutputLoc.getHost());
        }
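An adapter exposing a higher-level key/value iterator through the raw next(DataInputBuffer, DataInputBuffer) interface. Note the arithmetic: for buffers handed out by these iterators, getLength() is the end offset rather than a byte count, so the valid span is getLength() - getPosition().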

      }

      public boolean next(DataInputBuffer key, DataInputBuffer value)
          throws IOException {
        if (kvIter.next()) {
          final DataInputBuffer kb = kvIter.getKey();
          final DataInputBuffer vb = kvIter.getValue();
          final int kp = kb.getPosition();
          final int klen = kb.getLength() - kp;
          key.reset(kb.getData(), kp, klen);
          final int vp = vb.getPosition();
          final int vlen = vb.getLength() - vp;
          value.reset(vb.getData(), vp, vlen);
          bytesRead += klen + vlen;
          return true;
        }
        return false;
      }
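From a codec round-trip test: randomly generated records are written to a DataOutputBuffer, compressed through the codec's CompressionOutputStream, then read back by pointing a DataInputBuffer at the compressed bytes and wrapping it in a CompressionInputStream.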

      RandomDatum value = generator.getValue();
     
      key.write(data);
      value.write(data);
    }
    DataInputBuffer originalData = new DataInputBuffer();
    DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
    originalData.reset(data.getData(), 0, data.getLength());
   
    LOG.info("Generated " + count + " records");
   
    // Compress data
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter =
      codec.createOutputStream(compressedDataBuffer);
    DataOutputStream deflateOut =
      new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(data.getData(), 0, data.getLength());
    deflateOut.flush();
    deflateFilter.finish();
    LOG.info("Finished compressing data");
   
    // De-compress data
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
                                 compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter =
      codec.createInputStream(deCompressedDataBuffer);
    DataInputStream inflateIn =
      new DataInputStream(new BufferedInputStream(inflateFilter));
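The remaining two fragments use a different Buffer: they appear to come from LuaJ's StringLib (org.luaj.vm2.Buffer) and were presumably matched to this page only by the $Buffer suffix in the class name. This one implements Lua's string.format, dispatching on each conversion character and appending the formatted result to a Buffer.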

  /**
   * This function does not accept string values containing embedded zeros,
   * except as arguments to the q option.
   */
  static Varargs format( Varargs args ) {
    LuaString fmt = args.checkstring( 1 );
    final int n = fmt.length();
    Buffer result = new Buffer(n);
    int arg = 1;
    int c;
   
    for ( int i = 0; i < n; ) {
      switch ( c = fmt.luaByte( i++ ) ) {
      case '\n':
        result.append( "\n" );
        break;
      default:
        result.append( (byte) c );
        break;
      case L_ESC:
        if ( i < n ) {
          if ( ( c = fmt.luaByte( i ) ) == L_ESC ) {
            ++i;
            result.append( (byte)L_ESC );
          } else {
            arg++;
            FormatDesc fdsc = new FormatDesc(args, fmt, i );
            i += fdsc.length;
            switch ( fdsc.conversion ) {
            case 'c':
              fdsc.format( result, (byte)args.checkint( arg ) );
              break;
            case 'i':
            case 'd':
              fdsc.format( result, args.checkint( arg ) );
              break;
            case 'o':
            case 'u':
            case 'x':
            case 'X':
              fdsc.format( result, args.checklong( arg ) );
              break;
            case 'e':
            case 'E':
            case 'f':
            case 'g':
            case 'G':
              fdsc.format( result, args.checkdouble( arg ) );
              break;
            case 'q':
              addquoted( result, args.checkstring( arg ) );
              break;
            case 's': {
              LuaString s = args.checkstring( arg );
              if ( fdsc.precision == -1 && s.length() >= 100 ) {
                result.append( s );
              } else {
                fdsc.format( result, s );
              }
            }  break;
            default:
              error("invalid option '%"+(char)fdsc.conversion+"' to 'format'");
              break;
            }
          }
        }
      }
    }
   
    return result.tostring();
  }
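Also apparently from LuaJ's StringLib: the main loop of string.gsub. It repeatedly matches the pattern against the source string src, appends each replacement (or the next unmatched byte) to a Buffer, and returns the result string together with the substitution count.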

  static Varargs gsub( Varargs args ) {
    LuaString src = args.checkstring( 1 );
    final int srclen = src.length();
    LuaString p = args.checkstring( 2 );
    LuaValue repl = args.arg( 3 );
    int max_s = args.optint( 4, srclen + 1 );
    final boolean anchor = p.length() > 0 && p.charAt( 0 ) == '^';
   
    Buffer lbuf = new Buffer( srclen );
    MatchState ms = new MatchState( args, src, p );
   
    int soffset = 0;
    int n = 0;
    while ( n < max_s ) {
      ms.reset();
      int res = ms.match( soffset, anchor ? 1 : 0 );
      if ( res != -1 ) {
        n++;
        ms.add_value( lbuf, soffset, res, repl );
      }
      if ( res != -1 && res > soffset )
        soffset = res;
      else if ( soffset < srclen )
        lbuf.append( (byte) src.luaByte( soffset++ ) );
      else
        break;
      if ( anchor )
        break;
    }
    lbuf.append( src.substring( soffset, srclen ) );
    return varargsOf(lbuf.tostring(), valueOf(n));
  }
