Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.Writable
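
Every example below turns on the same two-method contract: write(DataOutput) serializes an object's fields, and readFields(DataInput) repopulates them in the same order. A minimal sketch of a custom implementation (PairWritable and its fields are hypothetical, not taken from the projects excerpted below):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class PairWritable implements Writable {
  private String feature;
  private long count;

  // Writables need a no-argument constructor so frameworks can create
  // empty instances reflectively (e.g. via ReflectionUtils.newInstance).
  public PairWritable() { }

  public PairWritable(String feature, long count) {
    this.feature = feature;
    this.count = count;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeUTF(feature);  // the field order written here...
    out.writeLong(count);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    feature = in.readUTF(); // ...must match the order read back here
    count = in.readLong();
  }
}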


          // From Hadoop's RPC server (org.apache.hadoop.ipc.Server): a handler
          // thread resets its per-call state before dispatching the call.
          String errorClass = null;
          String error = null;
          RpcStatusProto returnStatus = RpcStatusProto.SUCCESS;
          RpcErrorCodeProto detailedErr = null;
          Writable value = null;

          CurCall.set(call);
          try {
            // Make the call as the user via Subject.doAs, thus associating
            // the call with the Subject
View Full Code Here


        // From Hadoop's RPC client (org.apache.hadoop.ipc.Client): on a
        // successful response, instantiate the expected Writable reflectively
        // and let it deserialize itself from the connection's input stream.
        LOG.debug(getName() + " got value #" + callId);

        Call call = calls.get(callId);
        RpcStatusProto status = header.getStatus();
        if (status == RpcStatusProto.SUCCESS) {
          Writable value = ReflectionUtils.newInstance(valueClass, conf);
          value.readFields(in);                 // read value
          calls.remove(callId);
          call.setRpcResponse(value);

          // verify that length was correct
          // only for ProtobufEngine where len can be verified easily
View Full Code Here
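
Both RPC snippets rely on the same idiom: create an empty Writable reflectively, then let readFields populate it from a stream. A minimal, self-contained sketch of that round trip, using Hadoop's in-memory DataOutputBuffer/DataInputBuffer as a stand-in for the wire (the class name is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class WritableRoundTrip {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Serialize a Writable into an in-memory buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    new Text("hello").write(out);

    // Reflectively create an empty instance of the expected class and let
    // it deserialize itself -- the same pattern the RPC client uses above.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Writable value = ReflectionUtils.newInstance(Text.class, conf);
    value.readFields(in);

    System.out.println(value); // prints: hello
  }
}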

  /**
   * Reads the feature frequency list produced by the parallel counting job.
   *
   * @param params job parameters, including the output path and minSupport
   * @return the feature frequency list, ordered by descending frequency
   * @throws IOException if the counting output cannot be read
   */
  public static List<Pair<String,Long>> readFList(Parameters params) throws IOException {
    Writable key = new Text();
    LongWritable value = new LongWritable();
    int minSupport = Integer.parseInt(params.get("minSupport", "3"));
    Configuration conf = new Configuration();
   
    FileSystem fs = FileSystem.get(new Path(params.get("output") + "/parallelcounting").toUri(), conf);
    FileStatus[] outputFiles = fs.globStatus(new Path(params.get("output") + "/parallelcounting/part-*"));
   
    PriorityQueue<Pair<String,Long>> queue = new PriorityQueue<Pair<String,Long>>(11,
        new Comparator<Pair<String,Long>>() {
         
          @Override
          public int compare(Pair<String,Long> o1, Pair<String,Long> o2) {
            int ret = o2.getSecond().compareTo(o1.getSecond());
            if (ret != 0) {
              return ret;
            }
            return o1.getFirst().compareTo(o2.getFirst());
          }
         
        });
    for (FileStatus fileStatus : outputFiles) {
      Path path = fileStatus.getPath();
      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
      // key is the feature; value is its count
      while (reader.next(key, value)) {
        if (value.get() < minSupport) {
          continue;
        }
        queue.add(new Pair<String,Long>(key.toString(), value.get()));
      }
      reader.close(); // release the handle before moving to the next part file
    }
    List<Pair<String,Long>> fList = new ArrayList<Pair<String,Long>>();
    while (!queue.isEmpty()) {
      fList.add(queue.poll());
View Full Code Here
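
The reader constructor used above, new SequenceFile.Reader(fs, path, conf), was deprecated in Hadoop 2.x. A minimal sketch of the same Text/LongWritable read loop against the option-based constructor (the part-file path is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class FListReaderSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("output/parallelcounting/part-r-00000"); // hypothetical
    SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
    try {
      Text key = new Text();
      LongWritable value = new LongWritable();
      // reader.next fills the passed-in Writables rather than allocating
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value.get());
      }
    } finally {
      reader.close();
    }
  }
}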

    vector = v;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    // Delegate to a type-specific Writable wrapper so each vector type is
    // serialized in its most compact form.
    Writable w;
    if (vector instanceof Writable) {
      w = (Writable) vector;
    } else if (vector instanceof RandomAccessSparseVector) {
      w = new RandomAccessSparseVectorWritable(vector);
    } else if (vector instanceof SequentialAccessSparseVector) {
      w = new SequentialAccessSparseVectorWritable((SequentialAccessSparseVector) vector);
    } else {
      w = new DenseVectorWritable(new DenseVector(vector));
    }
    w.write(out);
  }
View Full Code Here

    ClusteringTestUtils.writePointsToFile(points, "testdata/random-input", fs, job);
   
    RandomSeedGenerator.buildRandom("testdata/random-input", "testdata/random-output", 4);
   
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, new Path("testdata/random-output/part-randomSeed"), job);
    // Instantiate key/value objects from the classes recorded in the file header
    Writable key = (Writable) reader.getKeyClass().newInstance();
    Cluster value = (Cluster) reader.getValueClass().newInstance();
   
    int clusterCount = 0;
    Set<Integer> set = new HashSet<Integer>();
    while (reader.next(key, value)) {
View Full Code Here

                                                   int chunkSizeInMegabytes,
                                                   Writable value,
                                                   int[] maxTermDimension) throws IOException {
    List<Path> chunkPaths = new ArrayList<Path>();
   
    Writable key = new Text();
    Configuration conf = new Configuration();
   
    FileSystem fs = FileSystem.get(wordCountPath.toUri(), conf);
    FileStatus[] outputFiles = fs.globStatus(new Path(wordCountPath.toString()
                                                      + OUTPUT_FILES_PATTERN));
   
    long chunkSizeLimit = chunkSizeInMegabytes * 1024L * 1024L; // long math avoids int overflow
    int chunkIndex = 0;
    Path chunkPath = getPath(dictionaryPathBase + DICTIONARY_FILE,
      chunkIndex);
    chunkPaths.add(chunkPath);
   
    SequenceFile.Writer dictWriter = new SequenceFile.Writer(fs, conf, chunkPath, Text.class,
        IntWritable.class);
   
    long currentChunkSize = 0;
   
    int i = 0;
    for (FileStatus fileStatus : outputFiles) {
      Path path = fileStatus.getPath();
      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
      // key is the feature; value is its count
      while (reader.next(key, value)) {
        if (currentChunkSize > chunkSizeLimit) {
          dictWriter.close();
          chunkIndex++;
         
          chunkPath = getPath(dictionaryPathBase + DICTIONARY_FILE,
            chunkIndex);
          chunkPaths.add(chunkPath);
         
          dictWriter = new SequenceFile.Writer(fs, conf, chunkPath, Text.class, IntWritable.class);
          currentChunkSize = 0;
        }
       
        // Approximate serialized entry size: fixed per-entry overhead, two
        // bytes per character of the term, and a four-byte int for its id.
        int fieldSize = DICTIONARY_BYTE_OVERHEAD + key.toString().length() * 2
                        + Integer.SIZE / 8;
        currentChunkSize += fieldSize;
        dictWriter.append(key, new IntWritable(i++));
      }
      reader.close(); // done with this part file
    }
View Full Code Here
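
The SequenceFile.Writer constructor used above was likewise deprecated in Hadoop 2.x in favor of the SequenceFile.createWriter factory with options. A minimal sketch of creating the same kind of Text/IntWritable chunk writer (the chunk path is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class DictChunkWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path chunkPath = new Path("dictionary.file-0"); // hypothetical chunk path
    SequenceFile.Writer dictWriter = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(chunkPath),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class));
    try {
      dictWriter.append(new Text("feature"), new IntWritable(0));
    } finally {
      dictWriter.close();
    }
  }
}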

        int sub = Integer.MAX_VALUE;
        if (cmdLine.hasOption(substringOpt)) {
          sub = Integer.parseInt(cmdLine.getValue(substringOpt).toString());
        }
        boolean countOnly = cmdLine.hasOption(countOpt);
        Writable key = (Writable) reader.getKeyClass().newInstance();
        Writable value = (Writable) reader.getValueClass().newInstance();
        writer.append("Key class: ").append(String.valueOf(reader.getKeyClass())).append(" Value Class: ")
        .append(String.valueOf(value.getClass())).append('\n');
        writer.flush();
        long count = 0;
        if (!countOnly) {
          while (reader.next(key, value)) {
            writer.append("Key: ").append(String.valueOf(key));
            String str = value.toString();
            writer.append(": Value: ").append(str.length() > sub ? str.substring(0, sub) : str);
            writer.write('\n');
            writer.flush();
            count++;
          }
View Full Code Here

      }
      Path path = new Path(seqFile.getAbsolutePath());
      System.out.println("Input Path: " + path);
      FileSystem fs = FileSystem.get(path.toUri(), conf);
      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
      Writable key = (Writable) reader.getKeyClass().newInstance();
      ClusterBase value = (ClusterBase) reader.getValueClass().newInstance();
      while (reader.next(key, value)) {
        Vector center = value.getCenter();
        String fmtStr = useJSON ? center.asFormatString() : VectorHelper.vectorToString(center, dictionary);
        writer.append("Id: ").append(String.valueOf(value.getId())).append(":");
View Full Code Here

        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, job);
        try {
          // recordReader = new KeyValueLineRecordReader(job, new FileSplit(path, 0,
          // fs.getFileStatus(path).getLen(), (String[]) null));
          Class<?> valueClass = reader.getValueClass();
          Writable key;
          try {
            key = (Writable) reader.getKeyClass().newInstance();
          } catch (InstantiationException e) { // Should not be possible
            log.error("Exception", e);
            throw new IllegalStateException(e);
View Full Code Here

            "Call queue is full, is ipc.server.max.callqueue.size too small?");
        responder.doRespond(callTooBig);
        return;
      }

      // Reflectively instantiate the declared parameter type, then let it
      // deserialize itself from the connection's data input stream.
      Writable param;
      try {
        param = ReflectionUtils.newInstance(paramClass, conf); // read param
        param.readFields(dis);
      } catch (Throwable t) {
        LOG.warn("Unable to read call parameters for client " +
                 getHostAddress(), t);
        final Call readParamsFailedCall =
          new Call(id, null, this, responder, callSize);
View Full Code Here
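
Reflective instantiation works here only because both sides agree on paramClass up front. When the concrete type is not known in advance, Hadoop's ObjectWritable records the class alongside the instance. A minimal round-trip sketch (the class name is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;

public class ObjectWritableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // writeObject records the declared class with the instance, so the
    // reader does not need to know the type in advance.
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, new Text("hello"), Text.class, conf);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Object roundTripped = ObjectWritable.readObject(in, conf);
    System.out.println(roundTripped); // prints: hello
  }
}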
