Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.MapWritable
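MapWritable is Hadoop's serializable map: a Map<Writable, Writable> that is itself a Writable, so both keys and values must be Writable types, and their concrete classes are recorded during serialization so the map can be reconstructed on read. The snippets below are excerpts from several open-source projects that use it. As a warm-up, here is a minimal, self-contained round-trip sketch (an illustration written for this page, not taken from the projects below):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.MapWritable;
    import org.apache.hadoop.io.Text;

    public class MapWritableRoundTrip {
      public static void main(String[] args) throws IOException {
        MapWritable map = new MapWritable();
        map.put(new Text("count"), new IntWritable(42));

        // Serialize: MapWritable writes its class table and then each entry.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        map.write(new DataOutputStream(bytes));

        // Deserialize into a fresh instance.
        MapWritable copy = new MapWritable();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.get(new Text("count"))); // prints 42
      }
    }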


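Runtime input partitioning, apparently from Apache Hama's BSP runtime (note the peer API here and the bsp.* configuration keys further down): each raw record read from the peer is converted, routed to a partition, wrapped in a single-entry MapWritable, and appended under its converted key to a per-partition, per-peer SequenceFile: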
    KeyValuePair<Writable, Writable> rawRecord = null;
    KeyValuePair<Writable, Writable> convertedRecord = null;

    Class convertedKeyClass = null;
    Class rawKeyClass = null;
    Class rawValueClass = null;
    MapWritable raw = null;

    while ((rawRecord = peer.readNext()) != null) {
      if (rawKeyClass == null && rawValueClass == null) {
        rawKeyClass = rawRecord.getKey().getClass();
        rawValueClass = rawRecord.getValue().getClass();
      }
      convertedRecord = converter.convertRecord(rawRecord, conf);

      if (convertedRecord == null) {
        throw new IOException("The converted record can't be null.");
      }

      Writable convertedKey = convertedRecord.getKey();
      convertedKeyClass = convertedKey.getClass();

      int index = converter.getPartitionId(convertedRecord, partitioner, conf,
          peer, desiredNum);

      if (!writerCache.containsKey(index)) {
        Path destFile = new Path(partitionDir + "/part-" + index + "/file-"
            + peer.getPeerIndex());
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
            destFile, convertedKeyClass, MapWritable.class,
            CompressionType.NONE);
        writerCache.put(index, writer);
      }

      raw = new MapWritable();
      raw.put(rawRecord.getKey(), rawRecord.getValue());

      writerCache.get(index).append(convertedKey, raw);
    }

    for (SequenceFile.Writer w : writerCache.values()) {
      w.close(); // flush and close every per-partition writer
    }


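Merging the partition files back together, from the same partitioner: each file is first sorted with SequenceFile.Sorter, then the sorted runs are merged by repeatedly picking the reader with the smallest converted key and unpacking the raw key/value pairs from its MapWritable value (the method signature and the tail of the merge loop are cut off in this excerpt):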
      Class convertedKeyClass, Class rawKeyClass, Class rawValueClass)
      throws IOException {
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
        destinationFilePath, rawKeyClass, rawValueClass, CompressionType.NONE);
    WritableComparable convertedKey;
    MapWritable value;

    Map<Integer, SequenceFile.Reader> readers = new HashMap<Integer, SequenceFile.Reader>();
    for (int i = 0; i < status.length; i++) {
      SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
          convertedKeyClass, MapWritable.class, conf);
      sorter.setMemory(conf
          .getInt("bsp.input.runtime.partitioning.sort.mb", 50) * 1024 * 1024);
      sorter.setFactor(conf.getInt(
          "bsp.input.runtime.partitioning.sort.factor", 10));
      sorter.sort(status[i].getPath(), status[i].getPath().suffix(".sorted"));

      readers.put(i,
          new SequenceFile.Reader(fs, status[i].getPath().suffix(".sorted"),
              conf));
    }

    for (int i = 0; i < readers.size(); i++) {
      convertedKey = (WritableComparable) ReflectionUtils.newInstance(
          convertedKeyClass, conf);
      value = new MapWritable();

      readers.get(i).next(convertedKey, value);
      candidates.put(i, new KeyValuePair(convertedKey, value));
    }

    while (readers.size() > 0) {
      convertedKey = (WritableComparable) ReflectionUtils.newInstance(
          convertedKeyClass, conf);
      value = new MapWritable();

      int readerIndex = 0;
      WritableComparable firstKey = null;
      MapWritable rawRecord = null;

      for (Map.Entry<Integer, KeyValuePair<WritableComparable, MapWritable>> keys : candidates
          .entrySet()) {
        if (firstKey == null) {
          readerIndex = keys.getKey();
          firstKey = keys.getValue().getKey();
          rawRecord = (MapWritable) keys.getValue().getValue();
        } else {
          WritableComparable currentKey = keys.getValue().getKey();
          if (firstKey.compareTo(currentKey) > 0) {
            readerIndex = keys.getKey();
            firstKey = currentKey;
            rawRecord = (MapWritable) keys.getValue().getValue();
          }
        }
      }

      for (Map.Entry<Writable, Writable> e : rawRecord.entrySet()) {
        writer.append(e.getKey(), e.getValue());
      }

      candidates.remove(readerIndex);

      // ... (advance the chosen reader, refill `candidates`, and drop
      // exhausted readers; the rest of the merge loop is elided)

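The unsorted variant of the same merge: every partition file is scanned sequentially, and the raw pairs stored in each MapWritable are appended straight to the destination writer: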
      Class convertedKeyClass, Class rawKeyClass, Class rawValueClass)
      throws IOException {
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
        destinationFilePath, rawKeyClass, rawValueClass, CompressionType.NONE);
    Writable key;
    MapWritable rawRecord;

    for (int i = 0; i < status.length; i++) {
      SequenceFile.Reader reader = new SequenceFile.Reader(fs,
          status[i].getPath(), conf);
      key = (Writable) ReflectionUtils.newInstance(convertedKeyClass, conf);
      rawRecord = new MapWritable();

      while (reader.next(key, rawRecord)) {
        for (Map.Entry<Writable, Writable> e : rawRecord.entrySet()) {
          writer.append(e.getKey(), e.getValue());
        }
      }
      reader.close();
    }
    writer.close(); // all inputs copied; close the merged output
  }

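An HBase region server reporting for duty to the master; the master's reply arrives as a MapWritable of configuration values: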
    // Do initial RPC setup.  The final argument indicates that the RPC should retry indefinitely.
    this.hbaseMaster = (HMasterRegionInterface)HbaseRPC.waitForProxy(
      HMasterRegionInterface.class, HMasterRegionInterface.versionID,
      new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
      this.conf, -1);
    MapWritable result = null;
    long lastMsg = 0;
    while(!stopRequested.get()) {
      try {
        this.requestCount.set(0);
        this.serverInfo.setLoad(new HServerLoad(0, onlineRegions.size()));
        // ... (send the report to the master and handle its MapWritable
        // reply; the remainder of the loop is elided)

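Sqoop export setup: the column-name-to-Java-type mapping is packed into a MapWritable of Text pairs and stored in the job configuration with DefaultStringifier so the mappers can recover it (the start of the statement is cut off in this excerpt):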
          options.getSqlQuery());
      } else {
        columnTypeInts = connManager.getColumnTypesForProcedure(
          options.getCall());
      }
      MapWritable columnTypes = new MapWritable();
      for (Map.Entry<String, Integer> e : columnTypeInts.entrySet()) {
        Text columnName = new Text(e.getKey());
        Text columnText = new Text(
            connManager.toJavaType(tableName, e.getKey(), e.getValue()));
        columnTypes.put(columnName, columnText);
      }
      DefaultStringifier.store(job.getConfiguration(), columnTypes,
          AvroExportMapper.AVRO_COLUMN_TYPES_MAP);
    }
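On the read side, the mapper can recover the same map with the matching DefaultStringifier call; a minimal sketch reusing the constant from the snippet above:

      // `conf` is the task's Configuration; DefaultStringifier.load throws IOException.
      MapWritable columnTypes = DefaultStringifier.load(conf,
          AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class);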

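The master's side of the same handshake in HBase: a MapWritable carries the configuration subset (root directory, filesystem, and the region server's own address as the master sees it) back to an initializing region server: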
  /**
   * @return Subset of configuration to pass initializing regionservers: e.g.
   * the filesystem to use and root directory to use.
   */
  protected MapWritable createConfigurationSubset() {
    MapWritable mw = addConfig(new MapWritable(), HConstants.HBASE_DIR);
    // Get the real address of the HRS.
    String rsAddress = Server.getRemoteAddress();
    if (rsAddress != null) {
      mw.put(new Text("hbase.regionserver.address"), new Text(rsAddress));
    }
   
    return addConfig(mw, "fs.default.name");
  }
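The addConfig helper is referenced above but not shown; a plausible shape, assuming it simply copies one configuration entry into the map as a Text-to-Text pair (an assumption, not the HBase source):

      protected MapWritable addConfig(final MapWritable mw, final String key) {
        // Hypothetical helper: copy conf[key] into the map.
        mw.put(new Text(key), new Text(this.conf.get(key)));
        return mw;
      }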

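An MRUnit test of the "stripes" co-occurrence pattern: two identical MapWritable stripes go in, and the reducer is expected to emit a single stripe with the counts summed: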
    private IntWritable one = new IntWritable(1);
    private Text foo = new Text("foo");
    private Text bar = new Text("bar");
    @Test
    public void testReducer() throws Exception {
        MapWritable map = new MapWritable();
        map.put(foo, new IntWritable(2));
        map.put(bar, new IntWritable(2));


        MapWritable map1 = new MapWritable();
        map1.put(foo, new IntWritable(2));
        map1.put(bar, new IntWritable(2));


        List<MapWritable> mapList = new ArrayList<MapWritable>();
        mapList.add(map);
        mapList.add(map1);

        Text text = new Text("key");
        MapWritable resultMap = new MapWritable();
        resultMap.put(new Text("foo"), new IntWritable(4));
        resultMap.put(new Text("bar"), new IntWritable(4));


        new ReduceDriver<Text,MapWritable,Text,MapWritable>()
               .withReducer(new StripesReducer())
               .withInput(text,mapList)
               .withOutput(text, resultMap)
               .runTest();
    }
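The StripesReducer under test is not shown on this page. A minimal new-API reducer that would satisfy the expected output above might look like this sketch (class and method shapes are assumptions, not the original source):

    public static class StripesReducer
        extends Reducer<Text, MapWritable, Text, MapWritable> {

      @Override
      protected void reduce(Text key, Iterable<MapWritable> stripes, Context context)
          throws IOException, InterruptedException {
        MapWritable sum = new MapWritable();
        // Element-wise sum of all stripes for this key.
        for (MapWritable stripe : stripes) {
          for (Map.Entry<Writable, Writable> e : stripe.entrySet()) {
            IntWritable running = (IntWritable) sum.get(e.getKey());
            int add = ((IntWritable) e.getValue()).get();
            if (running == null) {
              // Entries of a freshly deserialized MapWritable are new objects,
              // so storing them directly is safe here.
              sum.put(e.getKey(), new IntWritable(add));
            } else {
              running.set(running.get() + add);
            }
          }
        }
        context.write(key, sum);
      }
    }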

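A trivial old-API mapper that emits the same single-entry MapWritable for every input record: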
    public static class ConstantMapper extends MapReduceBase implements Mapper {

        @Override
        public void map(Object key, Object value, OutputCollector output, Reporter reporter) throws IOException {
            MapWritable map = new MapWritable();
            map.put(new Text("key"), new Text("value"));
            output.collect(new LongWritable(), map);
        }

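From a Hive SerDe, apparently elasticsearch-hadoop (note the toES alias translation): the deserialized document is a MapWritable, and each struct field is looked up by its translated name, descending through nested MapWritables for dotted, multi-level aliases: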
            List<TypeInfo> info = structType.getAllStructFieldTypeInfos();

            // return just the values
            List<Object> struct = new ArrayList<Object>();

            MapWritable map = (MapWritable) data;
            Text reuse = new Text();
            for (int index = 0; index < names.size(); index++) {
                String esAlias = alias.toES(names.get(index));
                if (IS_ES_10) {
                    // check for multi-level alias
                    Writable result = map;
                    for (String level : StringUtils.tokenize(esAlias, ".")) {
                        reuse.set(level);
                        result = ((MapWritable) result).get(reuse);
                        if (result == null) {
                            break;
                        }
                    }
                    struct.add(hiveFromWritable(info.get(index), result, alias, IS_ES_10));
                }
                else {
                    reuse.set(alias.toES(names.get(index)));
                    struct.add(hiveFromWritable(info.get(index), map.get(reuse), alias, IS_ES_10));
                }
            }
            return struct;
        }

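The region server's main loop: run() spins on reportForDuty() until the master answers with the MapWritable of configuration values, then initializes from it: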
  public void run() {
    regionServerThread = Thread.currentThread();
    boolean quiesceRequested = false;
    try {
      MapWritable w = null;
      while (!stopRequested.get()) {
        w = reportForDuty();
        if (w != null) {
          init(w);
          break;
        }
        // ... (otherwise sleep and retry; the remainder of run() is elided)

