Package com.datasalt.pangool.io

Examples of com.datasalt.pangool.io.ITuple
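
ITuple is Pangool's schema-backed record abstraction: fields are written and read by name, with Tuple as the stock implementation. As a primer for the snippets below, here is a minimal sketch of that contract (the schema name and field names are illustrative, not taken from any example on this page):

    // Illustrative schema: a string key plus an int counter (com.datasalt.pangool.io.*)
    Schema schema = new Schema("example", Fields.parse("word:string, count:int"));

    ITuple tuple = new Tuple(schema);          // Tuple is the default ITuple implementation
    tuple.set("word", "pangool");
    tuple.set("count", 1);
    int count = (Integer) tuple.get("count");  // values come back as Object and are cast by the caller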


    @Override
    public void reduce(ITuple group, Iterable<ITuple> tuples, TupleMRContext context, Collector collector)
        throws IOException, InterruptedException, TupleMRException {

      // Sum the "count" field of the Avro Record nested in each tuple's "my_avro" field,
      // reusing the last tuple/record of the group as the output buffer.
      int count = 0;
      ITuple outputTuple = null;
      Record outputRecord = null;
      for (ITuple tuple : tuples) {
        Record record = (Record) tuple.get("my_avro");
        count += (Integer) record.get("count");
        outputTuple = tuple;
        outputRecord = record;
      }
      outputRecord.put("count", count);
      outputTuple.set("my_avro", outputRecord);
      collector.write(outputTuple, NullWritable.get());
    }

    @Override
    public void reduce(ITuple group, Iterable<ITuple> tuples, TupleMRContext context, Collector collector)
        throws IOException, InterruptedException, TupleMRException {

      int count = 0;
      ITuple outputTuple = null;
      for(ITuple tuple : tuples) {
        count += (Integer) tuple.get("count");
        outputTuple = tuple;
      }
      outputTuple.set("count", count);
      collector.write(outputTuple, NullWritable.get());
    }

    public void reduce(ITuple group, Iterable<ITuple> tuples, TupleMRContext context, Collector collector)
        throws java.io.IOException, InterruptedException, TupleMRException {

      int totalCount = 0;
      Iterator<ITuple> iterator = tuples.iterator();
      for(int i = 0; i < n && iterator.hasNext(); i++) {
        ITuple tuple = iterator.next();
        collector.write(tuple, NullWritable.get());
        totalCount += (Integer) tuple.get("count");
      }
     
      outputCountTuple.set("topic", group.get("topic"));
      outputCountTuple.set("totalcount", totalCount);
      collector.getNamedOutput(OUTPUT_TOTALCOUNT).write(outputCountTuple, NullWritable.get());
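
Here n, outputCountTuple and OUTPUT_TOTALCOUNT are fields of the enclosing reducer: the first n tuples of the group go to the default output, and a per-group total goes to a named output. That named output has to be registered on the job beforehand; a minimal sketch of what that registration might look like, assuming a builder-side call along these lines (the schema and field names are assumptions, not shown in the snippet):

      // Hypothetical registration of the named output used above ("builder" stands for the job's TupleMRBuilder).
      Schema totalCountSchema = new Schema("totalcount", Fields.parse("topic:string, totalcount:int"));
      builder.addNamedTupleOutput(OUTPUT_TOTALCOUNT, totalCountSchema);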

    TupleReducer countReducer = new TupleReducer<ITuple, NullWritable>() {

      public void reduce(ITuple group, Iterable<ITuple> tuples, TupleMRContext context,
          Collector collector) throws IOException, InterruptedException, TupleMRException {
        int count = 0;
        ITuple outputTuple = null;
        for(ITuple tuple : tuples) {
          count += (Integer) tuple.get("count");
          outputTuple = tuple;
        }
        outputTuple.set("count", count);
        collector.write(outputTuple, NullWritable.get());
      }
    };
    job.setTupleCombiner(countReducer);
    job.setTupleReducer(countReducer);
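
For context, a count reducer like this one only works once the intermediate schema and the group-by key are declared on the same builder. A minimal sketch of that wiring, with an assumed single-key schema (names are illustrative; addIntermediateSchema appears elsewhere on this page, and setGroupByFields is the builder call that makes reduce() receive one group per key):

    TupleMRBuilder job = new TupleMRBuilder(conf, "count-example");  // conf: a Hadoop Configuration
    Schema countSchema = new Schema("counts", Fields.parse("word:string, count:int"));
    job.addIntermediateSchema(countSchema);
    job.setGroupByFields("word");   // reduce() then sees all tuples sharing the same "word"
    // ...followed by the setTupleCombiner()/setTupleReducer() calls shown above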

 
  /**
   * Returns a Tuple conforming to a simple schema: {@link #SCHEMA}.
   */
  public static ITuple getTuple(String id, int value) {
    ITuple tuple = new Tuple(SCHEMA);
    tuple.set("id", id);
    tuple.set("value", value);
    return tuple;
  }
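
The SCHEMA constant referenced by getTuple() is defined outside this snippet; judging from the two setter calls, a plausible (assumed, not original) declaration would be:

    static final Schema SCHEMA = new Schema("simple", Fields.parse("id:string, value:int"));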

                }
              }

              // For each input Tuple from this file, run the RecordProcessor.
              // The default IdentityRecordProcessor just passes the Tuple through unchanged.
              ITuple processedTuple = null;
              try {
                processedTuple = recordProcessor.process(fileTuple, counterInterface);
              } catch (Throwable e1) {
                throw new RuntimeException(e1);
              }
              if (processedTuple == null) {
                // The tuple has been filtered out by the user
                return;
              }

              // Get the partition Id from this record
              String strKey = "";
              try {
                strKey = getPartitionByKey(processedTuple, tableSpec, jsEngine);
              } catch (Throwable e) {
                throw new RuntimeException(e);
              }
              int shardId = partitionMap.findPartition(strKey);
              if (shardId == -1) {
                throw new RuntimeException(
                    "shard id = -1 must be some sort of software bug. This shouldn't happen if PartitionMap is complete.");
              }

              // Finally write it to the Hadoop output
              for (Field field : processedTuple.getSchema().getFields()) {
                tableTuple.set(field.getName(), processedTuple.get(field.getName()));
              }
              tableTuple.set(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD, shardId);
              collector.write(tableTuple);
            }
          }, inputFile.getSpecificHadoopInputFormatContext());
        }
      }
      tableSpecs.add(table.getTableSpec());
    }

    // We do the same for the replicated tables but the Mapper logic will be different
    // We will send the data to all the partitions
    for (final Table table : tablespace.getReplicateAllTables()) {
      List<Field> fields = new ArrayList<Field>();
      fields.addAll(table.getTableSpec().getSchema().getFields());
      fields.add(SploutSQLOutputFormat.getPartitionField());
      final Schema tableSchema = new Schema(table.getTableSpec().getSchema().getName(), fields);
      schemaCounter++;
      builder.addIntermediateSchema(NullableSchema.nullableSchema(tableSchema));
      // For each input file for the Table we add an input and a TupleMapper
      for (TableInput inputFile : table.getFiles()) {

        final RecordProcessor recordProcessor = inputFile.getRecordProcessor();

        for (Path path : inputFile.getPaths()) {
          builder.addInput(path, inputFile.getFormat(), new TupleMapper<ITuple, NullWritable>() {

            Tuple tableTuple = new Tuple(tableSchema);
            CounterInterface counterInterface = null;

            @Override
            public void map(ITuple key, NullWritable value, TupleMRContext context, Collector collector)
                throws IOException, InterruptedException {

              if (counterInterface == null) {
                counterInterface = new CounterInterface(context.getHadoopContext());
              }

              // For each input Tuple from this file, run the RecordProcessor.
              // The default IdentityRecordProcessor just passes the Tuple through unchanged.
              ITuple processedTuple = null;
              try {
                processedTuple = recordProcessor.process(key, counterInterface);
              } catch (Throwable e1) {
                throw new RuntimeException(e1);
              }
              if (processedTuple == null) {
                // The tuple has been filtered out by the user
                return;
              }

              // Finally write it to the Hadoop output
              for (Field field : processedTuple.getSchema().getFields()) {
                tableTuple.set(field.getName(), processedTuple.get(field.getName()));
              }

              // Send the data of the replicated table to all partitions!
              for (int i = 0; i < nPartitions; i++) {
                tableTuple.set(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD, i);

        Assert.assertEquals("Pere", it.next().get("name").toString());
        Assert.assertEquals("Iván", it.next().get("name").toString());
       
        // Second table in schema order is logs, sorted by (action asc, loc asc)
       
        ITuple next = it.next();
        Assert.assertEquals("DOWN", next.get("action").toString());
        Assert.assertEquals("Germany", next.get("loc").toString());
        next = it.next();
        Assert.assertEquals("DOWN", next.get("action").toString());
        Assert.assertEquals("Spain", next.get("loc").toString());
        next = it.next();
        Assert.assertEquals("DOWN", next.get("action").toString());
        Assert.assertEquals("Spain", next.get("loc").toString());
        next = it.next();
        Assert.assertEquals("UP", next.get("action").toString());
        Assert.assertEquals("Germany", next.get("loc").toString());
        next = it.next();
        Assert.assertEquals("UP", next.get("action").toString());
        Assert.assertEquals("Greece", next.get("loc").toString());
        next = it.next();
        Assert.assertEquals("UP", next.get("action").toString());
        Assert.assertEquals("Greece", next.get("loc").toString());
       
        // Third table in schema order is geodata, sorted by loc desc

        Assert.assertEquals("Spain", it.next().get("loc").toString());
        Assert.assertEquals("Greece", it.next().get("loc").toString());
