Package storm.trident

Examples of storm.trident.Stream$GlobalAggScheme


                new Values("the man went to the store and bought some candy", 2l), new Values("four score and seven years ago", 3l),
                new Values("how many apples can you eat", 4l), new Values("to be or not to be the person", 5l));
        spout.setCycle(true);

        TridentTopology topology = new TridentTopology();
        Stream stream = topology.newStream("spout1", spout);

        Fields hdfsFields = new Fields("sentence", "key");

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/trident")
                .withPrefix("trident")
                .withExtension(".txt");

        RecordFormat recordFormat = new DelimitedRecordFormat()
                .withFields(hdfsFields);

        // rotate the output file each time it reaches 5 MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);

        HdfsState.Options options = new HdfsState.HdfsFileOptions()
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(recordFormat)
                .withRotationPolicy(rotationPolicy)
                .withFsUrl(hdfsUrl);

        StateFactory factory = new HdfsStateFactory().withOptions(options);

        // write each batch to HDFS; the updater emits no new fields
        TridentState state = stream
                .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());

        return topology.build();
    }
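
None of these excerpts shows the built topology actually being submitted. A minimal local-mode sketch, assuming the pre-1.0 backtype.storm package layout these examples use and a placeholder HDFS URL:

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;

    Config conf = new Config();
    conf.setMaxSpoutPending(5);   // throttle the number of in-flight Trident batches
    LocalCluster cluster = new LocalCluster();
    // buildTopology(...) is the method excerpted above; the URL is a placeholder
    cluster.submitTopology("hdfs-trident", conf, buildTopology("hdfs://localhost:8020"));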


                new Values("the man went to the store and bought some candy", 2l), new Values("four score and seven years ago", 3l),
                new Values("how many apples can you eat", 4l), new Values("to be or not to be the person", 5l));
        spout.setCycle(true);

        TridentTopology topology = new TridentTopology();
        Stream stream = topology.newStream("spout1", spout);

        Fields hdfsFields = new Fields("sentence", "key");

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/trident")
                .withPrefix("trident")
                .withExtension(".seq");

        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);

        HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
                .withFileNameFormat(fileNameFormat)
                // write (key, sentence) pairs as a Hadoop sequence file
                .withSequenceFormat(new DefaultSequenceFormat("key", "sentence"))
                .withRotationPolicy(rotationPolicy)
                .withFsUrl(hdfsUrl)
                // move each rotated file out of the write path
                .addRotationAction(new MoveFileAction().toDestination("/dest2/"));

        StateFactory factory = new HdfsStateFactory().withOptions(seqOpts);

        TridentState state = stream
                .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());

        return topology.build();
    }
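
FileSizeRotationPolicy rotates on output size alone; the storm-hdfs module these excerpts come from also ships a time-based policy. A drop-in alternative, sketched on the assumption that TimedRotationPolicy and its TimeUnit enum are available as in that module:

    import org.apache.storm.hdfs.trident.rotation.TimedRotationPolicy;

    // rotate the output file every 10 minutes instead of every 5 MB
    FileRotationPolicy rotationPolicy =
            new TimedRotationPolicy(10.0f, TimedRotationPolicy.TimeUnit.MINUTES);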

                new Values("javadoc", "1")
        );
        spout.setCycle(true);

        TridentTopology topology = new TridentTopology();
        Stream stream = topology.newStream("spout1", spout);

        TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
                .withKafkaTopicSelector(new DefaultTopicSelector("test"))
                .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
        stream.partitionPersist(stateFactory, fields, new TridentKafkaUpdater(), new Fields());

        return topology.build();
    }
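
TridentKafkaStateFactory only configures topic selection and tuple mapping; in the old storm-kafka writer used here, producer settings travel in the topology config. A hedged sketch, assuming that version's "kafka.broker.properties" config key and the legacy producer property names:

    import java.util.Properties;
    import backtype.storm.Config;

    Config conf = new Config();
    Properties props = new Properties();
    props.put("metadata.broker.list", "localhost:9092");   // placeholder broker list
    props.put("request.required.acks", "1");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    conf.put("kafka.broker.properties", props);            // key assumed from that storm-kafka version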

        // (excerpt begins mid-builder; the spout, fields, mapper, and HBase
        // config wiring are elided; see the sketch after this example)
        HBaseState.Options options = new HBaseState.Options()
                .withTableName("WordCount");

        StateFactory factory = new HBaseStateFactory(options);

        TridentTopology topology = new TridentTopology();
        Stream stream = topology.newStream("spout1", spout);

        stream.partitionPersist(factory, fields, new HBaseUpdater(), new Fields());

        // query the persisted rows back out of HBase and print them
        TridentState state = topology.newStaticState(factory);
        stream = stream.stateQuery(state, new Fields("word"), new HBaseQuery(), new Fields("columnName", "columnValue"));
        stream.each(new Fields("word", "columnValue"), new PrintFunction(), new Fields());
        return topology.build();
    }
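
This excerpt opens after the HBaseState.Options builder has started, so the mapper wiring is missing. A sketch of what it typically looks like; the column family, config key, and field names are assumptions chosen to match the word-count shape above:

    import org.apache.storm.hbase.trident.mapper.SimpleTridentHBaseMapper;
    import org.apache.storm.hbase.trident.mapper.TridentHBaseMapper;
    import org.apache.storm.hbase.trident.state.HBaseState;

    // map "word" to the HBase row key and "count" to a counter column
    TridentHBaseMapper mapper = new SimpleTridentHBaseMapper()
            .withColumnFamily("cf")                    // assumed column family
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withRowKeyField("word");

    HBaseState.Options options = new HBaseState.Options()
            .withConfigKey("hbase.conf")               // assumed key into the topology config
            .withMapper(mapper)
            .withTableName("WordCount");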

      // ("topology" and "evaluationStateFactory" are created in the elided head of this example)
      MemoryMapState.Factory perceptronModelStateFactory = new MemoryMapState.Factory();
      TridentState perceptronModel = topology.newStaticState(perceptronModelStateFactory);
      TridentState perceptronEvaluation = topology.newStaticState(evaluationStateFactory);

      // Predict
      Stream predictionStream = topology.newStream("nandsamples", new NANDSpout()) //
          .stateQuery(perceptronModel, new Fields("instance"), new ClassifyQuery<Boolean>("perceptron"), new Fields("prediction"));

      // Update evaluation
      predictionStream //
          .persistentAggregate(evaluationStateFactory, new Fields("instance", "prediction"), new AccuracyAggregator<Boolean>(), new Fields("accuracy"));

      // Update model
      predictionStream.partitionPersist(perceptronModelStateFactory, new Fields("instance"), new ClassifierUpdater<Boolean>("perceptron", new PerceptronClassifier()));

      // Classification stream
      topology.newDRPCStream("predict", localDRPC)
      // convert DRPC args to instance
          .each(new Fields("args"), new DRPCArgsToInstance(), new Fields("instance"))
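
The DRPC chain above is cut off by the excerpt. How it continues is an assumption, but the natural completion is a stateQuery against the same perceptron model used by the prediction stream:

    // classify the DRPC-supplied instance with the trained perceptron
    topology.newDRPCStream("predict", localDRPC)
        .each(new Fields("args"), new DRPCArgsToInstance(), new Fields("instance"))
        .stateQuery(perceptronModel, new Fields("instance"),
                new ClassifyQuery<Boolean>("perceptron"), new Fields("prediction"));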

        /**
         * First, grab the tweets stream. We're going to use it in two different
         * places and then join the results.
         */
        Stream contents = topology
                .newStream("tweets", spout)
                .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"));

        /**
         * Now, select and project only the hashtags of each tweet.
         * This stream is essentially a list of (tweetId, hashtag) pairs.
         */
        Stream hashtags = contents
                .each(new Fields("content"), new OnlyHashtags())
                .each(new Fields("content"), new TweetIdExtractor(), new Fields("tweetId"))
                .each(new Fields("content"), new GetContentName(), new Fields("hashtag"))
                .project(new Fields("hashtag", "tweetId"));
                //.each(new Fields("content", "tweetId"), new DebugFilter());

        /**
         * And do the same for URLs, obtaining a stream of
         * (tweetId, url) pairs.
         */
        Stream urls = contents
                .each(new Fields("content"), new OnlyUrls())
                .each(new Fields("content"), new TweetIdExtractor(), new Fields("tweetId"))
                .each(new Fields("content"), new GetContentName(), new Fields("url"))
                .project(new Fields("url", "tweetId"));
                //.each(new Fields("content", "tweetId"), new DebugFilter());
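
The opening comment promises that the two streams get joined, but the join itself falls outside the excerpt. A minimal sketch using Trident's built-in join on the shared tweetId field:

    // inner-join hashtags and urls on tweetId, yielding (tweetId, hashtag, url)
    Stream joined = topology.join(
            hashtags, new Fields("tweetId"),
            urls, new Fields("tweetId"),
            new Fields("tweetId", "hashtag", "url"));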

        // delegate, querying with no extra input fields
        return stateQuery(state, null, function, functionFields);
    }
   
    @Override
    public IAggregatableStream each(Fields inputFields, Function function, Fields functionFields) {
        // run the function on the underlying stream, then re-wrap to keep the grouping
        Stream s = _stream.each(inputFields, function, functionFields);
        return new GroupedStream(s, _groupFields);
    }

    @Override
    public IAggregatableStream partitionAggregate(Fields inputFields, Aggregator agg, Fields functionFields) {
        // wrap the user's aggregator so it aggregates per group rather than per partition
        Aggregator groupedAgg = new GroupedAggregator(agg, _groupFields, inputFields, functionFields.size());
        // the wrapped aggregator consumes the group fields plus the user's inputs...
        Fields allInFields = TridentUtils.fieldsUnion(_groupFields, inputFields);
        // ...and emits the group fields followed by the user's output fields
        Fields allOutFields = TridentUtils.fieldsConcat(_groupFields, functionFields);
        Stream s = _stream.partitionAggregate(allInFields, groupedAgg, allOutFields);
        return new GroupedStream(s, _groupFields);
    }
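
From the user's side, all of this wrapping sits behind Stream.groupBy. A minimal word-count usage, assuming Trident's built-in Count aggregator and the in-memory test state:

    import storm.trident.operation.builtin.Count;
    import storm.trident.testing.MemoryMapState;

    // keep a per-word running count; Trident groups by "word" and
    // applies Count within each group
    stream.groupBy(new Fields("word"))
          .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));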
