Package backtype.storm.topology

Examples of backtype.storm.topology.TopologyBuilder
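The snippets below, collected from several open-source projects, show TopologyBuilder being used to wire spouts and bolts together before a topology is submitted. For orientation, here is a minimal, self-contained sketch of the same pattern; the EchoBolt class and the component names are made up for illustration, while TestWordSpout ships with Storm's testing package:

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

public class MinimalTopologyExample {

  // Illustrative bolt: appends "!" to each incoming word.
  public static class EchoBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
      collector.emit(new Values(input.getString(0) + "!"));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
      declarer.declare(new Fields("word"));
    }
  }

  public static void main(String[] args) {
    // Component ids ("words", "echo") name the nodes; the numbers are parallelism hints.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new TestWordSpout(), 2);
    builder.setBolt("echo", new EchoBolt(), 2).shuffleGrouping("words");

    // Run in-process for a few seconds, then shut down.
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("minimal-example", conf, builder.createTopology());
    Utils.sleep(5000);
    cluster.killTopology("minimal-example");
    cluster.shutdown();
  }
}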


    // Example: an mdrill import topology; custom task assignment and per-prefix start times come from config and the command line, and each prefix gets its own map spout and reduce bolt.
    String assignmentports=String.valueOf(stormconf.get(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_PORTS+"."+topologyName));
    conf.put(CustomAssignment.TOPOLOGY_CUSTOM_ASSIGNMENT, MdrillDefaultTaskAssignment.class.getName());
    conf.put(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_DEFAULT, assignment);
    conf.put(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_PORTS, assignmentports);

    TopologyBuilder builder = new TopologyBuilder();
   
    // Optional 5th argument: a start time applied to every prefix (ignored when it is "0").
    if(args.length>=6)
    {
      String tttime=args[5];
      for(String prefix:prefixlist)
      {
        if(!tttime.equals("0"))
        {
          conf.put(prefix+"-start-time", tttime);
        }
      }
    }
   
    // Optional 6th argument: per-prefix start-time overrides in "prefix:time;prefix:time" form.
    if(args.length>=7)
    {
      String[] prefix_time_list=args[6].split(";");
      for(String prefix_time:prefix_time_list)
      {
        String[] prefix_time_col=prefix_time.split(":");
        if(prefix_time_col.length>1)
        {
          conf.put(prefix_time_col[0]+"-start-time", prefix_time_col[1]);
        }
      }
    }
   
    // Record the current time under each prefix's validate-time key.
    for(String prefix:prefixlist)
    {
      conf.put(prefix+"-validate-time", System.currentTimeMillis());
    }
   
   
    // For each prefix, read its mode and thread counts from the config (falling back to the worker count when unset or unparsable), then wire up its spout and bolt.
    for(String prefix:prefixlist)
    {
      String mode=String.valueOf(conf.get(prefix+"-mode"));
      String threadconfig=conf.get(prefix+"-threads")!=null?String.valueOf(conf.get(prefix+"-threads")):String.valueOf(workescount);
      int threads=workescount;
      try{
        threads=Integer.parseInt(threadconfig);
      }catch(Throwable e)
      {
        threads=workescount;
      }
      int threads_reduce=threads;
      String threadconfig_reduce=conf.get(prefix+"-threads_reduce")!=null?String.valueOf(conf.get(prefix+"-threads_reduce")):String.valueOf(threads_reduce);
      try{
        threads_reduce=Integer.parseInt(threadconfig_reduce);
      }catch(Throwable e)
      {
        threads_reduce=threads;
      }
     
      if(mode.equals("local"))
      {
        builder.setSpout("map_"+prefix, new ImportSpoutLocal(prefix), threads);
      } else {
        builder.setSpout("map_"+prefix, new ImportSpout(prefix), threads);
        builder.setBolt("reduce_"+prefix, new ImportBolt(prefix), threads_reduce).fieldsGrouping("map_"+prefix, new Fields("key"));
      }
    }
   
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());

  }


        // Example: a word-count topology whose counts are written to Cassandra; the CassandraBolt is configured to ack on write (AckStrategy.ACK_ON_WRITE).
                new DefaultTupleMapper("stormks", "stormcf", "word"));
        cassandraBolt.setAckStrategy(AckStrategy.ACK_ON_WRITE);

        // setup topology:
        // wordSpout ==> countBolt ==> cassandraBolt
        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(WORD_SPOUT, wordSpout, 3);
        builder.setBolt(COUNT_BOLT, countBolt, 3).fieldsGrouping(WORD_SPOUT, new Fields("word"));
        builder.setBolt(CASSANDRA_BOLT, cassandraBolt, 3).shuffleGrouping(COUNT_BOLT);

        // With no arguments, run briefly on an in-process LocalCluster; otherwise submit to a cluster with three workers.
        if (args.length == 0) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", config, builder.createTopology());
            Thread.sleep(10000);
            cluster.killTopology("test");
            cluster.shutdown();
            System.exit(0);
        } else {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        }
    }

    }

  }

  // Example: a manually wired DRPC topology; the DRPCSpout receives "exclamation" calls and ReturnResults sends answers back to the LocalDRPC server.
  public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    LocalDRPC drpc = new LocalDRPC();

    DRPCSpout spout = new DRPCSpout("exclamation", drpc);
    builder.setSpout("drpc", spout);
    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");

    LocalCluster cluster = new LocalCluster();
    Config conf = new Config();
    cluster.submitTopology("exclaim", conf, builder.createTopology());

    // execute() blocks until the result for each argument has travelled through the topology.
    System.out.println(drpc.execute("exclamation", "aaa"));
    System.out.println(drpc.execute("exclamation", "bbb"));

  }

// Example: joining two feeder streams (gender and age) on the "id" field with a SingleJoinBolt.
public class SingleJoinExample {
  public static void main(String[] args) {
    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("gender", genderSpout);
    builder.setSpout("age", ageSpout);
    builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age"))).fieldsGrouping("gender", new Fields("id"))
        .fieldsGrouping("age", new Fields("id"));

    Config conf = new Config();
    conf.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("join-example", conf, builder.createTopology());

    for (int i = 0; i < 10; i++) {
      String gender;
      if (i % 2 == 0) {
        gender = "male";


  }

  // Example: the classic exclamation topology; submitted to a cluster when a topology name is given, otherwise run locally for ten seconds.
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", conf, builder.createTopology());
      Utils.sleep(10000);
      cluster.killTopology("test");
      cluster.shutdown();
    }
  }

        return new BoltDeclarerImpl(component);
    }
   
    // Example: building a transactional topology; a coordinator spout drives a CoordinatedBolt-wrapped batch emitter and the user bolts.
    public TopologyBuilder buildTopologyBuilder() {
        String coordinator = _spoutId + "/coordinator";
        TopologyBuilder builder = new TopologyBuilder();
        SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
        for(Map conf: _spoutConfs) {
            declarer.addConfigurations(conf);
        }
        declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);

        BoltDeclarer emitterDeclarer =
                builder.setBolt(_spoutId,
                        new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout),
                                             null,
                                             null),
                        _spoutParallelism)
                .allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID)
                .addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
        if(_spout instanceof ICommitterTransactionalSpout) {
            emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
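        // Wrap each user bolt in a CoordinatedBolt, passing in coordination info from every component it subscribes to.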
        for(String id: _bolts.keySet()) {
            Component component = _bolts.get(id);
            Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
            for(String c: componentBoltSubscriptions(component)) {
                coordinatedArgs.put(c, SourceArgs.all());
            }
           
            IdStreamSpec idSpec = null;
            if(component.committer) {
                idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);         
            }
            BoltDeclarer input = builder.setBolt(id,
                                                  new CoordinatedBolt(component.bolt,
                                                                      coordinatedArgs,
                                                                      idSpec),
                                                  component.parallelism);
            for(Map conf: component.componentConfs) {

      conf.put("click-spout-start-time", args[0]);
      conf.put("access-spout-start-time", args[0]);

    }
   
    // Example: a page-view / click monitoring topology; each spout feeds a reduce bolt grouped by its key field.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("pvmap", new PvSpout("pv-spout"), 48);
    builder.setBolt("reduce", new SumReduceBolt("pv"), 16).fieldsGrouping("pvmap", new Fields("key") );
    //builder.setSpout("accessmap", new AccessSpout("access-spout"), 24);
    //builder.setBolt("accessreduce", new AccessReduceBolt("access"), 4).fieldsGrouping("accessmap", new Fields("hashkey") );

    builder.setSpout("clickmap", new ClickSpout("click-spout"), 4);
    builder.setBolt("clickreduce", new ClickReduceBolt("click"), 4).fieldsGrouping("clickmap", new Fields("hashkey") );
    StormSubmitter.submitTopology("quanjingmointor", conf,builder.createTopology());

  }

   
    // Example: assembling a linear DRPC topology; each component bolt is chained after the previous one inside a CoordinatedBolt, and the final output is joined with its request id and returned.
    private StormTopology createTopology(DRPCSpout spout) {
        final String SPOUT_ID = "spout";
        final String PREPARE_ID = "prepare-request";
       
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout(SPOUT_ID, spout);
        builder.setBolt(PREPARE_ID, new PrepareRequest())
                .noneGrouping(SPOUT_ID);
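        // Chain the component bolts: the first one subscribes to the prepared request args, each later one to its predecessor.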
        int i=0;
        for(; i<_components.size();i++) {
            Component component = _components.get(i);
           
            Map<String, SourceArgs> source = new HashMap<String, SourceArgs>();
            if (i==1) {
                source.put(boltId(i-1), SourceArgs.single());
            } else if (i>=2) {
                source.put(boltId(i-1), SourceArgs.all());
            }
            IdStreamSpec idSpec = null;
            if(i==_components.size()-1 && component.bolt instanceof FinishedCallback) {
                idSpec = IdStreamSpec.makeDetectSpec(PREPARE_ID, PrepareRequest.ID_STREAM);
            }
            BoltDeclarer declarer = builder.setBolt(
                    boltId(i),
                    new CoordinatedBolt(component.bolt, source, idSpec),
                    component.parallelism);
           
            for(Map conf: component.componentConfs) {
                declarer.addConfigurations(conf);
            }
           
            if(idSpec!=null) {
                declarer.fieldsGrouping(idSpec.getGlobalStreamId().get_componentId(), PrepareRequest.ID_STREAM, new Fields("request"));
            }
            if(i==0 && component.declarations.isEmpty()) {
                declarer.noneGrouping(PREPARE_ID, PrepareRequest.ARGS_STREAM);
            } else {
                String prevId;
                if(i==0) {
                    prevId = PREPARE_ID;
                } else {
                    prevId = boltId(i-1);
                }
                for(InputDeclaration declaration: component.declarations) {
                    declaration.declare(prevId, declarer);
                }
            }
            if(i>0) {
                declarer.directGrouping(boltId(i-1), Constants.COORDINATED_STREAM_ID);
            }
        }
       
        IRichBolt lastBolt = _components.get(_components.size()-1).bolt;
        OutputFieldsGetter getter = new OutputFieldsGetter();
        lastBolt.declareOutputFields(getter);
        Map<String, StreamInfo> streams = getter.getFieldsDeclaration();
        if(streams.size()!=1) {
            throw new RuntimeException("Must declare exactly one stream from last bolt in LinearDRPCTopology");
        }
        String outputStream = streams.keySet().iterator().next();
        List<String> fields = streams.get(outputStream).get_output_fields();
        if(fields.size()!=2) {
            throw new RuntimeException("Output stream of last component in LinearDRPCTopology must contain exactly two fields. The first should be the request id, and the second should be the result.");
        }

        // Join the last bolt's output with the original request id, then hand the result back to the DRPC server.
        builder.setBolt(boltId(i), new JoinResult(PREPARE_ID))
                .fieldsGrouping(boltId(i-1), outputStream, new Fields(fields.get(0)))
                .fieldsGrouping(PREPARE_ID, PrepareRequest.RETURN_STREAM, new Fields("request"));
        i++;
        builder.setBolt(boltId(i), new ReturnResults())
                .noneGrouping(boltId(i-1));
        return builder.createTopology();
    }

   
    // Example: an mdrill service topology with one group of shard spouts per replica and a separate merge spout.
    MdrillSeriviceSpout ms = new MdrillSeriviceSpout(paramsMs, hadoopConfDir, hdfsSolrDir,
        tableName, localWorkDirList, portbase + shards*replication + 100, 0,
        partions, topologyName);

    TopologyBuilder builder = new TopologyBuilder();
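    // One shard spout group per replica index; each group runs as many spout tasks as there are shards.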
    for(int i=0;i<replication;i++)
    {
      BoltParams paramsShard=new BoltParams();
      paramsShard.replication=replication;
      paramsShard.replicationindex=i;
      paramsShard.compname="shard_"+i;
      paramsShard.binlog=realtimebinlog;


      MdrillSeriviceSpout solr = new MdrillSeriviceSpout(paramsShard, hadoopConfDir, hdfsSolrDir,
          tableName, localWorkDirList, portbase+shards*i, shards, partions,
          topologyName);
      builder.setSpout("shard@"+i, solr, shards);
    }
    builder.setSpout("merge", ms, msCount);
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    System.out.println("start complete ");
    System.exit(0);
  }

    // Example: a unit test that wires a CassandraBatchingBolt into a one-bolt topology and builds a mock TopologyContext around it.
    @Test
    public void testBolt() throws Exception {
        TupleMapper<String, String, String> tupleMapper = new DefaultTupleMapper(KEYSPACE, "users", "VALUE");
        String configKey = "cassandra-config";
        CassandraBatchingBolt<String, String, String> bolt = new CassandraBatchingBolt<String, String, String>(configKey, tupleMapper);
        TopologyBuilder builder = new TopologyBuilder();
        builder.setBolt("TEST_BOLT", bolt);

        Fields fields = new Fields("VALUE");
        TopologyContext context = new MockTopologyContext(builder.createTopology(), fields);

        Config config = new Config();
        config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 5000);
       
        Map<String, Object> clientConfig = new HashMap<String, Object>();


