Package backtype.storm

Examples of backtype.storm.Config
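Before the individual excerpts, a minimal sketch of the typical Config pattern: build a topology, create a Config, set a few topology-level options, and pass it in at submission time. It uses the TestWordSpout that ships in backtype.storm.testing and runs in-process with LocalCluster; the topology name and numbers are illustrative.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

public class ConfigBasics {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout(), 2);

        Config conf = new Config();
        conf.setDebug(true);             // log every emitted tuple
        conf.setNumWorkers(2);           // worker processes when run on a real cluster
        conf.setMessageTimeoutSecs(30);  // replay tuples that are not acked within 30s

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("config-basics", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("config-basics");
        cluster.shutdown();
    }
}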


    // Component-level configuration: cap this component's parallelism at a single task.
    @Override
    public Map<String, Object> getComponentConfiguration() {
        Config ret = new Config();
        ret.setMaxTaskParallelism(1);
        return ret;
    }


        declarer.declare(new Fields(toDeclare));
    }

    // Component-level configuration: register a custom class with Storm's Kryo
    // serialization so that instances of it can be carried inside tuples.
    @Override
    public Map<String, Object> getComponentConfiguration() {
        Config conf = new Config();
        conf.registerSerialization(MemoryTransactionalSpoutMeta.class);
        return conf;
    }
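For comparison, the same kind of per-component setting can be applied from the topology definition instead of overriding getComponentConfiguration(). A short sketch, assuming a hypothetical MyBolt class; setMaxTaskParallelism and addConfiguration are methods on the declarer returned by setBolt:

TopologyBuilder builder = new TopologyBuilder();
builder.setBolt("my-bolt", new MyBolt(), 4)          // MyBolt is a placeholder, not from the excerpts
       .setMaxTaskParallelism(1)                     // same effect as the override above
       .addConfiguration("my.custom.key", "value");  // arbitrary component-level entry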

public class Topology {
  public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    // Start from the cluster-wide storm.yaml settings, then layer topology-specific values on top.
    Map stormconf = Utils.readStormConfig();

    Config conf = new Config();
    conf.putAll(stormconf);
    // JVM options for every worker process of this topology.
    conf.put("topology.worker.childopts", "-Xms4g -Xmx4g -Xmn2g -XX:SurvivorRatio=3 -XX:PermSize=96m -XX:MaxPermSize=256m -XX:+UseParallelGC -XX:ParallelGCThreads=16 -XX:+UseAdaptiveSizePolicy -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:%storm.home%/logs/gc-%port%.log");
    conf.setMaxSpoutPending(10000);
    int workerCount = 12;
    conf.setMessageTimeoutSecs(60);
    conf.setNumWorkers(workerCount);
    conf.setNumAckers(4);

    // An optional start time from the command line is handed to the spouts via custom keys.
    if (args.length > 0) {
      conf.put("pv-spout-start-time", args[0]);
      conf.put("click-spout-start-time", args[0]);
      conf.put("access-spout-start-time", args[0]);
    }

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("pvmap", new PvSpout("pv-spout"), 48);
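The excerpt above stops before submission; a minimal sketch of the usual next step, assuming the remaining spouts and bolts have been registered on the builder and using the standard StormSubmitter API (the topology name is illustrative):

    // ... remaining spouts and bolts registered on builder ...

    // Submit with the Config assembled above; AlreadyAliveException and
    // InvalidTopologyException are already declared on main().
    StormSubmitter.submitTopology("pv-topology", conf, builder.createTopology());
  }
}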

    return Utils.readDefaultConfig();
  }

  // Collect the value of every public constant declared on Config (the
  // configuration key names) via reflection.
  public static List<Object> All_CONFIGS() {
    List<Object> rtn = new ArrayList<Object>();
    Config config = new Config();
    Class<?> ConfigClass = config.getClass();
    Field[] fields = ConfigClass.getFields();
    for (int i = 0; i < fields.length; i++) {
      try {
        // The fields are public static final, so get(null) reads them directly.
        Object obj = fields[i].get(null);
        rtn.add(obj);
      } catch (Exception e) {
        // Skip any field that cannot be read reflectively.
      }
    }
    return rtn;
  }
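A usage sketch for the helper above, printing each public Config constant (these are the configuration key strings):

for (Object key : All_CONFIGS()) {
    System.out.println(key);   // e.g. "topology.workers", "topology.max.spout.pending"
}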

    String hadoopConfDir = (String) stormconf.get("hadoop.conf.dir");
    String localWorkDirList = (String) stormconf.get("higo.workdir.list");
    Integer msCount = StormUtils.parseInt(stormconf
        .get("higo.mergeServer.count"));
    Config conf = new Config();
    // Copy the per-table "higo.mode.<table>" settings from the cluster config
    // into the topology Config.
    String[] tablelist = tableName.split(",");
    for (String tbl : tablelist) {
      String key = "higo.mode." + tbl;
      Object val = stormconf.get(key);
      if (val != null) {
        conf.put(key, val);
      }
    }

    conf.setNumWorkers(shards + msCount);   // workers sized to the shard count plus the merge-server count
    conf.setNumAckers(0);                   // acking disabled: tuples are not tracked or replayed
//    conf.setMaxSpoutPending(100);
   
    // Pick a custom task-assignment strategy and pass its settings through the Config.
    List<String> assignment = (List<String>) stormconf.get(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_DEFAULT + "." + topologyName);
    String assignmentports = String.valueOf(stormconf.get(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_PORTS + "." + topologyName));
    if ((assignment == null || assignment.size() == 0) && stormconf.containsKey("higo.fixed.shards")) {
      // Backward compatibility with the old scheduling.
      Integer fixassign = StormUtils.parseInt(stormconf.containsKey("higo.fixed.shards") ? stormconf.get("higo.fixed.shards") : 0);
      conf.put(CustomAssignment.TOPOLOGY_CUSTOM_ASSIGNMENT, MdrillTaskAssignment.class.getName());
      conf.put(MdrillTaskAssignment.SHARD_REPLICATION, replication);
      conf.put(MdrillTaskAssignment.HIGO_FIX_SHARDS, fixassign);
      for (int i = 1; i <= fixassign; i++) {
        conf.put(MdrillTaskAssignment.HIGO_FIX_SHARDS + "." + i, (String) stormconf.get("higo.fixed.shards" + "." + i));
      }
      conf.put(MdrillTaskAssignment.MS_PORTS, (String) stormconf.get("higo.merge.ports"));
      conf.put(MdrillTaskAssignment.MS_NAME, "merge");
      conf.put(MdrillTaskAssignment.SHARD_NAME, "shard");
      conf.put(MdrillTaskAssignment.REALTIME_NAME, "realtime");
    } else {
      conf.put(CustomAssignment.TOPOLOGY_CUSTOM_ASSIGNMENT, MdrillDefaultTaskAssignment.class.getName());
      conf.put(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_DEFAULT, assignment);
      conf.put(MdrillDefaultTaskAssignment.MDRILL_ASSIGNMENT_PORTS, assignmentports);
    }
    BoltParams paramsMs = new BoltParams();
    paramsMs.compname = "merge_0";
    paramsMs.replication = 1;
    paramsMs.replicationindex = 0;

        builder.setBolt("TEST_BOLT", bolt);

        Fields fields = new Fields("VALUE");
        TopologyContext context = new MockTopologyContext(builder.createTopology(), fields);

        Config config = new Config();
        config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 5000);

        // Nest the Cassandra client settings under a single key; the bolt reads
        // them back out of the storm configuration in prepare().
        Map<String, Object> clientConfig = new HashMap<String, Object>();
        clientConfig.put(StormCassandraConstants.CASSANDRA_HOST, "localhost:9160");
        clientConfig.put(StormCassandraConstants.CASSANDRA_KEYSPACE, Arrays.asList(new String[] { KEYSPACE }));
        config.put(configKey, clientConfig);

        bolt.prepare(config, context, null);
        System.out.println("Bolt Preparation Complete.");

        Values values = new Values(42);

        builder.setBolt("TEST__COUNTER_BOLT", bolt);

        Fields fields = new Fields("Timestamp", "IncrementAmount", "CounterColumn");
        TopologyContext context = new MockTopologyContext(builder.createTopology(), fields);

        Config config = new Config();
        config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 5000);
       
        Map<String, Object> clientConfig = new HashMap<String, Object>();
        clientConfig.put(StormCassandraConstants.CASSANDRA_HOST, "localhost:9160");
        clientConfig.put(StormCassandraConstants.CASSANDRA_KEYSPACE, Arrays.asList(new String [] {KEYSPACE}));
        config.put(configKey, clientConfig);
       

        bolt.prepare(config, context, null);
        System.out.println("Bolt Preparation Complete.");
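For context, a sketch of how a bolt can read those nested client settings back out in its prepare() method. This is an assumption about the bolt's internals rather than code from the excerpts, and it reuses the configKey name from the tests above:

@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    @SuppressWarnings("unchecked")
    Map<String, Object> clientConfig = (Map<String, Object>) stormConf.get(configKey);
    String hosts = (String) clientConfig.get(StormCassandraConstants.CASSANDRA_HOST);
    // ... initialize the Cassandra client from "hosts" and the keyspace list ...
}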

        TridentTopology topology = new TridentTopology();

        // Trident variant: the Cassandra client settings travel in the topology
        // Config under the "cassandra.config" key.
        HashMap<String, Object> clientConfig = new HashMap<String, Object>();
        clientConfig.put(StormCassandraConstants.CASSANDRA_HOST, "localhost:9160");
        clientConfig.put(StormCassandraConstants.CASSANDRA_STATE_KEYSPACE, KEYSPACE);
        Config config = new Config();
        config.setMaxSpoutPending(25);   // limits the number of batches in flight
        config.put("cassandra.config", clientConfig);

        StateFactory cassandraStateFactory = null;
        Options options = null;
        switch(txType){
        case TRANSACTIONAL:

    public static void main(String[] args) throws Exception {
        if (args.length < 1) {
            System.out.println("Please provide kafka broker url, e.g. localhost:9092");
            return;   // nothing to connect to without a broker url
        }

        Config conf = getConfig(args[0]);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology());
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");


        cluster.shutdown();
    }

    private static Config getConfig(String brokerConnectionString) {
        Config conf = new Config();
        // Kafka producer properties, handed to the Trident Kafka state through the topology Config.
        Properties props = new Properties();
        props.put("metadata.broker.list", brokerConnectionString);
        props.put("request.required.acks", "1");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        conf.put(TridentKafkaState.KAFKA_BROKER_PROPERTIES, props);
        return conf;
    }
