Package backtype.storm.generated

Examples of backtype.storm.generated.StormTopology
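StormTopology is the thrift-generated struct describing a topology's spouts, bolts, and state spouts; the snippets below show it being built from component specs, submitted to a cluster, deserialized from disk, and expanded into a "system topology" with ackers. As a minimal, hedged sketch (the component ids and the IRichSpout/IRichBolt instances passed in are placeholders, not taken from any snippet below), this is the usual way user code obtains a StormTopology via TopologyBuilder:

    import backtype.storm.generated.StormTopology;
    import backtype.storm.topology.IRichBolt;
    import backtype.storm.topology.IRichSpout;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.tuple.Fields;

    public class TopologyExample {
        // Builds a two-component topology: a spout feeding a bolt grouped by the "word" field.
        public static StormTopology build(IRichSpout wordSpout, IRichBolt countBolt) {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("word-spout", wordSpout, 2);   // parallelism hint 2
            builder.setBolt("count-bolt", countBolt, 4)     // parallelism hint 4
                   .fieldsGrouping("word-spout", new Fields("word"));
            // createTopology() assembles the thrift StormTopology from the declared specs
            return builder.createTopology();
        }
    }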


            IRichSpout spout = _spouts.get(spoutId);
            ComponentCommon common = getComponentCommon(spoutId, spout);
            spoutSpecs.put(spoutId, new SpoutSpec(ComponentObject.serialized_java(Utils.serialize(spout)), common));
           
        }
        return new StormTopology(spoutSpecs,
                                 boltSpecs,
                                 new HashMap<String, StateSpoutSpec>());
    }
View Full Code Here


        Config conf = createConfig(cmd);

        beforeCreateTopology(cmd, conf);

        LOGGER.info("Creating topology: " + getTopologyName());
        StormTopology topology = createTopology(parallelismHint);
        LOGGER.info("Created topology layout: " + topology);
        LOGGER.info(String.format("Submitting topology '%s'", getTopologyName()));

        if (local) {
            LocalCluster cluster = new LocalCluster();
View Full Code Here
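The snippet above breaks off inside the `if (local)` branch. As a hedged sketch of how such a submit step commonly continues (only `conf`, `topology`, and the local/remote flag correspond to the snippet; the wrapper method is illustrative), the built StormTopology goes either to an in-process LocalCluster or to a remote Nimbus via StormSubmitter:

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.StormSubmitter;
    import backtype.storm.generated.StormTopology;

    public class SubmitSketch {
        // Submits a built StormTopology either to an in-process test cluster
        // or to a real cluster through Nimbus.
        public static void submit(boolean local, String name, Config conf,
                                  StormTopology topology) throws Exception {
            if (local) {
                LocalCluster cluster = new LocalCluster(); // runs the cluster in this JVM
                cluster.submitTopology(name, conf, topology);
            } else {
                // serializes the thrift struct and uploads it to Nimbus
                StormSubmitter.submitTopology(name, conf, topology);
            }
        }
    }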

    Map<Object, Object> stormConf = NimbusUtils.normalizeConf(conf,
        serializedConf, topology);
    Map<Object, Object> totalStormConf = new HashMap<Object, Object>(conf);
    totalStormConf.putAll(stormConf);
    totalStormConf.putAll(serializedConf);
    StormTopology newtopology = new StormTopology(topology);
    // TODO TOPOLOGY_OPTIMIZE
    // if ((Boolean) totalStormConf.get(Config.TOPOLOGY_OPTIMIZE)) {
    // newtopology = optimizeTopology(topology);
    // }
    StormClusterState stormClusterState = data.getStormClusterState();
View Full Code Here

   * @return StormTopology
   */
  @Override
  public StormTopology getTopology(String id) throws NotAliveException,
      TException {
    StormTopology topology = null;
    try {
      StormTopology stormtopology = readStormTopology(conf, id);
      if (stormtopology == null) {
        throw new InvalidTopologyException("topology: " + id + " is null");
      }
      topology = Common.system_topology(readStormConf(conf, id),
          stormtopology);
View Full Code Here

   */
  public Map<Integer, String> mkTaskComponentAssignments(
      Map<Object, Object> conf, String topologyid) throws IOException,
      InvalidTopologyException {
    Map<Object, Object> stormConf = readStormConf(conf, topologyid);
    StormTopology stopology = readStormTopology(conf, topologyid);
    StormTopology topology = null;
    Map<Integer, String> rtn = new HashMap<Integer, String>();

    if (stopology != null) {
      topology = Common.system_topology(stormConf, stopology);
      Integer count = 0;
      count = mkTaskMaker(stormConf, topology.get_bolts(), rtn, count);
      count = mkTaskMaker(stormConf, topology.get_spouts(), rtn, count);
      mkTaskMaker(stormConf, topology.get_state_spouts(), rtn, count);
    }

    return rtn;
  }
View Full Code Here

   * @throws IOException
   */
  public StormTopology readStormTopology(Map<Object, Object> conf,
      String topologyId) throws IOException {
    String stormroot = StormConfig.masterStormdistRoot(conf, topologyId);
    StormTopology topology = null;
    byte[] bTopo = FileUtils.readFileToByteArray(new File(StormConfig
        .masterStormcodePath(stormroot)));
    if (bTopo != null) {
      topology = (StormTopology) Utils.deserialize(bTopo);
    }
View Full Code Here

    this.taskNodeport = new ConcurrentHashMap<Integer, NodePort>();
    this.tasksToComponent = Common.topology_task_info(zkCluster, topologyId);
    // task id list for the current worker
    this.taskids = WorkCommon.readWorkerTaskids(zkCluster, topologyId, supervisorId, port);
    // read the serialized topology file from the local supervisor directory
    StormTopology topology = StormConfig.read_supervisor_topology(conf, topologyId);
    // build the system topology, which includes the acker
    this.systemContext = new SystemContextMake(topology, stormConf,
        topologyId, worker_id, tasksToComponent);
    // the user context uses the StormTopology without the acker
    this.userContext = new UserContextMake(topology, stormConf, topologyId,
View Full Code Here

  }

  public TopologyContext make(Integer task_id) {
    TopologyContext rtn = null;
    try {
      StormTopology systopology = Common.system_topology(stormConf, topology);
      String distroot = StormConfig.supervisor_stormdist_root(stormConf, topologyId);
      String resourcePath = StormConfig.supervisor_storm_resources_path(distroot);
      String workpid = StormConfig.worker_pids_root(stormConf, workerId);
      rtn = new TopologyContext(systopology, tasksToComponent, topologyId, resourcePath, workpid, task_id);
    } catch (Exception e) {
View Full Code Here

        }
    }

    public static StormTopology system_topology(Map storm_conf, StormTopology topology) throws InvalidTopologyException {
        Common.validate_basic(topology);
        StormTopology ret = topology.deepCopy();
        String key = Config.TOPOLOGY_ACKERS;
        Integer ackercount = StormUtils.parseInt(storm_conf.get(key));
        Common.add_acker(ackercount, ret);
        add_system_streams(ret);
        return ret;
View Full Code Here

    builder.setBolt("karma-reducer-json", reducerBolt).fieldsGrouping("karma-generate-json", new Fields("id"));
    builder.setBolt("karma-output-json", sequenceFileBolt).shuffleGrouping("karma-reducer-json");
    Config config = new Config();
    config.put("input.path", "/tmp/loaded_data/simpleloader.seq");
    config.setDebug(true);
    StormTopology topology = builder.createTopology();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("karma-basic-topology",
         config,
         topology);
    Utils.sleep(60000);
View Full Code Here
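The local-mode run above stops at `Utils.sleep(60000)`. A typical, hedged completion (the topology name is taken from the snippet; the teardown calls are the standard LocalCluster API) stops the topology and shuts the in-process cluster down:

    // after the sleep: stop the topology, then the in-process cluster
    cluster.killTopology("karma-basic-topology");
    cluster.shutdown();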
