Package com.cloudera.iterativereduce.yarn.avro.generated

Examples of com.cloudera.iterativereduce.yarn.avro.generated.StartupConfiguration
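
A minimal, self-contained sketch of how the generated builders fit together; the path, offset, and batch-size values are illustrative, taken from the test snippets further down:

  import com.cloudera.iterativereduce.yarn.avro.generated.FileSplit;
  import com.cloudera.iterativereduce.yarn.avro.generated.StartupConfiguration;

  public class StartupConfigurationExample {
    public static void main(String[] args) {
      // Describe the byte range of the input file assigned to one worker.
      FileSplit split = FileSplit.newBuilder()
          .setPath("/foo/bar")
          .setOffset(100)
          .setLength(200)
          .build();

      // Bundle the split with the per-worker training parameters.
      StartupConfiguration conf = StartupConfiguration.newBuilder()
          .setSplit(split)
          .setBatchSize(2)
          .setIterations(1)
          .setOther(null)          // optional application-specific settings
          .build();

      System.out.println("batchSize=" + conf.getBatchSize()
          + ", iterations=" + conf.getIterations()
          + ", path=" + conf.getSplit().getPath());
    }
  }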


                    + "unknown.").build();
      }
     
      // TODO: can a worker "start" more than once?
     
      StartupConfiguration workerConf = workers.get(workerId);
      Utils.mergeConfigs(appConf, workerConf);

      LOG.debug("Got a startup call, workerId="
          + Utils.getWorkerId(workerId) + ", responded with"
          + ", batchSize=" + workerConf.getBatchSize() + ", iterations="
          + workerConf.getIterations() + ", fileSplit=[" + workerConf.getSplit().getPath()
          + ", " + workerConf.getSplit().getOffset() + "]");

      workersState.put(workerId, WorkerState.STARTED);

      return workerConf;
    }
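
For context, this is roughly what a worker does with the configuration the master returns; the masterClient variable and its startup(...) call are assumptions for illustration, not taken from the snippet above:

  // Hypothetical worker-side consumer of the merged StartupConfiguration.
  StartupConfiguration conf = masterClient.startup(workerId);  // assumed RPC call
  int batchSize   = conf.getBatchSize();
  int iterations  = conf.getIterations();
  FileSplit split = conf.getSplit();
  String path = split.getPath().toString();   // Avro strings are CharSequence
  long offset = split.getOffset();
  long length = split.getLength();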


     
      // Convert a Hadoop input split into the Avro FileSplit used by
      // StartupConfiguration. (The declaration below is reconstructed from
      // context; the original snippet begins mid-method.)
      FileSplit convertedToMetronomeSplit = new FileSplit();
      convertedToMetronomeSplit.length = hadoopFileSplit.getLength();
      convertedToMetronomeSplit.offset = hadoopFileSplit.getStart();
      convertedToMetronomeSplit.path = hadoopFileSplit.getPath().toString();

      StartupConfiguration config = StartupConfiguration.newBuilder()
          .setBatchSize(batchSize).setIterations(iterationCount)
          .setOther(appConfig).setSplit(convertedToMetronomeSplit).build();

      // Pair the configuration with the split's first host so the worker can
      // be scheduled close to its data.
      String wid = "worker-" + workerId;
      ConfigurationTuple tuple = new ConfigurationTuple(split.getLocations()[0], wid, config);

      // Create one StartupConfiguration per HDFS block of the input path p,
      // pairing each configuration with the block's first host as a locality hint.
      for (BlockLocation b : bl) {
        FileSplit split = FileSplit.newBuilder().setPath(p.toString())
            .setOffset(b.getOffset()).setLength(b.getLength()).build();

        StartupConfiguration config = StartupConfiguration.newBuilder()
            .setBatchSize(batchSize).setIterations(iterationCount)
            .setOther(appConfig).setSplit(split).build();

        String wid = "worker-" + workerId;
        ConfigurationTuple tuple = new ConfigurationTuple(b.getHosts()[0], wid,
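
The snippet above leaves out how p and bl are obtained. A minimal sketch under the assumption that they come from the standard Hadoop FileSystem API (the input path here is hypothetical):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.BlockLocation;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Enumerate the HDFS block locations of an input file so that one
  // StartupConfiguration can be generated per block.
  Configuration hadoopConf = new Configuration();
  FileSystem fs = FileSystem.get(hadoopConf);
  Path p = new Path("/user/app/input/train.txt");   // hypothetical input path
  FileStatus status = fs.getFileStatus(p);
  BlockLocation[] bl = fs.getFileBlockLocations(status, 0, status.getLen());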

  public void setUpMaster() throws Exception {
    FileSplit split = FileSplit.newBuilder()
        .setPath("testData/testWorkerService.txt").setOffset(0).setLength(200)
        .build();

    StartupConfiguration conf = StartupConfiguration.newBuilder()
        .setSplit(split).setBatchSize(2).setIterations(1).setOther(null)
        .build();

    HashMap<WorkerId, StartupConfiguration> workers = new HashMap<WorkerId, StartupConfiguration>();
    workers.put(Utils.createWorkerId("worker1"), conf);

  public void setUpMaster() throws Exception {
    FileSplit split = FileSplit.newBuilder()
        .setPath("testData/testWorkerService.txt").setOffset(0).setLength(200)
        .build();

    StartupConfiguration conf = StartupConfiguration.newBuilder()
        .setSplit(split).setBatchSize(200).setIterations(1).setOther(null)
        .build();

    HashMap<WorkerId, StartupConfiguration> workers = new HashMap<WorkerId, StartupConfiguration>();
    workers.put(Utils.createWorkerId("worker1"), conf);

        .setPath("/foo/bar")
        .setOffset(100)
        .setLength(200)
        .build();
   
    StartupConfiguration conf = StartupConfiguration.newBuilder()
        .setSplit(split)
        .setBatchSize(2)
        .setIterations(1)
        .setOther(null)
        .build();

  @Before
  public void setUp() throws Exception {
    FileSplit split = FileSplit.newBuilder().setPath("/foo/bar").setOffset(100)
        .setLength(200).build();

    StartupConfiguration conf = StartupConfiguration.newBuilder()
        .setSplit(split).setBatchSize(2).setIterations(1).setOther(null)
        .build();

    HashMap<WorkerId, StartupConfiguration> workers = new HashMap<WorkerId, StartupConfiguration>();
    workers.put(Utils.createWorkerId("worker1"), conf);
