Package com.cloudera.flume.agent

Examples of com.cloudera.flume.agent.LogicalNode


    BenchmarkHarness.cleanupLocalWriteDir();
  }

  LogicalNode setupAgent(long count, String agentSink) throws IOException,
      RuntimeException, FlumeSpecException {
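    // Build a logical node named "agent" on physical node "phys" and load it
    // with a config that synthesizes `count` ASCII events into the given sink spec.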
    LogicalNode agent = new LogicalNode(
        new LogicalNodeContext("phys", "agent"), "agent");
    FlumeConfigData fcd = new FlumeConfigData(0, "asciisynth(" + count + ")",
        agentSink, 1, 1, "flow");
    agent.loadConfig(fcd);
    return agent;
  }


  LogicalNode setupColl(long port, String name, String acc) throws IOException,
      RuntimeException, FlumeSpecException {
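    // Build a collector logical node that listens on an rpcSource at `port`
    // and counts every received event into an accumulator registered as `acc`.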
    Context ctx = new LogicalNodeContext(new ReportTestingContext(), "phys",
        name);
    LogicalNode coll = new LogicalNode(ctx, name);
    FlumeConfigData fcd2 = new FlumeConfigData(0, "rpcSource(" + port + ")",
        "accumulator(\"" + acc + "\")", 1, 1, "flow");
    coll.loadConfig(fcd2);
    return coll;
  }
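
The retry tests below call a loopUntilCount(...) helper that is not included in these excerpts. The following is a hedged reconstruction based only on its call sites (loopUntilCount(count, coll, coll2) and loopUntilCount(count, null, coll2)) and the "count"/"count2" accumulator names used above; the real helper in the full test class may differ.

  // Hedged sketch: poll the two accumulators until their combined total
  // reaches the expected count, logging progress along the way.
  void loopUntilCount(long count, LogicalNode coll, LogicalNode coll2)
      throws InterruptedException {
    AccumulatorSink ctr = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    AccumulatorSink ctr2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    long sum = 0;
    int loops = 0;
    while (sum < count) {
      Clock.sleep(1000);
      long c1 = (ctr == null) ? 0 : ctr.getCount();
      long c2 = (ctr2 == null) ? 0 : ctr2.getCount();
      sum = c1 + c2;
      LOG.info("loop " + loops + " count = " + c1 + " count2 = " + c2);
      // dump collector metrics when the caller passed live collector nodes
      if (coll != null) {
        LOG.info(coll.getMetrics().toText());
      }
      if (coll2 != null) {
        LOG.info(coll2.getMetrics().toText());
      }
      loops++;
    }
  }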

  public void testDFOPerfectRetry() throws IOException, RuntimeException,
      FlumeSpecException, InterruptedException {
    long count = 1000;

    // start the collectors first
    LogicalNode coll = setupColl(12345, "coll", "count");
    LogicalNode coll2 = setupColl(12346, "coll2", "count2");

    // then the agent so it can connect
    String agentSink = "< { flakeyAppend(.1, 1337) => rpcSink(\"localhost\",12345) } ?"
        + " {diskFailover => {insistentAppend => { lazyOpen "
        + "=> rpcSink(\"localhost\",12345) } } } >";
    LogicalNode agent = setupAgent(count, agentSink);

    // wait until the counts add up properly
    AccumulatorSink ctr = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    AccumulatorSink ctr2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    loopUntilCount(count, coll, coll2);

    assertEquals(NodeState.IDLE, agent.getStatus().state);

    // close off the collectors and the agent
    coll.close();
    coll2.close();
    agent.close();

    // check output
    LOG.info("primary collector count   = " + ctr.getCount());
    LOG.info("secondary collector count = " + ctr2.getCount());
    assertEquals(count, ctr.getCount() + ctr2.getCount());
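
A note on the sink spec above (editor's annotation, not part of the original example): < primary ? backup > is Flume's failover construct, and each A => B wraps sink B in decorator A. Broken out with comments, the spec used by testDFOPerfectRetry reads as follows; the per-decorator descriptions are inferred from the decorator names and from how these tests exercise them.

    String agentSink =
        "< { flakeyAppend(.1, 1337) => rpcSink(\"localhost\",12345) } ?" // primary: RPC to the collector, with seeded random append failures injected
            + " {diskFailover => {insistentAppend => { lazyOpen "        // backup: spool to the local disk-failover log, keep retrying appends, open the subsink lazily
            + "=> rpcSink(\"localhost\",12345) } } } >";                 // ...and replay to the same collector on port 12345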

  public void testDFOFlakeyRetry() throws IOException, RuntimeException,
      FlumeSpecException, InterruptedException {
    long count = 1000;

    // start the collectors first
    LogicalNode coll = setupColl(12345, "coll", "count");
    LogicalNode coll2 = setupColl(12346, "coll2", "count2");

    // Then the agent so it can connect. This version assumes that the
    // secondary/failover case will fail and pass an exception back to
    // the primary.
    String agentSink = "< { flakeyAppend(.1,1337) => rpcSink(\"localhost\",12345) } ?"
        + " {diskFailover => {lazyOpen => {flakeyAppend(.1,1337) "
        + "=> rpcSink(\"localhost\",12346) } } } >";
    LogicalNode agent = setupAgent(count, agentSink);

    // wait for the agent to finish: poll until the primary collector's count
    // stops increasing
    boolean done = false;
    int loops = 0;
    AccumulatorSink ctr = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    AccumulatorSink ctr2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    long old = 0;
    while (!done) {
      Clock.sleep(1000);

      LOG.info("loop " + loops + " collector count = " + ctr.getCount()
          + " count2 = " + ctr2.getCount());

      LOG.info(coll.getMetrics().toText());
      LOG.info(coll2.getMetrics().toText());
      if (old == ctr.getCount()) {
        break;
      }
      old = ctr.getCount();
      loops++;
    }

    // close off the collectors and the agent
    coll.close();
    coll2.close();
    agent.close();

  }

  public void testDFOInsistentRetry() throws IOException, RuntimeException,
      FlumeSpecException, InterruptedException {
    long count = 100;

    // start the collectors first
    LogicalNode coll = setupColl(12345, "coll", "count");
    LogicalNode coll2 = setupColl(12346, "coll2", "count2");

    // Then the agent so it can connect. This config attempts to send on the
    // primary and, when that fails, falls back to writing to disk. The subsink
    // of diskFailover is decorated so that it never gives up with an exception
    // (see the annotated breakdown after this excerpt).
    String agentSink = "{ delay(100) => < "
        + "{ flakeyAppend(.05) => rpcSink(\"localhost\",12345) } ?"
        + " {diskFailover => { insistentAppend => { stubbornAppend => { insistentOpen "
        + "=> { lazyOpen => {flakeyAppend(.05) => rpcSink(\"localhost\",12346) } } }  } } }> } ";
    LogicalNode agent = setupAgent(count, agentSink);

    // wait until the counts add up properly
    AccumulatorSink ctr = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    AccumulatorSink ctr2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    loopUntilCount(count, coll, coll2);

    // close off the collectors
    coll.close();
    coll2.close();

    // dump info for debugging
    Map<String, ReportEvent> rpts = new HashMap<String, ReportEvent>();
    agent.getReports(rpts);
    for (Entry<String, ReportEvent> e : rpts.entrySet()) {
      LOG.info(e.getKey() + " : " + e.getValue());
    }

    // check the end states
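
As noted in the comment above, the insistent-retry spec never lets an exception escape the diskFailover subsink. Annotated (again the editor's reading of the decorators, not text from the original page):

    String agentSink = "{ delay(100) => < "                              // pace each append so events trickle through while failures occur
        + "{ flakeyAppend(.05) => rpcSink(\"localhost\",12345) } ?"      // primary: collector on 12345, with random append failures injected
        + " {diskFailover => { insistentAppend => { stubbornAppend => { insistentOpen " // backup: spool to disk; retry appends and opens until they succeed
        + "=> { lazyOpen => {flakeyAppend(.05) => rpcSink(\"localhost\",12346) } } }  } } }> } "; // open lazily and replay to the second collector on 12346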

    // Start the agent first.
    String agentSink = "{ delay(100) => < "
        + "{ flakeyAppend(.05) => rpcSink(\"localhost\",12345) } ?"
        + " {diskFailover => { insistentAppend => { stubbornAppend => { insistentOpen "
        + "=> { lazyOpen => {flakeyAppend(.05) => rpcSink(\"localhost\",12346) } } }  } } }> } ";
    LogicalNode agent = setupAgent(count, agentSink);

    // Purposely sleep a little so that the agent starts spooling to disk, then
    // start the collectors
    Clock.sleep(2000);
    LogicalNode coll = setupColl(12345, "coll", "count");
    LogicalNode coll2 = setupColl(12346, "coll2", "count2");

    // wait until the counts add up properly
    AccumulatorSink ctr = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    AccumulatorSink ctr2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    loopUntilCount(count, coll, coll2);

    // close off the collectors
    coll.close();
    coll2.close();

    // dump info for debugging
    Map<String, ReportEvent> rpts = new HashMap<String, ReportEvent>();
    agent.getReports(rpts);
    for (Entry<String, ReportEvent> e : rpts.entrySet()) {
      LOG.info(e.getKey() + " : " + e.getValue());
    }

    // check the end states
    assertEquals(count, ctr.getCount() + ctr2.getCount());
    assertTrue(ctr.getCount() > 0);
    assertTrue(ctr2.getCount() > 0);

    // the collectors can be in ERROR or IDLE state because of the randomness.
    NodeState stateColl = coll.getStatus().state;
    LOG.info("coll exited in state: " + stateColl);
    assertTrue(stateColl.equals(NodeState.IDLE)
        || stateColl.equals(NodeState.ERROR));

    NodeState stateColl2 = coll2.getStatus().state;
    LOG.info("coll2 exited in state: " + stateColl2);
    assertTrue(stateColl2.equals(NodeState.IDLE)
        || stateColl2.equals(NodeState.ERROR));
  }

    // Start the agent first.
    String agentSink = "{ delay(100) => < "
        + "{ flakeyAppend(.05) => rpcSink(\"localhost\",12345) } ?"
        + " {diskFailover => { insistentAppend => { stubbornAppend => { insistentOpen "
        + "=> { lazyOpen => {flakeyAppend(.05) => rpcSink(\"localhost\",12346) } } }  } } }> } ";
    LogicalNode agent = setupAgent(count, agentSink);

    // Purposely sleep a little so that the agent starts spooling to disk, then
    // start collectors
    Clock.sleep(2000);
    LogicalNode coll2 = setupColl(12346, "coll2", "count2");

    // wait until the counts add up properly
    AccumulatorSink ctr2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    loopUntilCount(count, null, coll2);

    // close off the collector
    coll2.close();

    // dump info for debugging
    Map<String, ReportEvent> rpts = new HashMap<String, ReportEvent>();
    agent.getReports(rpts);
    for (Entry<String, ReportEvent> e : rpts.entrySet()) {
      LOG.info(e.getKey() + " : " + e.getValue());
    }

    // check the end states
    assertEquals(count, ctr2.getCount());

    // the collector can be in ERROR or IDLE state because of the randomness.
    NodeState stateColl2 = coll2.getStatus().state;
    LOG.info("coll2 exited in state: " + stateColl2);
    assertTrue(stateColl2.equals(NodeState.IDLE)
        || stateColl2.equals(NodeState.ERROR));
  }

   */
  @GET
  @Path("{node}")
  @Produces("application/json")
  public JSONObject getConfig(@PathParam("node") String lnode) {
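    // Look up the logical node by name on this physical node's manager;
    // unknown names fall through to the empty-JSON case below.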
    LogicalNode ln = node.getLogicalNodeManager().get(lnode);
    if (ln == null) {
      // no node found, return empty.
      return new JSONObject();
    }


    master.getSpecMan().setConfig(lnode, "flow", "asciisynth(0)",
        "agentDFOSink(\"localhost\", 12345)");
    master.getSpecMan().addLogicalNode(NetUtils.localhost(), lnode);
    liveMan.heartbeatChecks();
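    // The heartbeat pulls the new spec down from the master, so the local
    // logical node should now exist and its driver should start up.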

    LogicalNode n = node.getLogicalNodeManager().get(lnode);
    Driver d = n.getDriver();
    assertTrue("Attempting to start driver timed out",
        d.waitForAtLeastState(DriverState.ACTIVE, 10000));

    // update the node's config to something that will be interrupted.
    LOG.info("!!! decommissioning node on master");

    master.getSpecMan().setConfig("node2", "flow", "asciisynth(0)",
        "agentDFOSink(\"invalid\", 12346)");
    master.getSpecMan().addLogicalNode(NetUtils.localhost(), lnode);
    liveMan.heartbeatChecks();

    LogicalNode n = node.getLogicalNodeManager().get(lnode);
    Driver d = n.getDriver();
    assertTrue("Attempting to start driver timed out",
        d.waitForAtLeastState(DriverState.ACTIVE, 20000));

    // update the node's config to something that will be interrupted.
    LOG.info("!!! decommissioning node on master");
