Package com.cloudera.flume.agent

Examples of com.cloudera.flume.agent.FlumeNode
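
For orientation before the test snippets, here is a minimal lifecycle sketch assembled only from the constructors and calls that appear below: an in-process FlumeMaster, a FlumeNode attached over DirectMasterRPC, one manual heartbeat, then shutdown. It is an illustration, not code from the Flume test suite, and the import locations for FlumeMaster and DirectMasterRPC are assumed from the Flume 0.9.x package layout.

    import com.cloudera.flume.agent.DirectMasterRPC;
    import com.cloudera.flume.agent.FlumeNode;
    import com.cloudera.flume.master.FlumeMaster;

    public class FlumeNodeLifecycleSketch {
      public static void main(String[] args) throws Exception {
        FlumeMaster master = new FlumeMaster();          // in-process master
        FlumeNode node = new FlumeNode(new DirectMasterRPC(master),
            false /* starthttp */, false /* oneshot */); // node wired directly to it
        node.start();
        node.getLivenessManager().heartbeatChecks();     // register with the master
        // ... spawn or configure logical nodes here ...
        node.stop();
      }
    }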


    // parts. Normally this manager would want to delete a WAL file (or WAL
    // entries). This stubs that out so the call doesn't cause a
    // file-not-found exception.
    WALManager mockWalMan = mock(WALManager.class);
    BenchmarkHarness.setupFlumeNode(null, mockWalMan, null, null, null);
    FlumeNode node = FlumeNode.getInstance();
    File tmpdir = FileUtil.mktempdir();

    EventSource ackedmem = setupAckRoll();
    Pair<RollSink, EventSink> p = setupSink(node, tmpdir);
    EventSink snk = p.getRight();
    RollSink roll = p.getLeft();
    snk.open();

    String tag1 = roll.getCurrentTag();
    LOG.info(tag1);
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    Clock.sleep(10); // have to make sure it is not in the same millisecond
    roll.rotate(); // we should have the first batch complete and part of the
    // second; one ack is pending
    assertEquals(1, node.getAckChecker().getPendingAckTags().size());
    node.getAckChecker().checkAcks();
    // no acks pending
    assertEquals(0, node.getAckChecker().getPendingAckTags().size());

    // note, we still are checking state for the 2nd batch of messages

    String tag2 = roll.getCurrentTag();
    LOG.info(tag2);
    // This end msg closes the 2nd batch
    snk.append(ackedmem.next()); // ack end
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    Clock.sleep(10); // have to make sure it is not in the same millisecond
    roll.rotate();
    // now 2nd batch and 3rd batch are pending.
    assertEquals(2, node.getAckChecker().getPendingAckTags().size());
    node.getAckChecker().checkAcks();

    // no more acks outstanding
    LOG.info("pending ack tags: " + node.getAckChecker().getPendingAckTags());
    assertEquals(0, node.getAckChecker().getPendingAckTags().size());

    snk.close();

    FileUtil.rmr(tmpdir);
    BenchmarkHarness.cleanupLocalWriteDir();


    // parts. Normally this manager would want to delete a WAL file (or WAL
    // entries). This stubs that out so the call doesn't cause a
    // file-not-found exception.
    WALManager mockWalMan = mock(WALManager.class);
    BenchmarkHarness.setupFlumeNode(null, mockWalMan, null, null, null);
    FlumeNode node = FlumeNode.getInstance();
    File tmpdir = FileUtil.mktempdir();

    EventSource ackedmem = setupAckRoll();
    Pair<RollSink, EventSink> p = setupSink(node, tmpdir);
    EventSink snk = p.getRight();
    RollSink roll = p.getLeft();
    snk.open();

    String tag1 = roll.getCurrentTag();
    LOG.info(tag1);
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    snk.append(ackedmem.next()); // ack beg
    Clock.sleep(10); // have to make sure it is not in the same millisecond
    roll.rotate(); // we should have the first batch complete and part of the
    // second; one ack is pending
    assertEquals(1, node.getAckChecker().getPendingAckTags().size());
    node.getAckChecker().checkAcks();
    // no acks pending
    assertEquals(0, node.getAckChecker().getPendingAckTags().size());

    // we are partially through the second batch, at a different split point

    String tag2 = roll.getCurrentTag();
    LOG.info(tag2);
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    snk.append(ackedmem.next()); // ack beg
    snk.append(ackedmem.next()); // data
    snk.append(ackedmem.next()); // ack end
    Clock.sleep(10); // have to make sure it is not in the same millisecond
    roll.rotate();
    // now we have closed off group2 and group3
    assertEquals(2, node.getAckChecker().getPendingAckTags().size());
    node.getAckChecker().checkAcks();
    Clock.sleep(10); // have to make sure it is not in the same millisecond

    // no more acks left
    LOG.info("pending ack tags: " + node.getAckChecker().getPendingAckTags());
    assertEquals(0, node.getAckChecker().getPendingAckTags().size());

    snk.close();

    FileUtil.rmr(tmpdir);
    BenchmarkHarness.cleanupLocalWriteDir();

  public void doTestContextConcurrentWALMans(final int threads,
      final int events, int timeout) throws IOException, InterruptedException,
      FlumeSpecException {
    BenchmarkHarness.setupLocalWriteDir();
    FlumeMaster master = new FlumeMaster();
    FlumeNode node = new FlumeNode(new DirectMasterRPC(master), false, false);

    for (int i = 0; i < threads; i++) {
      String name = "test." + i;
      String report = "report." + i;
      int count = events + i;
      String src = "asciisynth(" + count + ",100)";
      String snk = " { ackedWriteAhead(15000) => {ackChecker => counter(\""
          + report + "\") }}";

      node.getLogicalNodeManager().testingSpawn(name, src, snk);
    }

    // wait for WALs to flush.
    waitForEmptyWALs(master, node, timeout);

        public void append(Event e) throws IOException {
          throw new IOException("mock ioe");
        }
      };

      FlumeNode node = FlumeNode.getInstance();
      EventSinkDecorator<EventSink> deco = new NaiveFileWALDeco(
          LogicalNodeContext.testingContext(), snk, node.getWalManager(),
          new TimeTrigger(new ProcessTagger(), 1000), node.getAckChecker()
              .getAgentAckQueuer(), 1000);

      deco.open();
      deco.append(e);
      deco.close();

      IOException {
    assertEquals(0, flumeMaster.getSpecMan().getAllConfigs().size());

    String nodename = "bar";
    flumeMaster.getSpecMan().addLogicalNode(nodename, "foo");
    FlumeNode n = new FlumeNode(FlumeConfiguration.get(), nodename,
        new DirectMasterRPC(flumeMaster), false, false);
    n.start();

    // jumpstart the heartbeat (get foo registered, and its default logicalNode
    // started)
    n.getLivenessManager().heartbeatChecks();

    // One for the logical node by default, one for foo
    assertEquals(2, flumeMaster.getStatMan().getNodeStatuses().size());

    FlumeShell sh = new FlumeShell();
    sh.executeLine("connect localhost: "
        + FlumeConfiguration.DEFAULT_ADMIN_PORT);
    sh.executeLine("exec config foo 'synth(100)' '{delay(100) => accumulator(\"count\") }' ");

    FlumeConfigData fcd = flumeMaster.getSpecMan().getConfig("foo");
    assertEquals("{delay(100) => accumulator(\"count\") }", fcd.sinkConfig);
    assertEquals("synth(100)", fcd.sourceConfig);
    assertTrue(0 != fcd.timestamp);

    sh.executeLine("waitForNodesDone 0 foo");
    n.getLivenessManager().heartbeatChecks();
    NodeState status = flumeMaster.getStatMan().getNodeStatuses().get("foo").state;
    NodeState idle = NodeState.IDLE;
    assertEquals(status, idle);
    // TODO: uncomment when there is a clean way to get at the reportable
    n.stop();
  }

      TTransportException, IOException {
    assertEquals(0, flumeMaster.getSpecMan().getAllConfigs().size());

    String nodename = "foo";
    FlumeConfiguration conf = FlumeConfiguration.createTestableConfiguration();
    FlumeNode n = new FlumeNode(conf, nodename,
        new DirectMasterRPC(flumeMaster), false, false);
    n.start();

    String node2 = "bar";
    FlumeNode n2 = new FlumeNode(conf, node2, new DirectMasterRPC(flumeMaster),
        false, false);
    n2.start();

    String node3 = "baz";
    FlumeNode n3 = new FlumeNode(conf, node3, new DirectMasterRPC(flumeMaster),
        false, false);
    n3.start();

    // jumpstart the heartbeat (get foo registered, and its default logicalNode
    // started)
    n.getLivenessManager().heartbeatChecks();
    n2.getLivenessManager().heartbeatChecks();
    n3.getLivenessManager().heartbeatChecks();

    assertEquals(3, flumeMaster.getStatMan().getNodeStatuses().size());

    FlumeShell sh = new FlumeShell();
    sh.executeLine("connect localhost: "
        + FlumeConfiguration.DEFAULT_ADMIN_PORT);
    sh.executeLine("exec config foo 'synth(100)' '{delay(100) => accumulator(\"count\") }' ");
    sh.executeLine("exec config bar 'synth(50)' '{delay(100) => accumulator(\"count2\") }' ");
    sh.executeLine("exec config baz 'synth(75)' '{delay(100) => accumulator(\"count3\") }' ");

    FlumeConfigData fcd = flumeMaster.getSpecMan().getConfig("foo");
    assertEquals("{delay(100) => accumulator(\"count\") }", fcd.sinkConfig);
    assertEquals("synth(100)", fcd.sourceConfig);
    assertTrue(0 != fcd.timestamp);

    sh.executeLine("waitForNodesDone 0 foo bar baz");
    n.getLivenessManager().heartbeatChecks();
    NodeState status = flumeMaster.getStatMan().getNodeStatuses().get(nodename).state;
    NodeState idle = NodeState.IDLE;
    assertEquals(status, idle);
    AccumulatorSink cnt = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    assertEquals(100, cnt.getCount());
    AccumulatorSink cnt2 = (AccumulatorSink) ReportManager.get().getReportable(
        "count2");
    assertEquals(50, cnt2.getCount());
    AccumulatorSink cnt3 = (AccumulatorSink) ReportManager.get().getReportable(
        "count3");
    assertEquals(75, cnt3.getCount());
    n.stop();
    n2.stop();
    n3.stop();
  }

  public void testNodesActive() throws InterruptedException,
      TTransportException, IOException {
    assertEquals(0, flumeMaster.getSpecMan().getAllConfigs().size());

    String nodename = "foo";
    FlumeNode n = new FlumeNode(FlumeConfiguration.get(), nodename,
        new DirectMasterRPC(flumeMaster), false, false);
    n.start();

    // jumpstart the heartbeat (get foo registered, and its default logicalNode
    // started)
    n.getLivenessManager().heartbeatChecks();

    assertEquals(1, flumeMaster.getStatMan().getNodeStatuses().size());

    FlumeShell sh = new FlumeShell();
    sh.executeLine("connect localhost: "
        + FlumeConfiguration.DEFAULT_ADMIN_PORT);
    // this will run for 10 seconds
    sh.executeLine("exec config foo 'synth(100)' '{delay(100) => accumulator(\"count\") }' ");

    FlumeConfigData fcd = flumeMaster.getSpecMan().getConfig("foo");
    assertEquals("{delay(100) => accumulator(\"count\") }", fcd.sinkConfig);
    assertEquals("synth(100)", fcd.sourceConfig);
    assertTrue(0 != fcd.timestamp);

    sh.executeLine("waitForNodesActive 0 foo");
    n.getLivenessManager().heartbeatChecks();
    NodeStatus status = flumeMaster.getStatMan().getNodeStatuses()
        .get(nodename);
    NodeState active = NodeState.ACTIVE;
    assertEquals(status.state, active);

    sh.executeLine("waitForNodesDone 0 foo");
    n.getLivenessManager().heartbeatChecks();
    status = flumeMaster.getStatMan().getNodeStatuses().get(nodename);
    NodeState idle = NodeState.IDLE;
    assertEquals(status.state, idle);
    AccumulatorSink cnt = (AccumulatorSink) ReportManager.get().getReportable(
        "count");
    assertEquals(100, cnt.getCount());
    n.stop();
  }
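
The waitForNodesActive and waitForNodesDone shell commands used above amount to polling the master's status manager while the node heartbeats. The helper below is a hedged sketch of that loop, assembled from calls already used in these snippets (heartbeatChecks, getStatMan().getNodeStatuses(), NodeStatus.state); it is not part of the Flume tests.

  // Illustrative polling helper (sketch only): drive the node's heartbeat and
  // wait until the master reports the desired NodeState, or give up on timeout.
  static boolean waitForState(FlumeMaster master, FlumeNode n, String name,
      NodeState desired, long timeoutMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      n.getLivenessManager().heartbeatChecks();        // refresh the reported state
      NodeStatus status = master.getStatMan().getNodeStatuses().get(name);
      if (status != null && status.state == desired) {
        return true;
      }
      Thread.sleep(100);
    }
    return false;
  }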

  public void doTestLogicalNodesConcurrentDFOMans(final int threads,
      final int events, int timeout) throws IOException, InterruptedException,
      FlumeSpecException {
    BenchmarkHarness.setupLocalWriteDir();
    FlumeMaster master = new FlumeMaster();
    FlumeNode node = new FlumeNode(new DirectMasterRPC(master), false, false);
    final Reportable[] dfos = new Reportable[threads];

    for (int i = 0; i < threads; i++) {
      String name = "test." + i;
      String report = "report." + i;
      int count = events + i;
      String src = "asciisynth(" + count + ",100)";
      String snk = "{ diskFailover => counter(\"" + report + "\") } ";
      node.getLogicalNodeManager().testingSpawn(name, src, snk);
      dfos[i] = node.getLogicalNodeManager().get(name);
    }

    // TODO (jon) using sleep is cheating to give all threads a chance to start.
    // Test seems flakey without this due to a race condition.
    Thread.sleep(500);

          checkmillis = Long.parseLong(argv[1]);
        }

        // TODO (jon) this will cause problems with multiple nodes in
        // same JVM
        FlumeNode node = FlumeNode.getInstance();

        // this makes the dfo visible when reporting on the FlumeNode
        String dfonode = context.getValue(LogicalNodeContext.C_LOGICAL);
        Preconditions.checkArgument(dfonode != null,
            "Context does not have a logical node name");
        DiskFailoverManager dfoman = node.getAddDFOManager(dfonode);

        return new DiskFailoverDeco(null, context, dfoman, new TimeTrigger(
            new ProcessTagger(), delayMillis), checkmillis);
      }
    };
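
For context, the anonymous builder above appears to back the diskFailover sink decorator, so it is normally reached through a sink spec rather than constructed directly. A hedged usage sketch, reusing the spawn call from the doTestLogicalNodesConcurrentDFOMans snippet earlier (the names and counts here are illustrative):

    // Hypothetical spec-driven usage of the diskFailover decorator built above.
    FlumeNode node = FlumeNode.getInstance();
    node.getLogicalNodeManager().testingSpawn("test.0",
        "asciisynth(100,100)",                         // synthetic source, as in the DFO test above
        "{ diskFailover => counter(\"report.0\") }");  // DFO in front of a counter sink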

    FlumeConfiguration conf = FlumeConfiguration.get();
    conf.clear(); // reset all back to defaults.
    conf.set(FlumeConfiguration.AGENT_LOG_DIR_NEW, tmpdir.getAbsolutePath());

    mock = new MockMasterRPC();
    node = new FlumeNode(mock, false /* starthttp */, false /* oneshot */);
    ReportManager.get().clear();
  }
