Examples of EditLogInputStream


Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

    readerQjm.selectInputStreams(streams, 0, false);
    try {
      assertEquals(1, streams.size());
      // Validate the actual stream contents.
      EditLogInputStream stream = streams.get(0);
      assertEquals(1, stream.getFirstTxId());
      assertEquals(3, stream.getLastTxId());
     
      verifyEdits(streams, 1, 3);
      assertNull(stream.readOp());
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
      streams.clear();
    }
   
    // Ensure correct results when there is a stream in-progress, but we don't
    // ask for in-progress.
    writeSegment(cluster, qjm, 4, 3, false);
    readerQjm.selectInputStreams(streams, 0, false);
    try {
      assertEquals(1, streams.size());
      EditLogInputStream stream = streams.get(0);
      assertEquals(1, stream.getFirstTxId());
      assertEquals(3, stream.getLastTxId());
      verifyEdits(streams, 1, 3);
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
      streams.clear();
    }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

  public static void verifyEdits(List<EditLogInputStream> streams,
      int firstTxnId, int lastTxnId) throws IOException {
   
    Iterator<EditLogInputStream> iter = streams.iterator();
    assertTrue(iter.hasNext());
    EditLogInputStream stream = iter.next();
   
    for (int expected = firstTxnId;
        expected <= lastTxnId;
        expected++) {
     
      FSEditLogOp op = stream.readOp();
      while (op == null) {
        assertTrue("Expected to find txid " + expected + ", " +
            "but no more streams available to read from",
            iter.hasNext());
        stream = iter.next();
        op = stream.readOp();
      }
     
      assertEquals(FSEditLogOpCodes.OP_MKDIR, op.opCode);
      assertEquals(expected, op.getTransactionId());
    }
   
    assertNull(stream.readOp());
    assertFalse("Expected no more txns after " + lastTxnId +
        " but more streams are available", iter.hasNext());
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

      EditLogOutputStream bkeos = bkjm.startLogSegment(1);
      EditLogOutputStream elfos =
          new EditLogFileOutputStream(tempEditsFile, null);
      elfos.create();
      FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
      EditLogInputStream bkeis =
          FSEditLogTestUtil.getJournalInputStream(bkjm, 1, true);
      EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
      Map<String, EditLogInputStream> streamByName =
          ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
      FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
      assertNotNull("Log was validated", h.logValidation);
      assertEquals("numTrasactions validated correctly",
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

      EditLogOutputStream bkeos = bkjm.startLogSegment(1);
      EditLogOutputStream elfos =
          new EditLogFileOutputStream(tempEditsFile, null);
      elfos.create();
      FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
      EditLogInputStream bkeis =
          getJournalInputStreamDontCheckLastTxId(bkjm, 1);
      EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
      Map<String, EditLogInputStream> streamByName =
          ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
      FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
    } finally {
      if (!tempEditsFile.delete()) {
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

  /**
   * Verifies that the given streams, read in list order, yield exactly the
   * transactions {@code firstTxnId} through {@code lastTxnId} inclusive:
   * every op read must be an OP_MKDIR carrying the next sequential txid,
   * and no ops may remain in any stream after the range is exhausted.
   *
   * @param streams    edit log input streams to read, consumed in order
   * @param firstTxnId first transaction id expected
   * @param lastTxnId  last transaction id expected
   * @throws IOException if reading from any stream fails
   */
  public static void verifyEdits(List<EditLogInputStream> streams,
      int firstTxnId, int lastTxnId) throws IOException {
   
    // There must be at least one stream to read the first txn from.
    Iterator<EditLogInputStream> iter = streams.iterator();
    assertTrue(iter.hasNext());
    EditLogInputStream stream = iter.next();
   
    for (int expected = firstTxnId;
        expected <= lastTxnId;
        expected++) {
     
      FSEditLogOp op = stream.readOp();
      // readOp() returning null means the current stream is exhausted;
      // move on to the next stream, failing if none remain.
      while (op == null) {
        assertTrue("Expected to find txid " + expected + ", " +
            "but no more streams available to read from",
            iter.hasNext());
        stream = iter.next();
        op = stream.readOp();
      }
     
      // Every op in these tests is written as a MKDIR with sequential txids.
      assertEquals(FSEditLogOpCodes.OP_MKDIR, op.opCode);
      assertEquals(expected, op.getTransactionId());
    }
   
    // After the expected range: the current stream must be drained and no
    // further (unread) streams may remain.
    assertNull(stream.readOp());
    assertFalse("Expected no more txns after " + lastTxnId +
        " but more streams are available", iter.hasNext());
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

    Callable<Void> consumerThread = new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        List<EditLogInputStream> streams = Lists.newArrayList();
        qjm.selectInputStreams(streams, 0, true, false);
        EditLogInputStream in = streams.get(0);

        long numOps = 0;
        long maxTxId = -1;
        FSEditLogOp op;
        long lastPos = in.getPosition();
        do {
          op = in.readOp();
          if (op == null) { // If we've reached the end prematurely...
            Thread.sleep(200);
            LOG.info("Refreshing to " + lastPos);

            in.refresh(lastPos, maxTxId); // Then refresh to last known good position
          } else {
            long txId = op.getTransactionId();
            if (txId > maxTxId) {
              // Standby ingest contains similar logic: transactions
              // with ids lower than what is already read are ignored.
              numOps++;
              maxTxId = txId;
            }

            // Remember the last known safe position that we can refresh to
            lastPos = in.getPosition();
          }
        } while (op != null || !finishedProducing.get());
        Thread.sleep(1000);

        // finalize the segment, so we can read to the end
        qjm.finalizeLogSegment(0, numEdits - 1);

        // Once producer is shutdown, scan again from last known good position
        // until the end of the ledger. This mirrors the Ingest logic (last
        // read when being quiesced).
        in.refresh(lastPos, maxTxId);
        do {
          op = in.readOp();
          if (op != null) {
            long txId = op.getTransactionId();
            if (txId > maxTxId) {
              numOps++;
              maxTxId = txId;
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

      int firstTxnId, int lastTxnId, List<FSEditLogOp> writtenTxns,
      boolean inProgress) throws IOException {

    Iterator<EditLogInputStream> iter = streams.iterator();
    assertTrue(iter.hasNext());
    EditLogInputStream stream = iter.next();

    long position = stream.getPosition();
    if (inProgress) {
      // we are one transaction behind
      lastTxnId--;
    }

    for (int expected = firstTxnId; expected <= lastTxnId; expected++) {
      if (inProgress) { // otherwise we cannot call refresh
        stream.refresh(position, expected - 1);
      }
      FSEditLogOp op = stream.readOp();
      position = stream.getPosition();

      while (op == null) {
        assertTrue("Expected to find txid " + expected + ", "
            + "but no more streams available to read from", iter.hasNext());
        stream = iter.next();
        op = stream.readOp();
      }

      assertEquals(expected, op.getTransactionId());
      assertEquals(expected, writtenTxns.get(expected).getTransactionId());
      assertEquals(op.opCode, writtenTxns.get(expected).opCode);
    }

    assertNull(stream.readOp());
    assertFalse("Expected no more txns after " + lastTxnId
        + " but more streams are available", iter.hasNext());
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

    readerQjm.selectInputStreams(streams, 0, false, true);
    try {
      assertEquals(1, streams.size());
      // Validate the actual stream contents.
      EditLogInputStream stream = streams.get(0);
      assertEquals(1, stream.getFirstTxId());
      assertEquals(3, stream.getLastTxId());
     
      verifyEdits(streams, 1, 3);
      assertNull(stream.readOp());
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
      streams.clear();
    }
   
    // Ensure correct results when there is a stream in-progress, but we don't
    // ask for in-progress.
    writeSegment(cluster, qjm, 4, 3, false);
    readerQjm.selectInputStreams(streams, 0, false, true);
    try {
      assertEquals(1, streams.size());
      EditLogInputStream stream = streams.get(0);
      assertEquals(1, stream.getFirstTxId());
      assertEquals(3, stream.getLastTxId());
      verifyEdits(streams, 1, 3);
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
      streams.clear();
    }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

    List<EditLogInputStream> streams = Lists.newArrayList();
    // get inprogress streams
    qjm.selectInputStreams(streams, 0, true, false);
   
    long lastReadTxId = -1;
    EditLogInputStream is = streams.get(0);
    for (int i = 0; i < 3; i++) {
      FSEditLogOp op = is.readOp();
      assertNotNull(op);
      lastReadTxId = op.getTransactionId();
      LOG.info("Read transaction: " + op + " with txid: "
          + op.getTransactionId());
    }
   
    // get the stream we are tailing from
    URLLogInputStream[] tailing = new URLLogInputStream[1];
    JournalNode jn = getTailingJN(is, tailing);
   
    long position = is.getPosition();
   
    // stop the node
    jn.stopAndJoin(0);
   
    // refresh the input stream
    is.refresh(position, 0);
   
    LOG.info("Checking failed stream");
    // this guy should be disabled
    // its position should be fixed
    URLLogInputStream urlis = tailing[0];
    assertTrue(urlis.isDisabled());
    assertEquals(position, urlis.getPosition());
    assertEquals(HdfsConstants.INVALID_TXID, urlis.getLastTxId());
    try {
      urlis.readOp();
      fail("This read should fail");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    } // expected
   
    // reads should fall back to another stream
    LOG.info("We should be able to read from the stream");
    for (int i = 0; i < 3; i++) {
      FSEditLogOp op = is.readOp();
      assertNotNull(op);
      assertEquals(++lastReadTxId, op.getTransactionId());
      LOG.info("Read transaction: " + op + " with txid: "
          + op.getTransactionId());
      position = is.getPosition();
    }
    LOG.info("Current state of the input stream: " + is.getName());
   
    // refresh again
    is.refresh(position, 0);
    assertEquals(position, urlis.getPosition());
    assertTrue(urlis.isDisabled());
    assertEquals(HdfsConstants.INVALID_TXID, urlis.getLastTxId());
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogInputStream

    readerQjm.selectInputStreams(streams, 0, false, true);
    try {
      assertEquals(1, streams.size());
      // Validate the actual stream contents.
      EditLogInputStream stream = streams.get(0);
      assertEquals(0, stream.getFirstTxId());
      assertEquals(numTxns - 1, stream.getLastTxId());

      verifyEdits(streams, 0, numTxns - 1, txns, false);
      assertNull(stream.readOp());
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
      streams.clear();
    }
  }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., owned by Oracle Inc. Contact: coftware#gmail.com.