Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLogOp$UpdateBlocksOp


  @Override
  public void loadEdits() throws IOException {
    visitor.start(inputStream.getVersion());
    while (true) {
      try {
        FSEditLogOp op = inputStream.readOp();
        if (op == null)
          break;
        if (fixTxIds) {
          if (nextTxId <= 0) {
            nextTxId = op.getTransactionId();
            if (nextTxId <= 0) {
              nextTxId = 1;
            }
          }
          op.setTransactionId(nextTxId);
          nextTxId++;
        }
        visitor.visitOp(op);
      } catch (IOException e) {
        if (!recoveryMode) {
View Full Code Here
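
The loader above drains the stream by calling readOp() until it returns null, optionally rewriting transaction ids before handing each op to its visitor. Below is a minimal sketch of that read loop in isolation, assuming an already-opened EditLogInputStream; the class and method names are illustrative, not from the Hadoop sources.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

// Minimal sketch of the read loop used by the loader above: drain the stream
// with readOp() until it returns null. The visitor step is omitted; this
// version only counts the ops it sees.
class EditLogReadSketch {
  static long countOps(EditLogInputStream in) throws IOException {
    long count = 0;
    FSEditLogOp op;
    while ((op = in.readOp()) != null) {
      count++;
    }
    return count;
  }
}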


      if (stanzaStack.empty()) {
        if (!name.equals("DATA")) {
          throw new InvalidXmlException("expected </DATA>");
        }
        state = ParseState.EXPECT_RECORD;
        FSEditLogOp op = opCache.get(opCode);
        try {
          op.decodeXml(stanza);
          stanza = null;
        } finally {
          if (stanza != null) {
            // decodeXml failed: report the opcode and the offending stanza.
            System.err.println("fromXml error decoding opcode " + opCode +
                "\n" + stanza.toString());
            stanza = null;
          }
          // Clear opCode only after any error report so the message is useful.
          opCode = null;
        }
        if (fixTxIds) {
          if (nextTxId <= 0) {
            nextTxId = op.getTransactionId();
            if (nextTxId <= 0) {
              nextTxId = 1;
            }
          }
          op.setTransactionId(nextTxId);
          nextTxId++;
        }
        try {
          visitor.visitOp(op);
        } catch (IOException e) {
View Full Code Here
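
The same fixTxIds renumbering block appears in both the binary and XML loaders above. Isolated as a helper, the pattern looks like the sketch below; the class, field, and method names are made up for illustration.

import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

// Sketch of the txid-rewriting pattern shared by the two loaders above: seed
// nextTxId from the first op (falling back to 1), then stamp every op with a
// strictly increasing id.
class TxIdFixerSketch {
  private long nextTxId = -1;

  void fixTransactionId(FSEditLogOp op) {
    if (nextTxId <= 0) {
      nextTxId = op.getTransactionId();
      if (nextTxId <= 0) {
        nextTxId = 1;
      }
    }
    op.setTransactionId(nextTxId);
    nextTxId++;
  }
}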

          doAnswer(new Answer<FSEditLogOp>() {

            @Override
            public FSEditLogOp answer(InvocationOnMock invocation)
                throws Throwable {
              FSEditLogOp op = (FSEditLogOp) invocation.callRealMethod();
              if (throwExceptionOnRead &&
                  TEST_DIR3.equals(NameNodeAdapter.getMkdirOpPath(op))) {
                throw new IOException("failed to read op creating " + TEST_DIR3);
              } else {
                return op;
View Full Code Here
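
The test above stubs readOp() on a spied stream so it can delegate to the real method until a particular op is reached. A hedged sketch of the same fault-injection idea, keyed on a transaction id instead of a path; spyStream and failTxId are illustrative names, not taken from the original test.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import static org.mockito.Mockito.doAnswer;

// Sketch: make a spied EditLogInputStream fail once it reaches a chosen
// transaction id, otherwise delegate to the real readOp().
class ReadFailureInjectionSketch {
  static void injectReadFailure(EditLogInputStream spyStream,
      final long failTxId) throws IOException {
    doAnswer(new Answer<FSEditLogOp>() {
      @Override
      public FSEditLogOp answer(InvocationOnMock invocation) throws Throwable {
        FSEditLogOp op = (FSEditLogOp) invocation.callRealMethod();
        if (op != null && op.getTransactionId() == failTxId) {
          throw new IOException("injected failure at txid " + failTxId);
        }
        return op;
      }
    }).when(spyStream).readOp();
  }
}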

      }

      in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);

      long endTxId = HdfsConstants.INVALID_TXID;
      FSEditLogOp op = in.readOp();
      while (op != null) {
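        // Take the first op's txid, then advance endTxId only while the
        // txids stay contiguous.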
        if (endTxId == HdfsConstants.INVALID_TXID
            || op.getTransactionId() == endTxId+1) {
          endTxId = op.getTransactionId();
        }
        op = in.readOp();
      }
      return endTxId;
    } finally {
View Full Code Here

  /**
   * Skip forward to the specified transaction id. Currently this just
   * iterates forward over the ops; if that proves too expensive it could be
   * reimplemented with a binary search over bk entries.
   */
  public void skipTo(long txId) throws IOException {
    // Number of ops to consume so that the next readOp() returns txId.
    long numToSkip = txId - getFirstTxId();

    FSEditLogOp op = null;
    for (long i = 0; i < numToSkip; i++) {
      op = readOp();
    }
    if (op != null && op.getTransactionId() != txId-1) {
      throw new IOException("Corrupt stream, expected txid "
          + (txId-1) + ", got " + op.getTransactionId());
    }
  }
View Full Code Here
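
A sketch of how a caller might use skipTo(): consume everything before the target transaction so that the next readOp() returns it, or null if the stream ends first. readAt is a hypothetical helper, and the sketch assumes it sits in the same package as BookKeeperEditLogInputStream, which may be package-private.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

// Sketch only: readAt is a hypothetical helper, assumed to live alongside
// BookKeeperEditLogInputStream.
class SkipToUsageSketch {
  static FSEditLogOp readAt(BookKeeperEditLogInputStream in, long txId)
      throws IOException {
    in.skipTo(txId);      // consume every op before txId
    return in.readOp();   // should carry txId, or null past the end of the log
  }
}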

  public void testSimpleWrite() throws Exception {
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"));
    EditLogOutputStream out = bkjm.startLogSegment(1);
    for (long i = 1 ; i <= 100; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(i);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(1, 100);
View Full Code Here
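
The write loop above (no-op instances stamped with consecutive transaction ids) recurs in each of the following tests. Factored out it might look like the sketch below; writeNoOps is a hypothetical helper, and the imports assume the FSEditLogTestUtil test helper used by these tests is on the classpath.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;

// Sketch: write count no-op transactions starting at firstTxId and return the
// last txid written, mirroring the loop repeated in the tests above.
class NoOpWriterSketch {
  static long writeNoOps(EditLogOutputStream out, long firstTxId, long count)
      throws IOException {
    long txid = firstTxId;
    for (long i = 0; i < count; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    return txid - 1;
  }
}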

  public void testNumberOfTransactions() throws Exception {
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-txncount"));
    EditLogOutputStream out = bkjm.startLogSegment(1);
    for (long i = 1 ; i <= 100; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(i);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(1, 100);
View Full Code Here

    long txid = 1;
    for (long i = 0; i < 3; i++) {
      long start = txid;
      EditLogOutputStream out = bkjm.startLogSegment(start);
      for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
        op.setTransactionId(txid++);
        out.write(op);
      }
      out.close();
      bkjm.finalizeLogSegment(start, txid-1);
      assertNotNull(
View Full Code Here

    long txid = 1;
    for (long i = 0; i < 3; i++) {
      long start = txid;
      EditLogOutputStream out = bkjm.startLogSegment(start);
      for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
        op.setTransactionId(txid++);
        out.write(op);
      }
     
      out.close();
      bkjm.finalizeLogSegment(start, (txid-1));
      assertNotNull(
          zkc.exists(bkjm.finalizedLedgerZNode(start, (txid-1)), false));
    }
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(start);
    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE/2; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
    out.abort();
View Full Code Here
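
The test above flushes half a segment and then aborts it without finalizing, leaving an in-progress segment behind. Below is a hedged sketch of that life-cycle end to end, using only calls shown in these excerpts plus recoverUnfinalizedSegments() from the JournalManager interface; the import paths assume the BKJM contrib layout, the class and method names are illustrative, and exact recovery behaviour is version-dependent.

import java.io.IOException;

import org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager;
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;

// Sketch of the in-progress/abort life-cycle exercised above: open a segment,
// write and flush some ops, abort without finalizing, then leave the leftover
// segment to journal recovery.
class AbortedSegmentSketch {
  static void writeAndAbort(BookKeeperJournalManager bkjm, long startTxId,
      long numOps) throws IOException {
    EditLogOutputStream out = bkjm.startLogSegment(startTxId);
    long txid = startTxId;
    for (long i = 0; i < numOps; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();              // swap buffers ahead of the flush
    out.flush();                        // push the written ops out before aborting
    out.abort();                        // leave the segment unfinalized
    bkjm.recoverUnfinalizedSegments();  // recovery deals with the leftover segment
  }
}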

        BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"));
    long txid = 1;
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(txid);
    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, (txid-1));
   
    txid = 1;
    try {
      out = bkjm.startLogSegment(txid);
      fail("Shouldn't be able to start another journal from " + txid
          + " when one already exists");
    } catch (Exception ioe) {
      LOG.info("Caught exception as expected", ioe);
    }

    // test border case
    txid = DEFAULT_SEGMENT_SIZE;
    try {
      out = bkjm.startLogSegment(txid);
      fail("Shouldn't be able to start another journal from " + txid
          + " when one already exists");
    } catch (IOException ioe) {
      LOG.info("Caught exception as expected", ioe);
    }

    // open journal continuing from before
    txid = DEFAULT_SEGMENT_SIZE + 1;
    start = txid;
    out = bkjm.startLogSegment(start);
    assertNotNull(out);

    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, (txid-1));
View Full Code Here


