Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream
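Most of the snippets below follow the same lifecycle: obtain an EditLogOutputStream from a journal manager's startLogSegment(), write FSEditLogOp records to it, call setReadyToFlush() to mark everything written so far, then flush() to push that batch to the journal, and finally close() (or abort() on failure). The helper below is a minimal sketch of that pattern, reusing the FSEditLogTestUtil helper seen in the examples; the class name and import paths are assumptions for illustration, not taken from any snippet.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;

public class EditLogOutputStreamSketch {
  // Write no-op transactions [firstTxId, lastTxId] to the stream and flush them.
  // setReadyToFlush() marks the edits buffered so far; flush() then sends exactly
  // that batch, so anything written afterwards waits for the next flush
  // (see testWriteEdits below).
  static void writeAndFlush(EditLogOutputStream out, long firstTxId, long lastTxId)
      throws IOException {
    for (long txid = firstTxId; txid <= lastTxId; txid++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
  }
}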


    Assert.assertFalse("Report should be plain text", report.contains("<"));
  }

  @Test
  public void testWriteEdits() throws Exception {
    EditLogOutputStream stm = createLogSegment();
    writeOp(stm, 1);
    writeOp(stm, 2);
   
    stm.setReadyToFlush();
    writeOp(stm, 3);
   
    // The flush should log txn 1-2
    futureReturns(null).when(spyLoggers.get(0)).sendEdits(
        anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
    futureReturns(null).when(spyLoggers.get(1)).sendEdits(
        anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
    futureReturns(null).when(spyLoggers.get(2)).sendEdits(
        anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
    stm.flush();

    // Another flush should now log txn #3
    stm.setReadyToFlush();
    futureReturns(null).when(spyLoggers.get(0)).sendEdits(
        anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
    futureReturns(null).when(spyLoggers.get(1)).sendEdits(
        anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
    futureReturns(null).when(spyLoggers.get(2)).sendEdits(
        anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
    stm.flush();
  }
View Full Code Here


  @Test
  public void testWriteEditsOneSlow() throws Exception {
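    // flush() should return once a quorum of loggers (two of three here) has
    // acknowledged the edits, even though the third logger never responds.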
    EditLogOutputStream stm = createLogSegment();
    writeOp(stm, 1);
    stm.setReadyToFlush();
   
    // Make the first two logs respond immediately
    futureReturns(null).when(spyLoggers.get(0)).sendEdits(
        anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
    futureReturns(null).when(spyLoggers.get(1)).sendEdits(
        anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
   
    // And make the third log unresponsive
    SettableFuture<Void> slowLog = SettableFuture.create();
    Mockito.doReturn(slowLog).when(spyLoggers.get(2)).sendEdits(
        anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
    stm.flush();
   
    Mockito.verify(spyLoggers.get(0)).setCommittedTxId(1L);
  }
View Full Code Here

  // Helper used by the tests above: stub startLogSegment() on all three spied
  // loggers, then open a new segment starting at txid 1.
  private EditLogOutputStream createLogSegment() throws IOException {
    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
    EditLogOutputStream stm = qjm.startLogSegment(1,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    return stm;
  }
View Full Code Here

  @Test
  public void testSimpleWrite() throws Exception {
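    // Write 100 no-op transactions into a single segment, finalize it, and
    // verify that the finalized ledger znode exists in ZooKeeper.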
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"));
    EditLogOutputStream out = bkjm.startLogSegment(1);
    for (long i = 1 ; i <= 100; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(i);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(1, 100);
    String zkpath = bkjm.finalizedLedgerZNode(1, 100);
   
    assertNotNull(zkc.exists(zkpath, false));
View Full Code Here

  @Test
  public void testNumberOfTransactions() throws Exception {
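    // Write and finalize 100 transactions, then check that
    // getNumberOfTransactions() reports all of them.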
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-txncount"));
    EditLogOutputStream out = bkjm.startLogSegment(1);
    for (long i = 1 ; i <= 100; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(i);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(1, 100);

    long numTrans = bkjm.getNumberOfTransactions(1, true);
    assertEquals(100, numTrans);
  }
View Full Code Here

    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-gaps"));
    long txid = 1;
    for (long i = 0; i < 3; i++) {
      long start = txid;
      EditLogOutputStream out = bkjm.startLogSegment(start);
      for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
        op.setTransactionId(txid++);
        out.write(op);
      }
      out.close();
      bkjm.finalizeLogSegment(start, txid-1);
      assertNotNull(
          zkc.exists(bkjm.finalizedLedgerZNode(start, txid-1), false));
    }
    zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE+1,
View Full Code Here

    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"));
    long txid = 1;
    for (long i = 0; i < 3; i++) {
      long start = txid;
      EditLogOutputStream out = bkjm.startLogSegment(start);
      for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
        op.setTransactionId(txid++);
        out.write(op);
      }
     
      out.close();
      bkjm.finalizeLogSegment(start, (txid-1));
      assertNotNull(
          zkc.exists(bkjm.finalizedLedgerZNode(start, (txid-1)), false));
    }
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(start);
    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE/2; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
    out.abort();
    out.close();
   
    long numTrans = bkjm.getNumberOfTransactions(1, true);
    assertEquals((txid-1), numTrans);
  }
View Full Code Here

  @Test
  public void testWriteRestartFrom1() throws Exception {
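    // Starting a new segment at a txid that falls inside an already-written
    // segment must fail; continuing from just past the last finalized txid
    // (and beyond) must succeed.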
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"));
    long txid = 1;
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(txid);
    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, (txid-1));
   
    txid = 1;
    try {
      out = bkjm.startLogSegment(txid);
      fail("Shouldn't be able to start another journal from " + txid
          + " when one already exists");
    } catch (Exception ioe) {
      LOG.info("Caught exception as expected", ioe);
    }

    // test border case
    txid = DEFAULT_SEGMENT_SIZE;
    try {
      out = bkjm.startLogSegment(txid);
      fail("Shouldn't be able to start another journal from " + txid
          + " when one already exists");
    } catch (IOException ioe) {
      LOG.info("Caught exception as expected", ioe);
    }

    // open journal continuing from before
    txid = DEFAULT_SEGMENT_SIZE + 1;
    start = txid;
    out = bkjm.startLogSegment(start);
    assertNotNull(out);

    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, (txid-1));

    // open journal arbitrarily far in the future
    txid = DEFAULT_SEGMENT_SIZE * 4;
    out = bkjm.startLogSegment(txid);
View Full Code Here

    BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"));
    BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"));
   
    EditLogOutputStream out1 = bkjm1.startLogSegment(start);
    try {
      bkjm2.startLogSegment(start);
      fail("Shouldn't have been able to open the second writer");
    } catch (IOException ioe) {
      LOG.info("Caught exception as expected", ioe);
    } finally {
      out1.close();
    }
  }
View Full Code Here

  @Test
  public void testSimpleRead() throws Exception {
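    // Write and finalize 10000 transactions, then read them back through an
    // EditLogInputStream.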
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-simpleread"));
    final long numTransactions = 10000;
    EditLogOutputStream out = bkjm.startLogSegment(1);
    for (long i = 1 ; i <= numTransactions; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(i);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(1, numTransactions);

    
    EditLogInputStream in = bkjm.getInputStream(1, true);
    try {
View Full Code Here
