Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HLog$WriterAndPath
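
The excerpts below fall into two groups: opening an HLog over a dedicated, timestamped log directory, and driving a log through TransactionalHLogManager in the transactional write-ahead-log tests. As a minimal, self-contained sketch of the first pattern (the HBaseConfiguration setup, the main-method wrapper and the home-directory location are assumptions for illustration; only the HLog calls themselves mirror the excerpts):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.regionserver.HLog;

    public class HLogExample {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();   // assumed old-style config setup
        FileSystem fs = FileSystem.get(conf);
        // Timestamped log directory, mirroring the "merge_" and "_<timestamp>" patterns below.
        Path logdir = new Path(fs.getHomeDirectory(),
            HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
        HLog log = new HLog(fs, logdir, conf, null);           // same constructor as in the excerpts
        try {
          // ... append edits, or hand the log to a region / TransactionalHLogManager ...
        } finally {
          log.close();
        }
      }
    }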


      // (Excerpt opens mid-statement: these lines are likely the tail of the
      // new Path(...) call that builds tabledir, the table's directory.)
          tableName.toString()
      );
      // Write-ahead log for the merge, under a timestamped "merge_" directory
      // inside the table directory.
      Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
          HREGION_LOGDIR_NAME);
      this.hlog = new HLog(fs, logdir, conf, null);
    }


  /**
   * @return the shared HLog, created on first use
   * @throws IOException
   */
  public synchronized HLog getLog() throws IOException {
    if (this.log == null) {
      Path logdir = new Path(this.fs.getHomeDirectory(),
          HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
      this.log = new HLog(this.fs, logdir, this.conf, null);
    }
    return this.log;
  }
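Because the getter is synchronized and caches the instance, callers can fetch the log repeatedly and always get the same HLog. A hedged usage sketch (the surrounding fixture and its teardown are assumptions, not part of the excerpt):

    HLog log = getLog();    // first call creates the timestamped log directory and the HLog
    // ... write edits through the shared log ...
    log.close();            // close once, e.g. in the fixture's teardown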


      // Variant of the pattern above: the table name is held as a byte[] and
      // converted with Bytes.toString(). (Excerpt again opens mid-statement.)
          Bytes.toString(tableName)
      );
      Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
          HREGION_LOGDIR_NAME);
      this.hlog = new HLog(fs, logdir, conf, null);
    }

  /**
   * Two interleaved transactions: transaction 2 commits and transaction 1
   * aborts, so only one commit should be recoverable from the log.
   * @throws IOException
   */
  public void testInterleavedCommitAbort() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transaction1Id = 1;
    long transaction2Id = 2;
    logManager.writeStartToLog(transaction1Id);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transaction1Id, update1);

    logManager.writeStartToLog(transaction2Id);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transaction2Id, update2);

    logManager.writeCommitToLog(transaction2Id);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transaction1Id, update3);

    logManager.writeAbortToLog(transaction1Id);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());
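Each of the transactional-log tests in this listing ends with the same replay step: close the log, resolve the file that was just written, and ask the manager for the committed updates. A hedged sketch of that step factored into a helper (the helper name and its placement inside the test class are assumptions; the individual calls are the ones used in the excerpts):

      // Hypothetical helper: replay the just-written log file and return the
      // committed transactions. Assumes the same imports as the tests above.
      private Map<Long, List<BatchUpdate>> recoverCommits(HLog log,
          TransactionalHLogManager logManager) throws IOException {
        log.close();                                            // flush and close the writer
        Path filename = log.computeFilename(log.getFilenum());  // file that was just written
        return logManager.getCommitsFromLog(filename, -1, null);// replay with -1/null, as in the tests
      }

With such a helper, each test body could end with, for example, assertEquals(1, recoverCommits(log, logManager).size()).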

  /**
   * A single transaction with three updates followed by a commit; exactly one
   * committed transaction should be recoverable from the log.
   * @throws IOException
   */
  public void testSingleCommit() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    // Three updates (row1..row3, single column col with val1..val3), all
    // belonging to the same transaction.
    long transactionId = 1;
    logManager.writeStartToLog(transactionId);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transactionId, update1);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transactionId, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transactionId, update3);

    logManager.writeCommitToLog(transactionId);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());
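A natural next assertion, hedged because it goes beyond the excerpt: the single recovered transaction should carry all three updates. Iterating over the map's values avoids assuming what the map is keyed by:

        // Assumed continuation of testSingleCommit, not part of the excerpt.
        for (List<BatchUpdate> recovered : commits.values()) {
          assertEquals(3, recovered.size());  // update1, update2 and update3
        }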

  /**
   * A single transaction with three updates followed by an abort; no commits
   * should be recoverable from the log.
   * @throws IOException
   */
  public void testSingleAbort() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transactionId = 1;
    logManager.writeStartToLog(transactionId);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transactionId, update1);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transactionId, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transactionId, update3);

    logManager.writeAbortToLog(transactionId);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(0, commits.size());

  /**
   * Two interleaved transactions that both commit; both should be recoverable
   * from the log.
   * @throws IOException
   */
  public void testInterleavedCommits() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transaction1Id = 1;
    long transaction2Id = 2;
    logManager.writeStartToLog(transaction1Id);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transaction1Id, update1);

    logManager.writeStartToLog(transaction2Id);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transaction2Id, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transaction1Id, update3);

    logManager.writeCommitToLog(transaction2Id);
    logManager.writeCommitToLog(transaction1Id);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(2, commits.size());
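Here transaction 1 carries two updates (update1 and update3) and transaction 2 carries one (update2). A hedged continuation, assuming getCommitsFromLog() keys the returned map by transaction id:

        // Assumed continuation of testInterleavedCommits, not part of the excerpt.
        // Assumes the map is keyed by transaction id.
        assertEquals(2, commits.get(transaction1Id).size());  // update1 and update3
        assertEquals(1, commits.get(transaction2Id).size());  // update2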

  /**
   * Two interleaved transactions where transaction 2 aborts and transaction 1
   * commits; only one commit should be recoverable from the log.
   * @throws IOException
   */
  public void testInterleavedAbortCommit() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transaction1Id = 1;
    long transaction2Id = 2;
    logManager.writeStartToLog(transaction1Id);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transaction1Id, update1);

    logManager.writeStartToLog(transaction2Id);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transaction2Id, update2);

    logManager.writeAbortToLog(transaction2Id);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transaction1Id, update3);

    logManager.writeCommitToLog(transaction1Id);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());