Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HLog
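All of the snippets on this page follow the same lifecycle: construct an HLog against a FileSystem and a log directory, write or replay edits through it, and close it when done. The sketch below pulls that pattern out on its own; it assumes the pre-0.20 style HLog(fs, dir, conf, listener) constructor and the HBaseConfiguration class used throughout these examples, so treat it as an illustration rather than a drop-in snippet. The examples that follow appear to be drawn from HBase's region-merge utilities and its transactional HLog tests.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.regionserver.HLog;

    public class HLogLifecycleSketch {
      public static void main(String[] args) throws IOException {
        HBaseConfiguration conf = new HBaseConfiguration();
        FileSystem fs = FileSystem.get(conf);

        // Timestamp the log directory so concurrent runs do not collide.
        Path logDir = new Path("/tmp",
            HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());

        HLog log = new HLog(fs, logDir, conf, null);
        try {
          // ... append edits, or open and merge regions against this log ...
        } finally {
          // closeAndDelete() also removes the log directory, which suits a
          // throwaway log like this one; plain close() keeps the files around
          // (the transactional tests below rely on that to re-read the log).
          log.closeAndDelete();
        }
      }
    }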


    // Create a log that we can reuse when we need to open regions
    Path logPath = new Path("/tmp", HConstants.HREGION_LOGDIR_NAME + "_" +
      System.currentTimeMillis());
    LOG.info("Creating log " + logPath.toString());
    HLog log = new HLog(this.fs, logPath, this.conf, null);
    try {
      // Merge regions 0 and 1
      HRegion merged = mergeAndVerify("merging regions 0 and 1",
        this.sourceRegions[0].getRegionNameAsString(),
        this.sourceRegions[1].getRegionNameAsString(), log, 2);

      // Merge the result of merging regions 0 and 1 with region 2
      merged = mergeAndVerify("merging regions 0+1 and 2",
        merged.getRegionInfo().getRegionNameAsString(),
        this.sourceRegions[2].getRegionNameAsString(), log, 3);

      // Merge the result of merging regions 0, 1 and 2 with region 3
      merged = mergeAndVerify("merging regions 0+1+2 and 3",
        merged.getRegionInfo().getRegionNameAsString(),
        this.sourceRegions[3].getRegionNameAsString(), log, 4);
     
      // Merge the result of merging regions 0, 1, 2 and 3 with region 4
      merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
        merged.getRegionInfo().getRegionNameAsString(),
        this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
    } finally {
      log.closeAndDelete();
    }
  }
View Full Code Here


  /**
   * @throws IOException
   */
  public synchronized HLog getLog() throws IOException {
    if (this.log == null) {
      Path logdir = new Path(this.fs.getHomeDirectory(),
          HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
      this.log = new HLog(this.fs, logdir, this.conf, null);
    }
    return this.log;
  }
View Full Code Here
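A hypothetical caller of this lazily-created log looks like the Merge utility snippet at the bottom of this page: the first call builds the log under the filesystem home directory, and every later call hands back the same instance so several region opens can share it (info, rootdir and conf are assumed to be in scope here).

    // Hypothetical usage of getLog(): the first call lazily creates the
    // shared log, and each region is opened against that same HLog instance.
    HLog log = utils.getLog();
    HRegion region = HRegion.openHRegion(info, rootdir, log, conf);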

  /**
   * @throws IOException
   */
  public void testSingleCommit() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    // Write columns named 1, 2, 3, etc. and then values of single byte
    // 1, 2, 3...
    long transactionId = 1;
    logManager.writeStartToLog(transactionId);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transactionId, update1);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transactionId, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transactionId, update3);

    logManager.writeCommitToLog(transactionId);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());
View Full Code Here
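The assertion above only checks that exactly one transaction was recovered. If, as the return type suggests, getCommitsFromLog() keys the map by transaction id (an assumption; the snippet does not show the keys), the three updates written above could be checked as well:

    // Hypothetical follow-up check, assuming the recovered map is keyed by
    // transaction id: transaction 1 should carry the three BatchUpdates.
    List<BatchUpdate> replayed = commits.get(transactionId);
    assertEquals(3, replayed.size());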

  /**
   * @throws IOException
   */
  public void testSingleAbort() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transactionId = 1;
    logManager.writeStartToLog(transactionId);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transactionId, update1);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transactionId, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transactionId, update3);

    logManager.writeAbortToLog(transactionId);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(0, commits.size());
View Full Code Here

  /**
   * @throws IOException
   */
  public void testInterleavedCommits() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transaction1Id = 1;
    long transaction2Id = 2;
    logManager.writeStartToLog(transaction1Id);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transaction1Id, update1);

    logManager.writeStartToLog(transaction2Id);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transaction2Id, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transaction1Id, update3);

    logManager.writeCommitToLog(transaction2Id);
    logManager.writeCommitToLog(transaction1Id);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(2, commits.size());
View Full Code Here

  /**
   * @throws IOException
   */
  public void testInterleavedAbortCommit() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transaction1Id = 1;
    long transaction2Id = 2;
    logManager.writeStartToLog(transaction1Id);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transaction1Id, update1);

    logManager.writeStartToLog(transaction2Id);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transaction2Id, update2);

    logManager.writeAbortToLog(transaction2Id);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transaction1Id, update3);

    logManager.writeCommitToLog(transaction1Id);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());
View Full Code Here

  /**
   * @throws IOException
   */
  public void testInterleavedCommitAbort() throws IOException {

    HLog log = new HLog(fs, dir, this.conf, null);
    TransactionalHLogManager logManager = new TransactionalHLogManager(log, fs,
        regionInfo, conf);

    long transaction1Id = 1;
    long transaction2Id = 2;
    logManager.writeStartToLog(transaction1Id);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transaction1Id, update1);

    logManager.writeStartToLog(transaction2Id);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transaction2Id, update2);

    logManager.writeCommitToLog(transaction2Id);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transaction1Id, update3);

    logManager.writeAbortToLog(transaction1Id);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());
View Full Code Here

    if (info2 == null) {
      throw new IOException("Cound not find " + region2 + " in " +
          meta2.getRegionName());
    }
    HRegion merged = null;
    HLog log = utils.getLog();
    HRegion r1 = HRegion.openHRegion(info1, this.rootdir, log, this.conf);
    try {
      HRegion r2 = HRegion.openHRegion(info2, this.rootdir, log, this.conf);
      try {
        merged = HRegion.merge(r1, r2);
View Full Code Here
