Package org.apache.hadoop.hbase.regionserver.compactions

Examples of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest
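
The fragments below are excerpts from HBase tests and internals (roughly the 0.96/0.98 API generation). As a minimal orientation sketch, assuming only the constructor and accessors that the excerpts themselves use, a CompactionRequest wraps the store files selected for one compaction and exposes a few properties about it:

    import java.util.List;

    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

    public class CompactionRequestSketch {
      // Sketch only: storeFiles is assumed to come from a Store or from mocks,
      // as the test fragments on this page do.
      static CompactionRequest describe(List<StoreFile> storeFiles) {
        CompactionRequest request = new CompactionRequest(storeFiles);
        int fileCount = request.getFiles().size();  // files selected for this compaction
        boolean major = request.isMajor();          // true for a major compaction request
        boolean offPeak = request.isOffPeak();      // true if off-peak ratios were applied
        System.out.println(fileCount + " files, major=" + major + ", offPeak=" + offPeak);
        return request;
      }
    }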


    // Each compaction request will find one expired store file and delete it
    // via the compaction.
    for (int i = 1; i <= storeFileNum; i++) {
      // verify the expired store file.
      CompactionContext compaction = this.store.requestCompaction();
      CompactionRequest cr = compaction.getRequest();
      // The first store file has expired normally.
      // If this is not the first compaction, there is also an empty store file
      // left over from the previous compaction, hence the Math.min(i, 2) below.
      List<StoreFile> files = new ArrayList<StoreFile>(cr.getFiles());
      Assert.assertEquals(Math.min(i, 2), cr.getFiles().size());
      for (int j = 0; j < files.size(); j++) {
        Assert.assertTrue(files.get(j).getReader().getMaxTimestamp() < (edge
            .currentTimeMillis() - this.store.getScanInfo().getTtl()));
      }
      // Verify that the expired store file is compacted to an empty store file.
View Full Code Here


  void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
      long ... expected)
  throws IOException {
    store.forceMajor = forcemajor;
    // Test Default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy)store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<StoreFile>(), false, isOffPeak, forcemajor);
    List<StoreFile> actual = new ArrayList<StoreFile>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      assertTrue(result.isOffPeak());
    }
    assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
View Full Code Here
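
The getSizes helper called by compactEquals above is not shown in the excerpt. A plausible fragment-style sketch of it, assuming the file size is read via StoreFile.Reader.length() (an assumption, not confirmed by this page), would be:

    // Hypothetical helper: collect the sizes of the selected files so the result
    // can be compared against the expected sizes with Arrays.toString().
    long[] getSizes(List<StoreFile> sfList) {
      long[] sizes = new long[sfList.size()];
      for (int i = 0; i < sfList.size(); i++) {
        sizes[i] = sfList.get(i).getReader().length(); // assumed size accessor
      }
      return sizes;
    }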

        mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
        mockFile.setEntries(0);
      }
    }
    // Test Default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
        .getCompactionPolicy()).selectCompaction(candidates,
        new ArrayList<StoreFile>(), false, false, false);
    assertTrue(result.getFiles().size() == 0);
    store.setScanInfo(oldScanInfo);
  }
View Full Code Here

    when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
    when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong()))
      .thenReturn(mock(StoreFileScanner.class));
    when(sf.getReader()).thenReturn(r);
    when(sf.createReader()).thenReturn(r);
    return new CompactionRequest(Arrays.asList(sf));
  }
View Full Code Here
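
The excerpt above is the tail of a test helper that wraps a single mocked StoreFile in a CompactionRequest. A caller might use the result roughly as follows (a fragment-style sketch; createDummyRequest is an assumed name for the enclosing helper, which the excerpt does not show):

    // Sketch: the helper returns a request over exactly one mocked StoreFile.
    CompactionRequest dummy = createDummyRequest(); // hypothetical name for the helper above
    assertEquals(1, dummy.getFiles().size());
    StoreFile onlyFile = dummy.getFiles().iterator().next();
    assertNotNull(onlyFile.getReader()); // the mocked Reader wired up with when(...) above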

    public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
        boolean mayUseOffPeak, boolean forceMajor) throws IOException {
      this.stripeRequest = compactionPolicy.selectCompaction(
          storeFileManager, filesCompacting, mayUseOffPeak);
      this.request = (this.stripeRequest == null)
          ? new CompactionRequest(new ArrayList<StoreFile>()) : this.stripeRequest.getRequest();
      return this.stripeRequest != null;
    }
View Full Code Here
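
From the caller's side, a CompactionContext whose select() behaves like the stripe variant above is typically driven as in the following fragment-style sketch (compaction, filesCompacting and mayUseOffPeak are assumed names, not taken from this page):

    // Sketch: run selection and fall back gracefully when nothing is selected.
    if (compaction.select(filesCompacting, false, mayUseOffPeak, false)) {
      CompactionRequest cr = compaction.getRequest();
      // proceed with cr.getFiles() ...
    } else {
      // The stripe policy found nothing to do; per the select() above, getRequest()
      // still returns an empty CompactionRequest rather than null.
    }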

    // not a special compaction request, so make our own list
    List<CompactionRequest> ret = null;
    if (requests == null) {
      ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
      for (Store s : r.getStores().values()) {
        CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow);
        if (selectNow) ret.add(cr);
      }
    } else {
      Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
      ret = new ArrayList<CompactionRequest>(requests.size());
View Full Code Here

      }

      @Override
      public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
          boolean mayUseOffPeak, boolean forceMajor) throws IOException {
        this.request = new CompactionRequest(selectedFiles);
        this.request.setPriority(getPriority());
        return true;
      }
View Full Code Here
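
The override above copies a priority onto the new request. A small fragment-style round-trip sketch, assuming CompactionRequest also exposes a matching getPriority() (not shown in these excerpts) and using Store.PRIORITY_USER as an example value:

    // Sketch: priority round-trip. Store.PRIORITY_USER marks a user-requested
    // compaction (Store.NO_PRIORITY appears in the last excerpt below).
    CompactionRequest cr = new CompactionRequest(selectedFiles); // selectedFiles as above
    cr.setPriority(Store.PRIORITY_USER);
    assertEquals(Store.PRIORITY_USER, cr.getPriority()); // assumed accessor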

      }

      @Override
      public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
          boolean mayUseOffPeak, boolean forceMajor) throws IOException {
        this.request = new CompactionRequest(new ArrayList<StoreFile>());
        return true;
      }
View Full Code Here

    compaction.select(al(), false, false, false);
    assertEquals(3, compaction.getRequest().getFiles().size());
    // Override the file list. Granted, overriding this compaction in this manner will
    // break things in the real world, but we only want to verify the override.
    compactUs.remove(sf);
    CompactionRequest req = new CompactionRequest(compactUs);
    compaction.forceSelect(req);
    assertEquals(2, compaction.getRequest().getFiles().size());
    assertFalse(compaction.getRequest().getFiles().contains(sf));
    // Make sure the correct method is called on the compactor.
    compaction.compact();
View Full Code Here

    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();

    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null).getRequest();
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false,
      request.isMajor());
  }
View Full Code Here
