Package org.apache.hadoop.hbase.regionserver.compactions

Examples of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest


    // Each compaction request should find exactly one expired store file
    // and remove it through the compaction.
    for (int i = 1; i <= storeFileNum; i++) {
      // verify the expired store file.
      CompactionRequest cr = this.store.requestCompaction();
      // The first request sees only the expired file; each later request also
      // sees the empty store file left behind by the previous compaction.
      assertEquals(Math.min(i, 2), cr.getFiles().size());
      for (int j = 0; j < cr.getFiles().size(); j++) {
        assertTrue(cr.getFiles().get(j).getReader().getMaxTimestamp() < (System
            .currentTimeMillis() - this.store.scanInfo.getTtl()));
      }
      // Verify that the expired store file is compacted to an empty store file.
      this.store.compact(cr);
      // It is an empty store file.
View Full Code Here
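
The expiry behaviour above depends on the store's TTL. Below is a hedged sketch of configuring a time-to-live on a column family so that old store files can become candidates for this kind of expired-file compaction; the table and family names are illustrative.

    // Illustrative only (org.apache.hadoop.hbase.HColumnDescriptor / HTableDescriptor):
    // a family with a 10-second TTL. A store file whose max timestamp is older
    // than (now - TTL) can be selected as expired, which is what the test asserts.
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setTimeToLive(10); // seconds
    HTableDescriptor table = new HTableDescriptor("ttl_table");
    table.addFamily(family);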


   * @throws IOException
   * @return The store files we compacted into, or null if we failed or opted out early.
   */
  public List<StoreFile> compact(CompactionContext compaction) throws IOException {
    assert compaction != null && compaction.hasSelection();
    CompactionRequest cr = compaction.getRequest();
    Collection<StoreFile> filesToCompact = cr.getFiles();
    assert !filesToCompact.isEmpty();
    synchronized (filesCompacting) {
      // sanity check: we're compacting files that this store knows about
      // TODO: change this to LOG.error() after more debugging
      Preconditions.checkArgument(filesCompacting.containsAll(filesToCompact));
    }

    // Ready to go. Have list of files to compact.
    LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in "
        + this + " of " + this.getRegionInfo().getRegionNameAsString()
        + " into tmpdir=" + fs.getTempDir() + ", totalSize="
        + StringUtils.humanReadableInt(cr.getSize()));

    List<StoreFile> sfs = new ArrayList<StoreFile>();
    long compactionStartTime = EnvironmentEdgeManager.currentTimeMillis();
    try {
      // Commence the compaction.
View Full Code Here
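
For the caller's side of this method, here is a hedged sketch assuming the newer Store API in which requestCompaction() hands back a CompactionContext (signatures differ between HBase versions) and a Store/HStore instance named store.

    // Hedged sketch: select files, run the compaction, inspect the result.
    CompactionContext compaction = store.requestCompaction();
    if (compaction != null) {
      List<StoreFile> newFiles = store.compact(compaction);
      // newFiles holds the store file(s) written by the compaction,
      // or null if it failed or opted out early.
    }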

          List<StoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
          boolean override = this.getCoprocessorHost().preCompactSelection(
              this, candidatesForCoproc, baseRequest);
          if (override) {
            // Coprocessor is overriding normal file selection.
            compaction.forceSelect(new CompactionRequest(candidatesForCoproc));
          }
        }

        // Normal case - coprocessor is not overriding file selection.
        if (!compaction.hasSelection()) {
View Full Code Here
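
A hedged sketch of the coprocessor side of this override path, assuming the three-argument preCompactSelection hook on RegionObserver (hook signatures vary across HBase versions). Calling bypass() is what makes the host report that selection was overridden.

    public class SkipOneFileObserver extends BaseRegionObserver {
      @Override
      public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
          Store store, List<StoreFile> candidates) throws IOException {
        // Mutate the candidate list in place; the host wraps whatever is left
        // in a new CompactionRequest via forceSelect(), as shown above.
        if (!candidates.isEmpty()) {
          candidates.remove(candidates.size() - 1);
        }
        c.bypass(); // take over selection instead of the normal policy
      }
    }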

  void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
      long ... expected)
  throws IOException {
    store.forceMajor = forcemajor;
    // Test default compaction selection
    CompactionRequest result = ((DefaultCompactionPolicy)store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<StoreFile>(), false, isOffPeak, forcemajor);
    List<StoreFile> actual = new ArrayList<StoreFile>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      assertTrue(result.isOffPeak());
    }
    assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
View Full Code Here
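
A hedged example of driving this helper from a selection test, assuming a fixture helper sfCreate(long...) that builds mock store files of the given sizes (the helper name and the sizes are illustrative).

    // With the default ratio (1.2) the two large files should be skipped and
    // only the three small ones selected.
    compactEquals(sfCreate(250, 100, 25, 12, 12),
        false /* forcemajor */, false /* isOffPeak */,
        25, 12, 12);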

        // everything went better than expected. create a compaction request
        int pri = getCompactPriority(priority);
        //not a special compaction request, so we need to make one
        if(request == null){
          request = new CompactionRequest(region, this, filesToCompact, isMajor, pri);
        } else {
          // update the request with what the system thinks the request should be
          // it's up to the request whether it wants to listen
          request.setSelection(filesToCompact);
          request.setIsMajor(isMajor);
View Full Code Here
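
For context, such a request can also be triggered explicitly from a client. A hedged sketch of that trigger (the table name is illustrative; majorCompact() is asynchronous, and the region server builds and queues the actual CompactionRequest):

    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Returns once the request has been sent; compaction runs in the background.
      admin.majorCompact("my_table");
    } finally {
      admin.close();
    }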

   *
   * @throws IOException e
   */
  public void compactStores() throws IOException {
    for(Store s : getStores().values()) {
      CompactionRequest cr = s.requestCompaction();
      if(cr != null) {
        try {
          compact(cr);
        } finally {
          s.finishRequest(cr);
View Full Code Here
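
A hedged test-side sketch of exercising this method, assuming an open HRegion instance and the older API in which flushcache() forces the memstore out to store files.

    region.flushcache();    // create store files so there is something to compact
    region.compactStores(); // runs the per-store loop shown above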

    // not a special compaction request, so make our own list
    List<CompactionRequest> ret = null;
    if (requests == null) {
      ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
      for (Store s : r.getStores().values()) {
        CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow);
        if (selectNow) ret.add(cr);
      }
    } else {
      Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
      ret = new ArrayList<CompactionRequest>(requests.size());
View Full Code Here

    public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
        boolean mayUseOffPeak, boolean forceMajor) throws IOException {
      this.stripeRequest = compactionPolicy.selectCompaction(
          storeFileManager, filesCompacting, mayUseOffPeak);
      this.request = (this.stripeRequest == null)
          ? new CompactionRequest(new ArrayList<StoreFile>()) : this.stripeRequest.getRequest();
      return this.stripeRequest != null;
    }
View Full Code Here
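
A hedged sketch of the caller's side of this contract, assuming the CompactionContext is obtained from the store engine the way HStore does (createCompaction() is the assumed entry point); when select() returns false there is nothing to compact, which is why the code above still installs a request over an empty file list.

    CompactionContext compaction = storeEngine.createCompaction();
    if (compaction.select(filesCompacting, false /* isUserCompaction */,
        true /* mayUseOffPeak */, false /* forceMajor */)) {
      CompactionRequest cr = compaction.getRequest();
      // cr.getFiles() is the stripe-aware selection to hand to the executor.
    }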


      LOG.info("Compact table=" + region.getTableDesc().getNameAsString() +
        " region=" + region.getRegionNameAsString() +
        " family=" + familyDir.getName());
      Store store = getStore(region, familyDir);
      do {
        CompactionRequest cr = store.requestCompaction();
        StoreFile storeFile = store.compact(cr);
        if (storeFile != null) {
          if (keepCompactedFiles && deleteCompacted) {
            fs.delete(storeFile.getPath(), false);
          }
View Full Code Here
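
The do-loop above is typically bounded by whether the store still wants more work. A hedged standalone sketch of the same "compact until quiescent" pattern, using Store.needsCompaction() and the older CompactionRequest-returning API (the compact-once flag from the tool is omitted):

    do {
      CompactionRequest cr = store.requestCompaction();
      if (cr == null) break;   // nothing selected, stop
      store.compact(cr);       // rewrite the selected files
      store.finishRequest(cr); // mark the request finished so its files are released
    } while (store.needsCompaction());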


