Package org.apache.hadoop.util

Examples of org.apache.hadoop.util.Progressable
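
Progressable declares a single progress() callback that long-running operations invoke to report liveness, so that frameworks such as MapReduce do not kill an otherwise-silent task for inactivity. Before the real-world excerpts below, a minimal self-contained sketch using the standard FileSystem.create(Path, Progressable) overload (the path and the log message are invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Progressable;

public class ProgressableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // The callback fires periodically while data is written (on filesystems
    // that support progress reporting, e.g. HDFS), letting a surrounding
    // task signal that it is still alive.
    FSDataOutputStream out = fs.create(new Path("/tmp/progressable-demo"),
        new Progressable() {
          @Override
          public void progress() {
            System.out.println("still writing...");
          }
        });
    out.writeUTF("hello");
    out.close();
  }
}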


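HBase's region server opens a region with an anonymous Progressable; each progress() call posts a processing message for the region so the server can report that the open is still underway: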
  protected HRegion instantiateRegion(final HRegionInfo regionInfo)
      throws IOException {
    HRegion r = new HRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
        .getTableDesc().getName()), this.hlog, this.fs, conf, regionInfo,
        this.cacheFlusher);
    r.initialize(null,  new Progressable() {
      public void progress() {
        addProcessingMessage(regionInfo);
      }
    });
    return r;
  }


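A unit test (apparently exercising a Camel HDFS component, judging by the target/test/test-camel-string path) builds an ArrayFile.Writer with a no-op Progressable, since there is no task to report to: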

        final Path file = new Path(new File("target/test/test-camel-string").getAbsolutePath());
        Configuration conf = new Configuration();
        FileSystem fs1 = FileSystem.get(file.toUri(), conf);
        ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs1, "target/test/test-camel-string1", Text.class, CompressionType.NONE, new Progressable() {
            @Override
            public void progress() {
            }
        });
        Text valueWritable = new Text();
        // ... excerpt truncated ...

 
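Another HBase variant passes the Progressable straight into the HRegion constructor rather than into initialize():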
  protected HRegion instantiateRegion(final HRegionInfo regionInfo)
      throws IOException {
    return new HRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
        .getTableDesc().getName()), this.log, this.fs, conf, regionInfo, null,
        this.cacheFlusher, new Progressable() {
          public void progress() {
            addProcessingMessage(regionInfo);
          }
        });
  }

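A further revision goes through the HRegion.newHRegion factory method, again wiring progress() to addProcessingMessage: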
  protected HRegion instantiateRegion(final HRegionInfo regionInfo)
      throws IOException {
    HRegion r = HRegion.newHRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
        .getTableDesc().getName()), this.hlog, this.fs, conf, regionInfo,
        this.cacheFlusher);
    r.initialize(null,  new Progressable() {
      public void progress() {
        addProcessingMessage(regionInfo);
      }
    });
    return r;
  }

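An HDFS test opens a DFSOutputStream directly through DFSClient.create, supplying a no-op Progressable to satisfy the signature: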
    String filename = "/testFileForceSync";
    boolean forceSync = true;
    DFSClient dfsClient = ((DistributedFileSystem) fileSystem).getClient();
    DFSOutputStream out = (DFSOutputStream)dfsClient.create(
        filename, FsPermission.getDefault(), true, true, REPLICATION_NUM, BLOCK_SIZE,
        new Progressable() {
          @Override
          public void progress() {
          }
        },
        BUFFER_SIZE,
        // ... remaining arguments truncated in this excerpt

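The companion parallel-write test does the same: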
    String filename = "/testFileParallelWrite";
    boolean doParallelWrites = true;
    DFSClient dfsClient = ((DistributedFileSystem) fileSystem).getClient();
    DFSOutputStream out = (DFSOutputStream)dfsClient.create(
        filename, FsPermission.getDefault(), true, true, REPLICATION_NUM, BLOCK_SIZE,
        new Progressable() {
          @Override
          public void progress() {
          }
        },
        BUFFER_SIZE,
        // ... remaining arguments truncated in this excerpt

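The HDFS RAID block fixer treats its MapReduce Context as a Progressable (Context implements the interface) and falls back to the null object RaidUtils.NULL_PROGRESSABLE when it runs outside a job: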
  /**
   * @return true if file was reconstructed, false if no reconstruction
   * was necessary or possible.
   */
  boolean reconstructFile(Path srcPath, Context context)
      throws IOException, InterruptedException {
    Progressable progress = context;
    if (progress == null) {
      progress = RaidUtils.NULL_PROGRESSABLE;
    }

    if (RaidNode.isParityHarPartFile(srcPath)) {
      // ... excerpt truncated ...
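
The NULL_PROGRESSABLE fallback above is the classic null-object pattern; a sketch of what such a constant looks like (the actual body in RaidUtils is assumed, not copied):

import org.apache.hadoop.util.Progressable;

public final class NullProgressableSketch {
  // Null-object Progressable: callers may invoke progress() unconditionally
  // instead of null-checking their reporter. Mirrors the role of
  // RaidUtils.NULL_PROGRESSABLE; the exact upstream body is assumed.
  public static final Progressable NULL_PROGRESSABLE = new Progressable() {
    @Override
    public void progress() {
      // deliberately does nothing
    }
  };

  private NullProgressableSketch() {}
}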

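processFile performs the actual repair: for each lost block it recovers the contents into a local temp file, ships the block to a chosen datanode, and calls progress() once per block so the surrounding task keeps signalling liveness: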
  boolean processFile(Path srcPath, ParityFilePair parityPair,
      Decoder decoder, Context context) throws IOException,
      InterruptedException {
    LOG.info("Processing file " + srcPath);
    Progressable progress = context;
    if (progress == null) {
      progress = RaidUtils.NULL_PROGRESSABLE;
    }

    DistributedFileSystem srcFs = getDFS(srcPath);
    FileStatus srcStat = srcFs.getFileStatus(srcPath);
    long blockSize = srcStat.getBlockSize();
    long srcFileSize = srcStat.getLen();
    String uriPath = srcPath.toUri().getPath();

    int numBlocksReconstructed = 0;
    List<LocatedBlockWithMetaInfo> lostBlocks = lostBlocksInFile(srcFs, uriPath, srcStat);
    if (lostBlocks.size() == 0) {
      LOG.warn("Couldn't find any lost blocks in file " + srcPath +
          ", ignoring...");
      return false;
    }
    for (LocatedBlockWithMetaInfo lb: lostBlocks) {
      Block lostBlock = lb.getBlock();
      long lostBlockOffset = lb.getStartOffset();

      LOG.info("Found lost block " + lostBlock +
          ", offset " + lostBlockOffset);

      final long blockContentsSize =
        Math.min(blockSize, srcFileSize - lostBlockOffset);
      File localBlockFile =
        File.createTempFile(lostBlock.getBlockName(), ".tmp");
      localBlockFile.deleteOnExit();

      try {
        decoder.recoverBlockToFile(srcFs, srcPath, parityPair.getFileSystem(),
            parityPair.getPath(), blockSize,
            lostBlockOffset, localBlockFile,
            blockContentsSize, context);

        // Now that we have recovered the file block locally, send it.
        String datanode = chooseDatanode(lb.getLocations());
        computeMetadataAndSendReconstructedBlock(datanode, localBlockFile,
            lostBlock, blockContentsSize,
            lb.getDataProtocolVersion(), lb.getNamespaceID(), progress);
       
        numBlocksReconstructed++;

      } finally {
        localBlockFile.delete();
      }
      progress.progress();
    }
   
    LOG.info("Reconstructed " + numBlocksReconstructed + " blocks in " + srcPath);
    return true;
  }

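processParityFile is the mirror image for parity files, with the same null-object fallback and per-block loop, plus a modification-time check to ensure the parity file still matches its source: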
  boolean processParityFile(Path parityPath, Decoder decoder,
      Context context)
  throws IOException, InterruptedException {
    LOG.info("Processing parity file " + parityPath);
   
    Progressable progress = context;
    if (progress == null) {
      progress = RaidUtils.NULL_PROGRESSABLE;
    }
   
    Path srcPath = sourcePathFromParityPath(parityPath);
    if (srcPath == null) {
      LOG.warn("Could not get regular file corresponding to parity file "
          parityPath + ", ignoring...");
      return false;
    }

    DistributedFileSystem parityFs = getDFS(parityPath);
    DistributedFileSystem srcFs = getDFS(srcPath);
    FileStatus parityStat = parityFs.getFileStatus(parityPath);
    long blockSize = parityStat.getBlockSize();
    FileStatus srcStat = srcFs.getFileStatus(srcPath);

    // Check timestamp.
    if (srcStat.getModificationTime() != parityStat.getModificationTime()) {
      LOG.warn("Mismatching timestamp for " + srcPath + " and " + parityPath +
          ", ignoring...");
      return false;
    }

    String uriPath = parityPath.toUri().getPath();
    int numBlocksReconstructed = 0;
    List<LocatedBlockWithMetaInfo> lostBlocks =
      lostBlocksInFile(parityFs, uriPath, parityStat);
    if (lostBlocks.size() == 0) {
      LOG.warn("Couldn't find any lost blocks in parity file " + parityPath +
          ", ignoring...");
      return false;
    }
    for (LocatedBlockWithMetaInfo lb: lostBlocks) {
      Block lostBlock = lb.getBlock();
      long lostBlockOffset = lb.getStartOffset();

      LOG.info("Found lost block " + lostBlock +
          ", offset " + lostBlockOffset);

      File localBlockFile =
        File.createTempFile(lostBlock.getBlockName(), ".tmp");
      localBlockFile.deleteOnExit();

      try {
        decoder.recoverParityBlockToFile(srcFs, srcPath, parityFs, parityPath,
            blockSize, lostBlockOffset, localBlockFile, context);
       
        // Now that we have recovered the parity file block locally, send it.
        String datanode = chooseDatanode(lb.getLocations());
        computeMetadataAndSendReconstructedBlock(
            datanode, localBlockFile,
            lostBlock, blockSize,
            lb.getDataProtocolVersion(), lb.getNamespaceID(),
            progress);

        numBlocksReconstructed++;
      } finally {
        localBlockFile.delete();
      }
      progress.progress();
    }
   
    LOG.info("Reconstructed " + numBlocksReconstructed + " blocks in " + parityPath);
    return true;
  }

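Decoder.generateAlternateStream follows the same convention, defaulting a null reporter to RaidUtils.NULL_PROGRESSABLE before constructing the DecoderInputStream: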
  DecoderInputStream generateAlternateStream(FileSystem srcFs, Path srcFile,
                      FileSystem parityFs, Path parityFile,
                      long blockSize, long errorOffset, long limit,
                      Context context) {
    configureBuffers(blockSize);
    Progressable reporter = context;
    if (reporter == null) {
      reporter = RaidUtils.NULL_PROGRESSABLE;
    }
   
    DecoderInputStream decoderInputStream = new DecoderInputStream(
        // ... constructor arguments truncated in this excerpt
