Class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream
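PositionTrackingInputStream is a small helper nested inside FSEditLogLoader: it wraps another InputStream and counts every byte consumed, so callers can ask getPos() for the current offset at any time. The snippets below use it for three things: reporting the offset at which a load failed, keeping offsets absolute after a seek, and driving a progress display. As orientation, here is a minimal illustrative sketch of the idea; it is not the Hadoop implementation, and the two-argument constructor is modeled on the usage in the snippets below:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

/**
 * Illustrative stand-in for FSEditLogLoader.PositionTrackingInputStream:
 * a FilterInputStream that counts bytes consumed from the wrapped stream.
 * mark()/reset() support is omitted for brevity; resetting would
 * desynchronize the counter.
 */
class TrackingInputStream extends FilterInputStream {
  private long curPos;

  TrackingInputStream(InputStream in) {
    this(in, 0L);
  }

  /** Start counting from a known absolute offset (e.g. after a seek). */
  TrackingInputStream(InputStream in, long initialPos) {
    super(in);
    this.curPos = initialPos;
  }

  @Override
  public int read() throws IOException {
    int b = super.read();
    if (b != -1) {
      curPos++;
    }
    return b;
  }

  // FilterInputStream.read(byte[]) delegates here, so this covers both.
  @Override
  public int read(byte[] buf, int off, int len) throws IOException {
    int n = super.read(buf, off, len);
    if (n > 0) {
      curPos += n;
    }
    return n;
  }

  @Override
  public long skip(long n) throws IOException {
    long skipped = super.skip(n);
    curPos += skipped;
    return skipped;
  }

  /** Absolute offset of the next byte to be read. */
  public long getPos() {
    return curPos;
  }
}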


  /**
   * Process an image file: pick an ImageLoader for its layout version and
   * run it, logging the offset reached if loading fails part-way through.
   */
  public void go() throws IOException {
    DataInputStream in = null;
    PositionTrackingInputStream tracker = null;
    ImageLoader fsip = null;
    boolean done = false;
    try {
      // Track every byte consumed so a failure can be located by offset.
      tracker = new PositionTrackingInputStream(new BufferedInputStream(
          new FileInputStream(new File(inputFile))));
      in = new DataInputStream(tracker);

      int imageVersionFile = findImageVersion(in);

      fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
      if (fsip == null) {
        throw new IOException("No image processor is available to read version "
            + imageVersionFile + ".");
      }

      fsip.loadImage(in, processor, skipBlocks);
      done = true;
    } finally {
      // Guard against the stream constructors having thrown before the
      // tracker was assigned, which would otherwise NPE here.
      if (!done && tracker != null) {
        LOG.error("image loading failed at offset " + tracker.getPos());
      }
      IOUtils.cleanup(LOG, in, tracker);
    }
  }
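Note the stream layering in this example: the tracker wraps the BufferedInputStream, so getPos() reports the offset of bytes the parser has actually consumed, unaffected by the buffer's read-ahead. Contrast this with the decompression example near the bottom of this page, where the tracker wraps the raw FileInputStream so that getPos() instead measures progress through the compressed file on disk.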



    }

    journalInputStream = new BookKeeperJournalInputStream(ledger,
        firstBookKeeperEntry);
    bin = new BufferedInputStream(journalInputStream);
    // Offset 0: the stream is positioned at the head of the ledger.
    tracker = new PositionTrackingInputStream(bin, 0);
    DataInputStream in = new DataInputStream(tracker);
    try {
      logVersion = readLogVersion(in);
    } catch (EOFException e) {
      throw new LedgerHeaderCorruptException("No header file in the ledger");
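readLogVersion is not shown in this snippet. The first four bytes of an edit log segment hold its layout version, so hitting EOF there means the ledger carries no header at all, which the caller above surfaces as LedgerHeaderCorruptException. A minimal sketch of such a reader, with names and validation assumed rather than taken from Hadoop:

import java.io.DataInputStream;
import java.io.IOException;

final class LogVersionSketch {
  // Hypothetical stand-in for the readLogVersion(...) call above. The
  // validation is an assumption; HDFS layout versions are negative integers.
  static int readLogVersion(DataInputStream in) throws IOException {
    // readInt() throws EOFException on an empty stream, which the caller
    // above translates into LedgerHeaderCorruptException.
    int logVersion = in.readInt();
    if (logVersion >= 0) {
      throw new IOException("Unexpected log version: " + logVersion);
    }
    return logVersion;
  }
}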

    // Try to set the underlying stream to the specified position
    journalInputStream.position(position);
    // Reload the position tracker and log reader to adjust to the newly
    // refreshed position
    bin = new BufferedInputStream(journalInputStream);
    tracker = new PositionTrackingInputStream(bin, position);
    DataInputStream in = new DataInputStream(tracker);
    if (position == 0) { // If we are at the beginning, re-read the version
      logVersion = readLogVersion(in);
    }
    reader = new Reader(in, logVersion);
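This is where the two-argument constructor earns its keep: after the underlying journal stream is repositioned, a fresh tracker is seeded with the absolute offset so that getPos() keeps reporting positions in the ledger's coordinate space instead of restarting from zero. A runnable toy demonstration using the illustrative TrackingInputStream sketched at the top of this page:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class SeekDemo {
  public static void main(String[] args) throws IOException {
    byte[] data = new byte[64]; // stand-in for ledger contents
    long position = 16;

    // Reposition the underlying stream, then rebuild the tracker seeded
    // with that offset, mirroring the snippet above.
    ByteArrayInputStream raw = new ByteArrayInputStream(data);
    raw.skip(position);
    TrackingInputStream tracker = new TrackingInputStream(raw, position);
    DataInputStream in = new DataInputStream(tracker);

    in.readLong(); // consume 8 bytes
    System.out.println(tracker.getPos()); // prints 24: absolute, not relative
  }
}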

    DataInputStream in = null;
    DataOutputStream out = null;

    try {
      // setup in
      PositionTrackingInputStream ptis = new PositionTrackingInputStream(
          new FileInputStream(new File(inputFile)));
      in = new DataInputStream(ptis);

      // read header information
      int imgVersion = in.readInt();
      if (!LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
        System.out.println("Image is not compressed. No output will be produced.");
        return;
      }
      int namespaceId = in.readInt();
      long numFiles = in.readLong();
      long genstamp = in.readLong();

      long imgTxId = -1;
      if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
        imgTxId = in.readLong();
      }
      FSImageCompression compression = FSImageCompression
          .readCompressionHeader(new Configuration(), in);
      if (compression.isNoOpCompression()) {
        System.out.println("Image is not compressed. No output will be produced.");
        return;
      }
      in = BufferedByteInputStream.wrapInputStream(
          compression.unwrapInputStream(in), FSImage.LOAD_SAVE_BUFFER_SIZE,
          FSImage.LOAD_SAVE_CHUNK_SIZE);
      System.out.println("Starting decompression.");

      // setup output
      out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(
          outputFile)));

      // write back the uncompressed information
      out.writeInt(imgVersion);
      out.writeInt(namespaceId);
      out.writeLong(numFiles);
      out.writeLong(genstamp);
      if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
        out.writeLong(imgTxId);
      }
      // no compression
      out.writeBoolean(false);

      // copy the data
      long size = new File(inputFile).length();
      // read in 1MB chunks
      byte[] block = new byte[1024 * 1024];
      while (true) {
        int bytesRead = in.read(block);
        if (bytesRead <= 0) {
          break;
        }
        out.write(block, 0, bytesRead);
        printProgress(ptis.getPos(), size);
      }

      out.close();

      long stop = System.currentTimeMillis();
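printProgress is not shown in the snippet. Because the tracker here wraps the raw FileInputStream, beneath both the decompression wrapper and its buffering, ptis.getPos() counts compressed bytes consumed, which is the right quantity to compare against the input file's length. A hedged sketch of such a helper, with the name and format assumed from the call site:

final class ProgressSketch {
  // Hypothetical stand-in for the printProgress(pos, total) helper above:
  // pos is compressed bytes consumed so far, total is the input file length.
  static void printProgress(long pos, long total) {
    if (total > 0) {
      System.out.printf("Processed %d of %d input bytes (%.1f%%)%n",
          pos, total, 100.0 * pos / total);
    }
  }
}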


