Source Code of org.codehaus.activemq.journal.impl.LogFileManager

/**
*
* Copyright 2004 Hiram Chirino
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.codehaus.activemq.journal.impl;

import java.io.File;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.codehaus.activemq.journal.InvalidRecordLocationException;
import org.codehaus.activemq.journal.JournalEventListener;
import org.codehaus.activemq.util.LongSequenceGenerator;

import EDU.oswego.cs.dl.util.concurrent.FutureResult;
import EDU.oswego.cs.dl.util.concurrent.QueuedExecutor;
import EDU.oswego.cs.dl.util.concurrent.ThreadFactory;

/**
* The LogFileManager manages concurrent access to a LogFile.
*
* @version $Revision: 1.7 $
*/
public class LogFileManager {
   
    static final private Log log = LogFactory.getLog(LogFileManager.class);
   
    static final private int LOG_HEADER_SIZE=512;
    static final private int OVERFLOW_RENOTIFICATION_DELAY=500;

  private final LongSequenceGenerator sequenceGenerator;
  private final byte fileManagerId;
  private boolean closed=false;

  // The index of the segment that holds the mark.
  private byte markedSegmentIndex=0;
  // The index of the log segment that is currently being filled.
  private byte appendSegmentIndex=0;
  // The offset in the log segment that is currently being filled.
  private int appendSegmentOffset=0;

  // Used to batch writes together.
  private BatchedWrite pendingBatchWrite;

  private RecordLocationImpl lastMarkedLocation;
  private LogFile file;
  private QueuedExecutor executor;

  private int rolloverFence;
  private JournalEventListener eventListener;
  private ByteBufferPool byteBufferPool;

  private long overflowNotificationTime=System.currentTimeMillis();

  /**
   * Creates a LogFileManager for the log stored in the given directory.
   */
  public LogFileManager(byte fileManagerId, LongSequenceGenerator sequenceGenerator, File logDirectory)
    throws IOException {
      this( fileManagerId, sequenceGenerator, new LogFile(logDirectory) );
  }

  /**
   * Manages a LogFile that has already been opened.
   */
  public LogFileManager(byte fileManagerId, LongSequenceGenerator sequenceGenerator, LogFile logFile) {
    this.fileManagerId = fileManagerId;
    this.sequenceGenerator = sequenceGenerator;
    this.file = logFile;
    this.byteBufferPool = new ByteBufferPool();
    this.executor = new QueuedExecutor();
    this.executor.setThreadFactory(new ThreadFactory(){
      public Thread newThread(Runnable runnable) {
        Thread answer = new Thread(runnable, "Journal Writer");
        answer.setPriority(Thread.MAX_PRIORITY);
        answer.setDaemon(true);
        return answer;
      }
    });

    lastMarkedLocation = file.getLastMarkedRecordLocation(fileManagerId);
    appendSegmentIndex = file.getAppendSegmentIndex();
    appendSegmentOffset = file.getAppendSegmentOffset();
    rolloverFence = (file.getInitialSegmentSize()/10)*9;
  }

  public RecordLocationImpl write(byte[] data, boolean sync) throws IOException {
      return write(LogFile.DATA_RECORD_TYPE, data, sync, null);
  }
   
  private RecordLocationImpl write(byte recordType, byte[] data, boolean sync, Mark mark) throws IOException {
    try {
      RecordLocationImpl location;
      BatchedWrite writeCommand;
      synchronized( this ) {     
        if(closed) {
          throw new IOException("Journal has been closed.");
        }
       
        // Create our record
        long sequenceId = sequenceGenerator.getNextSequenceId();     
        location = new RecordLocationImpl(this.fileManagerId, appendSegmentIndex, appendSegmentOffset, sequenceId);     
        Record record = new Record(sequenceId, recordType, data, mark );     
        // Piggy back the packet on the pending write batch.
        writeCommand = addToPendingWriteBatch(record);
       
        // Update where the next record will land.
        appendSegmentOffset += data.length+Record.RECORD_BASE_SIZE;     
        rolloverCheck();     
      }
     
      if( sync ) {
        writeCommand.waitForForce();
      }
     
      return location;
    } catch (IOException e) {
      throw e;
    } catch (InterruptedException e) {
      throw (IOException)new InterruptedIOException().initCause(e);
    } catch (Throwable e) {
      throw (IOException)new IOException("Write failed: "+e).initCause(e);
    }
  }
 
  /**
   * Appends the record to the pending write batch, starting a new batch (and
   * scheduling it on the executor) whenever the current one cannot take more data.
   *
   * @param record the record to append
   * @return the last BatchedWrite that the record was appended to
   * @throws InterruptedException
   */
  private BatchedWrite addToPendingWriteBatch(Record record) throws InterruptedException {
   
    // Load the write batch up with data from our record.
    // it may take more than one write batch if the record is large.
      BatchedWrite answer=null;
    while( record.remaining()>0 ) {
      // Do we need another BatchWrite?
      if( pendingBatchWrite==null ) {
        final BatchedWrite write = new BatchedWrite(byteBufferPool.getByteBuffer());
        pendingBatchWrite = write;
        executor.execute(new Runnable() {
                    public void run() {
                        try {
                            queuedWrite(write);
                        } catch (InterruptedException e) {
                        }
                    }
        });
      }
      answer = pendingBatchWrite;
      // Can we continue to use the pendingBatchWrite? 
      if( !pendingBatchWrite.append(record) ) {
          pendingBatchWrite = null;
      }
    } 
    return answer;
   
  }

  /**
   * This is a blocking call
   * @param write
   * @throws InterruptedException
   */
  private void queuedWrite(BatchedWrite write) throws InterruptedException {
   
      // Stop other threads from appending more pendingBatchWrite.
    write.disableAppend();
   
    // Do the write.
    try {     
      file.appendAndForce(write);
      write.forced();
    } catch (Throwable e) {
        write.writeFailed(e);
    } finally {
      byteBufferPool.returnByteBuffer(write.getByteBuffer());
    }
  }

  /**
   * Checks whether the append segment should be rolled over to the next
   * segment. If the journal is running low on free segments, the registered
   * JournalEventListener is first sent an overflow notification so it can
   * free some up.
   */
  private void rolloverCheck() throws IOException {

    // See if we need to issue an overflow notification.
    if( eventListener!=null
        && !file.canActivateNextSegment()
        && overflowNotificationTime+OVERFLOW_RENOTIFICATION_DELAY < System.currentTimeMillis() ) {

      // We need to send an overflow notification to free up
      // some segments.
      RecordLocationImpl safeSpot = file.getFirstRecordLocationOfSecondActiveSegment(fileManagerId);
      eventListener.overflowNotification(safeSpot);
      overflowNotificationTime = System.currentTimeMillis();
    }

    // Is it time to rollover and can we rollover?
    if( appendSegmentOffset > rolloverFence && file.canActivateNextSegment() ) {

      // don't delay the next overflow notification.
      overflowNotificationTime -= OVERFLOW_RENOTIFICATION_DELAY;

      final FutureResult result = new FutureResult();
      try {
        executor.execute(new Runnable() {
          public void run() {
            try {
              result.set( queuedActivateNextSegment() );
            } catch (Throwable e) {
              result.setException(e);
            }
          }
        });
        appendSegmentIndex = ((Byte)result.get()).byteValue();
        appendSegmentOffset = Segment.SEGMENT_HEADER_SIZE;

      } catch (InterruptedException e) {
        throw (IOException)new IOException("Interrupted.").initCause(e);
      } catch (InvocationTargetException e) {
        if( e.getTargetException() instanceof IOException)
          throw (IOException)new IOException(e.getTargetException().getMessage()).initCause(e.getTargetException());
        throw (IOException)new IOException("Unexpected Exception: ").initCause(e.getTargetException());
      }
    }
  }

    /**
   * This is a blocking call
   */
  private Byte queuedActivateNextSegment() throws IOException {
    file.activateNextSegment();
      return new Byte(file.getAppendSegmentIndex());
  }


  /**
   * Records the given location as the new mark. The mark record is written
   * through the normal write path, so it is batched like any other record.
   *
   * @param recordLocator the location to mark
   * @param force true if the call should block until the mark record is on disk
   * @throws InvalidRecordLocationException
   * @throws IOException
   * @throws InterruptedException
   */
  public synchronized void setMark(final RecordLocationImpl recordLocator, boolean force) throws InvalidRecordLocationException, InterruptedException, IOException {
    if( recordLocator==null )
          throw new InvalidRecordLocationException("The location cannot be null.");           
      if( lastMarkedLocation!=null && recordLocator.compareTo(lastMarkedLocation)<0 )
          throw new InvalidRecordLocationException("The location is less than the last mark.");           
      lastMarkedLocation = recordLocator;
      Mark mark = new Mark(recordLocator);
      byte data[] = mark.writeExternal();
      write(LogFile.MARK_RECORD_TYPE,data,force,mark);
  }

  /**
   * @return the last marked location, or null if no mark has been set
   */
  public RecordLocationImpl getMark() {
      return lastMarkedLocation;
  }

  /**
   * @param lastLocation the location to scan forward from, or null to start at
   *        the last mark (or at the first active segment if there is no mark)
   * @return the next record location to replay from, or null if there is none
   * @throws IOException
   * @throws InvalidRecordLocationException
   */
  public RecordLocationImpl getNextRecordLocation(final RecordLocationImpl lastLocation) throws IOException, InvalidRecordLocationException {
    if( lastLocation==null ) {
      if( lastMarkedLocation!=null) {
        return lastMarkedLocation;
      } else {
                byte safeSeg = file.getFirstActiveSegmentIndex();
        try {
          return file.readRecordLocation( new RecordLocationImpl(fileManagerId, safeSeg, Segment.SEGMENT_HEADER_SIZE));
        } catch (InvalidRecordLocationException e1) {
          return null;
        }
      }
    }
   
    // Run this in the queued executor thread.
    final FutureResult result = new FutureResult();
    try {
      executor.execute(new Runnable() {
        public void run() {
          try {
            result.set( queuedGetNextRecordLocation(lastLocation) );
          } catch (Throwable e) {
            result.setException(e);
          }
        }
      });
      return (RecordLocationImpl)result.get();
    } catch (InterruptedException e) {
      throw (IOException)new IOException("Interrupted.").initCause(e);
    } catch (InvocationTargetException e) {
      if( e.getTargetException() instanceof InvalidRecordLocationException)
        throw new InvalidRecordLocationException(e.getTargetException().getMessage(),e.getTargetException());
      if( e.getTargetException() instanceof IOException)
        throw (IOException)new IOException(e.getTargetException().getMessage()).initCause(e.getTargetException());
      throw (IOException)new IOException("Unexpected Exception: ").initCause(e.getTargetException());
    }
  }

  private RecordLocationImpl queuedGetNextRecordLocation(RecordLocationImpl location) throws IOException, InvalidRecordLocationException {
    return file.getNextDataRecordLocation(location);
  }

  /**
   * Reads the data payload of the record at the given location. The read is
   * executed on the journal's worker thread.
   *
   * @param location the location of the record to read
   * @return the record's data
   * @throws InvalidRecordLocationException
   * @throws IOException
   */
  public byte[] read(final RecordLocationImpl location) throws IOException, InvalidRecordLocationException {
    // Run this in the queued executor thread.
    final FutureResult result = new FutureResult();
    try {
      executor.execute(new Runnable() {
        public void run() {
          try {
            result.set( queuedRead(location) );
          } catch (Throwable e) {
            result.setException(e);
          }
        }
      });
      return (byte[])result.get();
    } catch (InterruptedException e) {
      throw (IOException)new IOException("Interrupted.").initCause(e);
    } catch (InvocationTargetException e) {
      if( e.getTargetException() instanceof InvalidRecordLocationException)
        throw new InvalidRecordLocationException(e.getTargetException().getMessage(),e.getTargetException());
      if( e.getTargetException() instanceof IOException)
        throw (IOException)new IOException(e.getTargetException().getMessage()).initCause(e.getTargetException());
      throw (IOException)new IOException("Unexpected Exception: ").initCause(e.getTargetException());
    }
  }
 
  private byte[] queuedRead(RecordLocationImpl newLocation) throws IOException, InvalidRecordLocationException {

    int segmentIndex;
    int segmentOffset;
    if( newLocation==null ) {
      segmentIndex=markedSegmentIndex;
      segmentOffset=Segment.SEGMENT_HEADER_SIZE;
    } else {
      segmentIndex=newLocation.getSegmentIndex();
      segmentOffset=newLocation.getSegmentOffset();
    }

    return file.readData(segmentIndex,segmentOffset);
  }

    /**
     * @param eventListener
     */
    public void setJournalEventListener(JournalEventListener eventListener) {
        this.eventListener = eventListener;
    }
 
  /**
   * Shuts down the journal worker thread after the currently queued tasks
   * complete and then closes the underlying LogFile.
   */
  public void close() {
    if(closed)
      return;
    executor.shutdownAfterProcessingCurrentlyQueuedTasks();
    try { file.close(); } catch ( Throwable e ) { /* ignore errors while closing */ }
    closed = true;
  }

  public long getLastSequenceId() {
    return file.getLastSequenceId();
  }

  /**
   * @return the directory that the journal log is stored in
   */
  public File getLogDirectory() {
    return file.getLogDirectory();
  }

  public int getTotalSegements() {
    return file.getTotalSegements();
  }

  public int getInitialSegmentSize() {
    return file.getInitialSegmentSize();
  }
}
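
A minimal usage sketch for the class above, not part of the original source. The directory name, the example class itself, and the assumption that LongSequenceGenerator has a no-argument constructor are made up for illustration; the sketch is placed in the same package so that RecordLocationImpl resolves. In ActiveMQ this class is normally driven by the journal implementation in the same package rather than used directly.

package org.codehaus.activemq.journal.impl;

import java.io.File;

import org.codehaus.activemq.util.LongSequenceGenerator;

// Hypothetical driver class, for illustration only.
public class LogFileManagerExample {
    public static void main(String[] args) throws Exception {
        File dir = new File("journal-data");                 // assumed directory name
        dir.mkdirs();

        LogFileManager manager = new LogFileManager(
                (byte) 0, new LongSequenceGenerator(), dir); // assumes a no-arg generator constructor
        try {
            // Append a record; sync=true blocks until its batch has been forced to disk.
            RecordLocationImpl loc = manager.write("hello journal".getBytes(), true);

            // Read the record back; the read runs on the journal's worker thread.
            byte[] data = manager.read(loc);
            System.out.println(new String(data));

            // Advance the mark so that earlier segments become reclaimable.
            manager.setMark(loc, true);
        } finally {
            manager.close();
        }
    }
}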
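
The key idea in write(), addToPendingWriteBatch() and queuedWrite() above is that concurrent writers piggyback their records onto a shared pending batch, a single worker thread writes and forces that batch once, and synchronous callers simply wait for the force. Below is a simplified, self-contained sketch of that batching pattern using only standard JDK classes; it is not the BatchedWrite/ByteBufferPool/QueuedExecutor machinery from the listing, the class and constant names are made up, and error propagation to waiters as well as records larger than one batch are left out.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Simplified batched-write sketch: callers append into a shared buffer, one
// writer thread forces the batch once, then every synchronous caller is released.
class BatchedWriteSketch {
    private static final int BATCH_SIZE = 64 * 1024;

    private final FileChannel channel;
    private final ExecutorService writer = Executors.newSingleThreadExecutor();
    private ByteBuffer pending;            // batch currently accepting appends, guarded by 'this'
    private CountDownLatch pendingForced;  // released once 'pending' has reached the disk

    BatchedWriteSketch(Path path) throws IOException {
        channel = FileChannel.open(path, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
    }

    /** Appends data; if sync is true, blocks until the data has been forced to disk. */
    void write(byte[] data, boolean sync) throws InterruptedException {
        CountDownLatch forced;
        synchronized (this) {
            if (pending == null || pending.remaining() < data.length) {
                // Start a new batch and hand it to the single writer thread.
                final ByteBuffer batch = ByteBuffer.allocate(BATCH_SIZE);
                final CountDownLatch latch = new CountDownLatch(1);
                pending = batch;
                pendingForced = latch;
                writer.execute(new Runnable() {
                    public void run() {
                        synchronized (BatchedWriteSketch.this) {
                            // Stop further appends to this batch before writing it out.
                            if (pending == batch) {
                                pending = null;
                                pendingForced = null;
                            }
                        }
                        try {
                            batch.flip();
                            while (batch.hasRemaining()) {
                                channel.write(batch);
                            }
                            channel.force(false); // one force covers every record in the batch
                        } catch (IOException e) {
                            // a real implementation would hand the failure back to the waiters
                        } finally {
                            latch.countDown();
                        }
                    }
                });
            }
            pending.put(data);     // piggyback this record on the open batch
            forced = pendingForced;
        }
        if (sync) {
            forced.await();        // wait until the batch holding our record was forced
        }
    }

    void close() throws IOException {
        writer.shutdown();   // queued batches may still be in flight; a real close would wait
        channel.close();
    }
}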