/*
* Copyright 2000-2007 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.util.io;
import com.intellij.openapi.diagnostic.Logger;
import gnu.trove.TIntArrayList;
import org.jetbrains.annotations.NonNls;
import java.io.*;
import java.util.Arrays;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
/**
* @author max
*/
@SuppressWarnings({"PointlessArithmeticExpression", "IOResourceOpenedButNotSafelyClosed"})
public class PagedMemoryMappedFile {
    private static final Logger LOG = Logger.getInstance("#com.intellij.util.io.PagedMemoryMappedFile");

    // Initial file size in bytes, clamped so the first free run fits in one page's short count.
    private final int START_FILE_SIZE;
    // Size in bytes of one cluster; all byte offsets below are cluster index times this.
    private final int CLUSTER_SIZE;
    // Cluster count of the trailing free page kept by compact(); tunable via setter.
    private short COMPACTED_FILE_FREEPAGE_SIZE = 64;

    // Per-page metadata layout. Each page stores this block twice: a header at the
    // start of its cluster run and a mirrored trailer in the run's last HEADER_SIZE bytes
    // (the trailer lets a following page locate its previous physical sibling).
    //   int   at PAGE_LINK_OFFSET      — prev link (header) / next link (trailer); 0 = none
    //   short at CLUSTERS_COUNT_OFFSET — number of clusters in this page's run
    //   byte  at BYTE_COUNT_OFFSET     — 0 when the page is free, 1 when occupied
    private static final int PAGE_LINK_OFFSET = 0;
    private static final int CLUSTERS_COUNT_OFFSET = 4;
    private static final int BYTE_COUNT_OFFSET = 6;
    public static final int HEADER_SIZE = 7;

    // Upper bound on the cluster count a single page may span.
    private static final int MAX_CLUSTERS_PER_PAGE = 32000;
    // Delay in milliseconds before a deferred flush runs.
    private static final int DEFFERED_FORCE_DELAY = 1000;

    // Single shared daemon performing deferred flushes for all instances.
    private static final ScheduledExecutorService ourForceAlarm = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        public Thread newThread(final Runnable r) {
            @NonNls final Thread t = new Thread(r);
            t.setName("PagedMemoryMappedFile flusher");
            return t;
        }
    });

    private final File myFile;
    private final PagedFileStorage myStorage;
    // When true, structural operations run the (expensive) full consistency check.
    private boolean myIsTestMode;
    // Current length of the backing file in bytes.
    private long myFileLength;
    private boolean myIsDisposed = false;
    // Pending deferred-flush task, or null when none is scheduled.
    private volatile Runnable myActiveFlushRequest;
    // Bumped by deferredForce(); lets a stale flush task detect newer modifications.
    private volatile int myModificationCount = 0;

    // One-slot reuse pools for stream wrappers (not thread-safe).
    private PagedDataOutputStream myOutputPool = null;
    private RandomAccessPagedDataInput myInputPool = null;
public PagedMemoryMappedFile(File file, int initialSize, int pageSize) throws IOException {
START_FILE_SIZE = Math.min(initialSize, pageSize * MAX_CLUSTERS_PER_PAGE);
CLUSTER_SIZE = pageSize;
myFile = file;
if (!file.exists()) {
file.getParentFile().mkdirs();
file.createNewFile();
}
myStorage = new PagedFileStorage(file);
remap(false);
myIsTestMode = false;
}
/** Convenience constructor: 1 KB initial size, 32-byte clusters. */
public PagedMemoryMappedFile(File file) throws IOException {
    this(file, 1024, 32);
}

// Overrides the cluster count of the trailing free page kept by compact().
public void setCompactedFileFreePageSize(short freePageSize) {
    COMPACTED_FILE_FREEPAGE_SIZE = freePageSize;
}

// Enables the full consistency check after structural operations (slow; tests only).
void setTestMode(boolean testMode) {
    myIsTestMode = testMode;
}
/**
 * Resizes the backing storage and registers the newly added space as a free page.
 * An empty file grows to START_FILE_SIZE; with {@code extend == true} an existing
 * file grows by min(current length, 128 KB); otherwise only the length is refreshed.
 */
private void remap(boolean extend) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Remapping: " + myFile.getAbsolutePath());
    }
    int length = (int)myStorage.length();
    // Index of the first cluster past the current end — start of the new free run.
    int freePage = length / CLUSTER_SIZE;
    int increment = 0;
    if (length == 0) {
        increment = START_FILE_SIZE;
    }
    else if (extend) {
        increment = Math.min(length, 1024 * 128);
    }
    final int newSize = length + increment;
    myStorage.resize(newSize);
    myFileLength = newSize;
    if (increment > 0) {
        if (freePage == 0) {
            // Cluster 0 holds the file header (free-list head and version), so the
            // free run starts at cluster 1; the decrement makes the division below
            // exclude that header cluster from the free count.
            freePage = 1;
            increment--;
        }
        addFreePage(freePage, (short)(increment / CLUSTER_SIZE));
    }
}
/**
 * Truncates the file by shrinking its last free page (when larger than
 * COMPACTED_FILE_FREEPAGE_SIZE clusters) down to that size.
 */
public void compact() throws IOException {
    // Walk the free list looking for a free page whose run touches the end of file.
    int freePage = getFirstFreePage();
    while (freePage > 0 && !isLastPage(freePage)) {
        freePage = getNextPageLink(freePage);
    }
    if (freePage > 0 && getClustersCount(freePage) > COMPACTED_FILE_FREEPAGE_SIZE) {
        int next = getNextPageLink(freePage);
        int prev = getPrevPageLink(freePage);
        setClustersCount(freePage, COMPACTED_FILE_FREEPAGE_SIZE);
        markPageFree(freePage, true);
        // Shrinking relocated the page's trailer, so both links must be rewritten.
        linkPages(freePage, next);
        linkPages(prev, freePage);
        myFileLength = (freePage + COMPACTED_FILE_FREEPAGE_SIZE) * CLUSTER_SIZE;
        myStorage.resize((int)myFileLength);
    }
}

// A page is "last" when its cluster run reaches the end of the file.
private boolean isLastPage(int page) {
    return getNextSiblingPage(page) == -1;
}
/** Allocates a fresh page from the free list and returns a writer positioned at it. */
public RecordDataOutput createRecord() {
    final int newPage = getFreePage();
    return getWriter(newPage);
}
/**
 * Returns a reader positioned at {@code page}, reusing the pooled instance
 * when one is available (single-slot pool, not thread-safe).
 */
public RandomAccessPagedDataInput getReader(int page) {
    final RandomAccessPagedDataInput reusable = myInputPool;
    if (reusable == null) {
        return new RandomAccessPagedDataInput(new PagedInputStream(page));
    }
    myInputPool = null;
    reusable.setup(page);
    return reusable;
}
// Hands a reader back to the one-slot pool for the next getReader() call.
public void returnToPool(final RandomAccessPagedDataInput input) {
    myInputPool = input;
}

// Hands a writer back to the one-slot pool for the next getWriter() call.
private void returnToPool(final PagedDataOutputStream stream) {
    myOutputPool = stream;
}
/**
 * Returns a writer positioned at {@code page}, reusing the pooled instance
 * when one is available (single-slot pool, not thread-safe).
 */
public RecordDataOutput getWriter(int page) {
    final PagedDataOutputStream reusable = myOutputPool;
    if (reusable == null) {
        return new PagedDataOutputStream(page);
    }
    myOutputPool = null;
    reusable.setup(page);
    return reusable;
}
/**
 * Releases the record whose chain starts at {@code page}, returning every page
 * of the chain to the free list. In test mode the file is then verified.
 */
public void delete(int page) {
    assertTrue(page > 0);
    uncheckedDelete(page);
    checkConsistency(false);
}
/**
 * Frees the whole page chain starting at {@code page} (a 0 link terminates the
 * chain). Iterative rather than recursive so that very long record chains cannot
 * overflow the stack; the per-page order of operations is unchanged.
 */
private void uncheckedDelete(int page) {
    while (page != 0) {
        assertTrue(!isPageFree(page));
        // Capture the forward link before addFreePage() may merge/relink this page.
        final int next = getNextPageLink(page);
        deleteFromList(page);
        addFreePage(page, getClustersCount(page));
        page = next;
    }
}
/**
 * Returns the page whose cluster run ends directly before {@code page},
 * or -1 when there is none (page 1 directly follows the file header).
 */
private int getPrevSiblingPage(int page) {
    if (page <= 1) {
        return -1;
    }
    // The previous page's trailer (its run's last HEADER_SIZE bytes) holds its cluster count.
    final short prevCount = myStorage.getShort(page * CLUSTER_SIZE - HEADER_SIZE + CLUSTERS_COUNT_OFFSET);
    return page - prevCount;
}
/**
 * Returns the page starting directly after {@code page}'s cluster run,
 * or -1 when that run touches the end of the file.
 */
private int getNextSiblingPage(int page) {
    final int candidate = page + getClustersCount(page);
    return candidate * CLUSTER_SIZE >= myFileLength ? -1 : candidate;
}
/**
 * Pops a page off the free list, growing the file first when the list is empty.
 * NOTE(review): if remap() keeps throwing, the exception is only logged and the
 * recursive retry makes no progress — confirm storage failures are surfaced
 * elsewhere before relying on termination here.
 */
private synchronized int getFreePage() {
    int page = getFirstFreePage();
    if (page == 0) {
        try {
            remap(true);
        }
        catch (IOException e) {
            LOG.error(e);
        }
        return getFreePage();
    }
    deleteFromList(page);
    return page;
}
// Reads the page's cluster count from its header; a valid page never has count 0.
private short getClustersCount(int page) {
    short count = myStorage.getShort(page * CLUSTER_SIZE + CLUSTERS_COUNT_OFFSET);
    assertTrue(count > 0);
    return count;
}

// Writes the cluster count into both the page header and the mirrored trailer
// (the trailer is what getPrevSiblingPage() of the following page reads).
private void setClustersCount(int page, short count) {
    assertTrue(page > 0);
    assertTrue(count > 0 && count < MAX_CLUSTERS_PER_PAGE);
    myStorage.putShort(page * CLUSTER_SIZE + CLUSTERS_COUNT_OFFSET, count);
    myStorage.putShort((page + count) * CLUSTER_SIZE - HEADER_SIZE + CLUSTERS_COUNT_OFFSET, count);
}
// The backward link is stored in the page's header; 0 means "no previous page".
private int getPrevPageLink(int page) {
    return myStorage.getInt(page * CLUSTER_SIZE + PAGE_LINK_OFFSET);
}

// The forward link is stored in the page's trailer; 0 means "no next page".
private int getNextPageLink(int page) {
    return myStorage.getInt((page + getClustersCount(page)) * CLUSTER_SIZE - HEADER_SIZE + PAGE_LINK_OFFSET);
}
/**
 * Makes {@code nextPage} follow {@code prevPage} in a page chain. Either side
 * may be 0, meaning "detach that end": prevPage is written into nextPage's
 * header and nextPage into prevPage's trailer, with overlap sanity assertions.
 */
private void linkPages(int prevPage, int nextPage) {
    assertTrue(prevPage == 0 || prevPage != nextPage);
    if (nextPage != 0) {
        myStorage.putInt(nextPage * CLUSTER_SIZE + PAGE_LINK_OFFSET, prevPage);
        assertTrue(prevPage == 0 || getNextPageLink(nextPage) != prevPage);
    }
    if (prevPage != 0) {
        // First cluster past prevPage's run; its trailer sits just before it.
        int rightSide = prevPage + getClustersCount(prevPage);
        /* TODO: Can be fired if called from compact. Correct compact
        assertTrue(rightSide != nextPage ||
                   getClustersCount(prevPage) + getClustersCount(nextPage) >= MAX_CLUSTERS_PER_PAGE);
        */
        myStorage.putInt(rightSide * CLUSTER_SIZE - HEADER_SIZE + PAGE_LINK_OFFSET, nextPage);
        if (nextPage != 0) {
            // A linked page must lie entirely outside prevPage's cluster run.
            if (!(nextPage <= prevPage || nextPage >= rightSide)) {
                LOG.error("nextPage = " + nextPage + ", prevPage = " + prevPage + ", rightSide = " + rightSide);
                assertTrue(false);
            }
        }
    }
}
// Finalizes a written page: trims unused tail clusters and marks it occupied.
private void closePage(int page, int bytesWritten) {
    splitPage(page, bytesWritten);
    markPageFree(page, false);
}

/**
 * When {@code bytesWritten} (plus header and trailer overhead) does not need
 * all of the page's clusters, shrinks the page and hands the surplus clusters
 * to the free list as a new page.
 */
private void splitPage(int page, int bytesWritten) {
    short oldClustersCount = getClustersCount(page);
    if (oldClustersCount > 1 && bytesWritten + HEADER_SIZE * 2 < oldClustersCount * CLUSTER_SIZE) {
        // Round the required cluster count up.
        short newClustersCount = (short)((bytesWritten + HEADER_SIZE * 2) / CLUSTER_SIZE);
        if ((bytesWritten + HEADER_SIZE * 2) % CLUSTER_SIZE > 0) newClustersCount++;
        if (oldClustersCount > newClustersCount) {
            setClustersCount(page, newClustersCount);
            markPageFree(page, false);
            int newPage = page + newClustersCount;
            setClustersCount(newPage, (short)(oldClustersCount - newClustersCount));
            // Detach the remainder page on both sides before freeing it.
            linkPages(newPage, 0);
            linkPages(0, newPage);
            addFreePage(newPage, (short)(oldClustersCount - newClustersCount));
        }
    }
}
/**
 * Unlinks {@code page} from whatever chain it is on: its neighbours are spliced
 * together, the free-list head is advanced if needed, and both of the page's
 * own links are cleared.
 */
private void deleteFromList(int page) {
    assertTrue(page > 0);
    int prev = getPrevPageLink(page);
    int next = getNextPageLink(page);
    if (getFirstFreePage() == page) {
        assertTrue(prev == 0);
        setFirstFreePage(next);
    }
    linkPages(prev, next);
    linkPages(0, page);
    linkPages(page, 0);
}
/**
 * Coalesces {@code page} with the physically adjacent free page that follows it:
 * the follower is absorbed and {@code page} takes its place in the free list.
 */
private void mergeWithNext(int page) {
    int nextSubling = getNextSiblingPage(page);
    assertTrue(nextSubling != -1 && isPageFree(nextSubling));
    deleteFromList(page);
    int prev = getPrevPageLink(nextSubling);
    int next = getNextPageLink(nextSubling);
    short count = getClustersCount(nextSubling);
    count += getClustersCount(page);
    setClustersCount(page, count);
    if (getFirstFreePage() == nextSubling) {
        assertTrue(prev == 0);
        setFirstFreePage(page);
    }
    linkPages(prev, page);
    linkPages(page, next);
}

/**
 * Coalesces {@code page} into the physically adjacent free page before it;
 * the preceding page survives and grows. May cascade forward when the list
 * successor is also physically adjacent and the merged size stays in range.
 */
private void mergeWithPrev(int page) {
    int prevSubling = getPrevSiblingPage(page);
    assertTrue(prevSubling != -1 && isPageFree(prevSubling));
    deleteFromList(page);
    int prev = getPrevPageLink(prevSubling);
    int next = getNextPageLink(prevSubling);
    short count = getClustersCount(prevSubling);
    count += getClustersCount(page);
    setClustersCount(prevSubling, count);
    if (getFirstFreePage() == prevSubling) {
        assertTrue(prev == 0);
        // NOTE(review): this assignment is a no-op (the head already equals
        // prevSubling) — compare with mergeWithNext, which moves the head.
        setFirstFreePage(prevSubling);
    }
    linkPages(prev, prevSubling);
    // If the list successor is also the physical successor, keep merging instead
    // of creating an adjacent-but-separate pair (short-circuit protects next == 0).
    boolean canMergeAgain = getPrevSiblingPage(next) == prevSubling && count + getClustersCount(next) < MAX_CLUSTERS_PER_PAGE;
    if (!canMergeAgain) {
        linkPages(prevSubling, next);
    }
    else {
        mergeWithPrev(next);
    }
}
/**
 * Returns a run of {@code count} clusters starting at {@code page} to the free
 * list, coalescing with physically adjacent free neighbours and pushing the
 * result onto the head of the list when it is not already linked.
 */
private synchronized void addFreePage(int page, short count) {
    if (count == 0) return;
    setClustersCount(page, count);
    markPageFree(page, true);
    int prevSubling = getPrevSiblingPage(page);
    if (prevSubling != -1 && isPageFree(prevSubling)) {
        if (getClustersCount(page) + getClustersCount(prevSubling) < MAX_CLUSTERS_PER_PAGE) {
            // The preceding page absorbs ours; continue working with the survivor.
            mergeWithPrev(page);
            page = prevSubling;
            markPageFree(page, true);
        }
    }
    int nextSubling = getNextSiblingPage(page);
    if (nextSubling != -1 && isPageFree(nextSubling)) {
        if (getClustersCount(page) + getClustersCount(nextSubling) < MAX_CLUSTERS_PER_PAGE) {
            mergeWithNext(page);
            markPageFree(page, true);
        }
    }
    // Push onto the free-list head only if the page is still detached.
    if (getPrevPageLink(page) == 0 && page != getFirstFreePage()) {
        linkPages(page, getFirstFreePage());
        setFirstFreePage(page);
        markPageFree(page, true);
    }
}
// File header (cluster 0): the int at offset 0 is the head of the free-page list.
private int getFirstFreePage() {
    return myStorage.getInt(0);
}

private void setFirstFreePage(int page) {
    myStorage.putInt(0, page);
}

// File header (cluster 0): the int at offset 4 is a client-defined version stamp.
public int getVersion() {
    return myStorage.getInt(4);
}

public void setVersion(int ver) {
    myStorage.putInt(4, ver);
}
// Stores the free/occupied flag in both the page header and the mirrored trailer.
private void markPageFree(int page, boolean isFree) {
    assertTrue(page > 0);
    myStorage.put(page * CLUSTER_SIZE + BYTE_COUNT_OFFSET, (byte)(isFree ? 0 : 1));
    myStorage.put((page + getClustersCount(page)) * CLUSTER_SIZE - HEADER_SIZE + BYTE_COUNT_OFFSET, (byte)(isFree ? 0 : 1));
}

// A zero flag byte in the header means the page is free.
public boolean isPageFree(int page) {
    return myStorage.get(page * CLUSTER_SIZE + BYTE_COUNT_OFFSET) == 0;
}
/**
 * Closes the underlying storage. Any still-pending deferred flush becomes a
 * no-op because the flush task checks myIsDisposed before running.
 */
public void dispose() {
    myIsDisposed = true;
    myStorage.close();
}
/**
 * Test-mode-only invariant check: every cluster except the header cluster 0
 * must be covered by either the free list or a reachable occupied chain.
 * With {@code shouldBeFree == true}, additionally asserts nothing is occupied.
 */
private void checkConsistency(boolean shouldBeFree) {
    if (myIsTestMode) {
        // One flag per cluster; set as chains are traversed.
        boolean[] map = new boolean[(int)(myFileLength / CLUSTER_SIZE)];
        Arrays.fill(map, false);
        int firstFreePage = getFirstFreePage();
        if (firstFreePage != 0) {
            checkTraverseLinks(firstFreePage, map, true, new TIntArrayList());
        }
        for (int i = 1; i < map.length; i++) {
            boolean b = map[i];
            if (!b) {
                assertTrue(!shouldBeFree);
                // Any cluster not on the free list must head or belong to an occupied chain.
                checkTraverseLinks(i, map, false, new TIntArrayList());
            }
        }
        for (int i = 1; i < map.length; i++) {
            assertTrue(map[i]);
        }
    }
}
/**
 * Walks a page chain from {@code page}, marking visited clusters in {@code map}
 * and asserting link symmetry, the free/occupied flag, and that physically
 * adjacent pages on one chain were legitimately left un-merged. {@code stack}
 * holds the pages on the current path so chain cycles are detected.
 */
private void checkTraverseLinks(int page, boolean[] map, boolean shouldBeFree, TIntArrayList stack) {
    if (stack.indexOf(page) >= 0) {
        // Fixed typo in the diagnostic ("circute" -> "circuit"); logic unchanged.
        LOG.error("Short circuit.");
        return;
    }
    int clustersCount = getClustersCount(page);
    for (int i = 0; i < clustersCount; i++) {
        map[page + i] = true;
    }
    assertTrue(isPageFree(page) == shouldBeFree);
    int next = getNextPageLink(page);
    if (next == 0) return;
    // Adjacent list neighbours must only stay separate when merging would overflow.
    int nextSibling = getNextSiblingPage(page);
    assertTrue(nextSibling != next || getClustersCount(page) + getClustersCount(nextSibling) >= MAX_CLUSTERS_PER_PAGE);
    int prevSibling = getPrevSiblingPage(page);
    int prev = getPrevPageLink(page);
    assertTrue(prevSibling != prev || getClustersCount(page) + getClustersCount(prevSibling) >= MAX_CLUSTERS_PER_PAGE);
    assertTrue(getPrevPageLink(next) == page);
    stack.add(page);
    checkTraverseLinks(next, map, shouldBeFree, stack);
    stack.remove(stack.size() - 1);
}
/**
 * OutputStream writing into a chain of pages, allocating and linking new pages
 * on demand. Writing must end with close(), which trims the last page to the
 * bytes actually written and terminates the chain.
 */
private class PagedOutputStream extends OutputStream {
    private int myCurrentPage;
    // Absolute byte offset of the next write within myStorage.
    private int myCursor;
    // First offset past the current page's usable area (the trailer starts there).
    private int myCursorLimit;
    // Bytes written into the current page; used to trim the page when it is closed.
    private int myLocalCounter;

    public PagedOutputStream(int startPage) {
        setup(startPage);
    }

    private void setup(final int startPage) {
        assertTrue(startPage > 0);
        myCurrentPage = startPage;
        setCursor(startPage);
    }

    public void write(int b) throws IOException {
        if (myCursor >= myCursorLimit) allocatePage();
        myStorage.put(myCursor, (byte)b);
        myLocalCounter++;
        myCursor++;
    }

    public void write(byte[] b, int off, int len) throws IOException {
        int stopOffset = off + len;
        // A while-loop (was a do-while) so that a zero-length write never triggers
        // a spurious allocatePage() when the cursor sits exactly at the page limit;
        // the old behavior reset myLocalCounter, making a following close() fail
        // its myLocalCounter > 0 assertion.
        while (off < stopOffset) {
            int bufferSizeLeft = stopOffset - off;
            int pageSizeLeft = myCursorLimit - myCursor;
            if (pageSizeLeft <= 0) {
                allocatePage();
                continue;
            }
            int dataToWrite = Math.min(bufferSizeLeft, pageSizeLeft);
            myStorage.put(myCursor, b, off, dataToWrite);
            myCursor += dataToWrite;
            myLocalCounter += dataToWrite;
            off += dataToWrite;
        }
        assertTrue(off == stopOffset);
    }

    /**
     * Extends the record past the current page: preferably by absorbing a free
     * physically adjacent sibling (keeping the data contiguous), otherwise by
     * moving to the chain's next page or a freshly allocated one.
     */
    private void allocatePage() {
        int nextFromCurrent = getNextPageLink(myCurrentPage);
        int nextSublingPage = getNextSiblingPage(myCurrentPage);
        if (nextSublingPage != -1 && isPageFree(nextSublingPage) &&
            getClustersCount(myCurrentPage) + getClustersCount(nextSublingPage) < MAX_CLUSTERS_PER_PAGE) {
            // Grow in place: temporarily mark the current page free so that
            // mergeWithPrev() can coalesce the sibling into it.
            linkPages(myCurrentPage, 0);
            markPageFree(myCurrentPage, true);
            mergeWithPrev(nextSublingPage);
            markPageFree(myCurrentPage, false);
            myCursorLimit = (myCurrentPage + getClustersCount(myCurrentPage)) * CLUSTER_SIZE - HEADER_SIZE;
            return;
        }
        closePage(myCurrentPage, myLocalCounter);
        int newPage = nextFromCurrent;
        if (newPage == 0) {
            newPage = getFreePage();
        }
        short currentClustersCount = getClustersCount(myCurrentPage);
        if (myCurrentPage + currentClustersCount == newPage && currentClustersCount + getClustersCount(newPage) < MAX_CLUSTERS_PER_PAGE) {
            /* TODO: seems like this assertion is not necessary. It fails when nextFromCurrent is not 0.
            final int prevOfNew = getPrevPageLink(newPage);
            if (prevOfNew != 0) {
              assertTrue(false, "Expected 0 but was " + prevOfNew + ". nextFromCurrent=" + nextFromCurrent + ", myCurrentPage = " + myCurrentPage);
            }
            */
            // The new page happens to be physically adjacent — merge instead of linking.
            linkPages(myCurrentPage, 0);
            markPageFree(myCurrentPage, true);
            mergeWithPrev(newPage);
            markPageFree(myCurrentPage, false);
            myCursorLimit = (myCurrentPage + getClustersCount(myCurrentPage)) * CLUSTER_SIZE - HEADER_SIZE;
            return;
        }
        linkPages(myCurrentPage, newPage);
        markPageFree(myCurrentPage, false);
        myCurrentPage = newPage;
        setCursor(myCurrentPage);
    }

    private void setCursor(int page) {
        assertTrue(page > 0);
        myCursor = page * CLUSTER_SIZE + HEADER_SIZE;
        myCursorLimit = (page + getClustersCount(page)) * CLUSTER_SIZE - HEADER_SIZE;
        myLocalCounter = 0;
    }

    public void close() throws IOException {
        super.close();
        assertTrue(myLocalCounter > 0);
        closePage(myCurrentPage, myLocalCounter);
        // Terminate the chain at the last page actually written.
        linkPages(myCurrentPage, 0);
        checkConsistency(false);
    }
}
/**
 * InputStream reading a page chain, following next/prev links transparently.
 * Supports random access through setPosition(); has no EOF detection — the
 * caller is expected to know the record's length.
 */
class PagedInputStream extends InputStream {
    private int myCurrentPage;
    // Absolute byte offset of the next read within myStorage.
    private int myCursor;
    // Absolute offset of the current page's first/last usable byte area.
    private int myPageStart;
    // Logical position within the record, across page boundaries.
    private int myPosition;
    private int myPageEnd;

    public PagedInputStream(int page) {
        setup(page);
    }

    // (Re)positions the stream at the start of the chain beginning at page.
    public void setup(int page) {
        assertTrue(page > 0);
        myCurrentPage = page;
        setCursor(myCurrentPage);
        myPosition = 0;
        // myStatistics.put(page, myStatistics.get(page) + 1);
    }

    private void setCursor(int page) {
        if (page <= 0) {
            assertTrue(page > 0);
        }
        myCursor = page * CLUSTER_SIZE + HEADER_SIZE;
        myPageStart = myCursor;
        myPageEnd = (page + getClustersCount(page)) * CLUSTER_SIZE - HEADER_SIZE;
        assertTrue(myPageStart < myPageEnd);
        assertTrue(myPageStart > 0);
    }

    public int getPosition() {
        return myPosition;
    }

    /**
     * Seeks to the absolute logical position {@code pos}, walking forward or
     * backward across page boundaries one page per iteration.
     */
    public void setPosition(int pos) {
        do {
            int delta = pos - myPosition;
            if (delta == 0) return;
            assertTrue(myCurrentPage > 0);
            // Target lies within the current page — move the cursor directly.
            if (delta > 0 && myCursor + delta <= myPageEnd || delta < 0 && myCursor + delta >= myPageStart) {
                myCursor += delta;
                myPosition = pos;
                return;
            }
            if (delta > 0) {
                int posDelta = myPageEnd - myCursor;
                goNextPage();
                myPosition += posDelta;
            }
            else {
                int posDelta = myCursor - myPageStart;
                goPrevPage();
                // Account for the bytes skipped on this page plus the previous page's span.
                myPosition -= posDelta + (myPageEnd - myPageStart);
            }
        }
        while (true);
    }

    // NOTE(review): n is truncated to int before seeking, yet n is returned
    // unchanged — callers passing n > Integer.MAX_VALUE would get a wrong
    // result. Presumably records never approach that size; confirm.
    public long skip(long n) throws IOException {
        setPosition(myPosition + (int)n);
        return n;
    }

    public int read() throws IOException {
        if (myCursor >= myPageEnd) goNextPage();
        byte b = myStorage.get(myCursor);
        myCursor++;
        myPosition++;
        return 0xFF & (int)b;
    }

    public int read(byte[] b, int off, int len) throws IOException {
        int stopOffset = off + len;
        while (off < stopOffset) {
            if (myCursor >= myPageEnd) goNextPage();
            int bufferSizeLeft = stopOffset - off;
            int pageSizeLeft = myPageEnd - myCursor;
            int dataToRead = Math.min(bufferSizeLeft, pageSizeLeft);
            myStorage.get(myCursor, b, off, dataToRead);
            myCursor += dataToRead;
            myPosition += dataToRead;
            off += dataToRead;
        }
        assertTrue(off == stopOffset);
        return len;
    }

    private void goNextPage() {
        myCurrentPage = getNextPageLink(myCurrentPage);
        setCursor(myCurrentPage);
    }

    private void goPrevPage() {
        myCurrentPage = getPrevPageLink(myCurrentPage);
        setCursor(myCurrentPage);
    }

    // Identifies which PagedMemoryMappedFile this stream reads from.
    PagedMemoryMappedFile getOwnerFile() {
        return PagedMemoryMappedFile.this;
    }
}
/**
 * DataOutputStream facade over PagedOutputStream that remembers the record's
 * starting page (its id) and returns itself to the writer pool on close.
 */
private class PagedDataOutputStream extends DataOutputStream implements RecordDataOutput {
    // First page of the record — serves as the record id handed to callers.
    private int myStartPage;

    public PagedDataOutputStream(int startPage) {
        super(new PagedOutputStream(startPage));
        myStartPage = startPage;
    }

    // Re-targets the pooled instance at a new record's starting page.
    public void setup(int startPage) {
        myStartPage = startPage;
        ((PagedOutputStream)out).setup(startPage);
    }

    public int getRecordId() {
        return myStartPage;
    }

    public void close() throws IOException {
        super.close();
        checkConsistency(false);
        returnToPool(this);
    }
}
/**
 * Flushes the storage to disk now; on failure falls back to scheduling a
 * deferred retry instead of propagating the exception.
 */
public void immediateForce() {
    try {
        myStorage.flush();
    }
    catch (Exception e) { // IOException sometimes raises on Linux. The following seem to be a hack.
        deferredForce();
    }
}
/**
 * Schedules a flush after DEFFERED_FORCE_DELAY ms. If further modifications
 * arrive before the task runs, it reschedules itself instead of flushing, so
 * bursts of writes coalesce into one flush.
 * NOTE(review): the null-check / assignment of myActiveFlushRequest is not
 * atomic, so two racing callers could both schedule — confirm callers are
 * externally synchronized if that matters.
 */
public void deferredForce() {
    final int modCount = ++myModificationCount;
    if (myActiveFlushRequest == null) {
        final Runnable request = new Runnable() {
            public void run() {
                if (myIsDisposed) return;
                if (modCount == myModificationCount) {
                    // No writes since scheduling — safe to flush now.
                    immediateForce();
                    myActiveFlushRequest = null;
                }
                else {
                    // Newer writes arrived; push the flush further out.
                    myActiveFlushRequest = null;
                    deferredForce();
                }
            }
        };
        myActiveFlushRequest = request;
        ourForceAlarm.schedule(request, DEFFERED_FORCE_DELAY, TimeUnit.MILLISECONDS);
    }
}
// Test helper: asserts every page in the file is on the free list.
public void assertFree() {
    checkConsistency(true);
}
// Unchecked exception thrown by assertTrue() when a storage invariant is violated.
private static class PagedFileAssertionException extends RuntimeException {
    private PagedFileAssertionException(final String message) {
        super(message);
    }
}
/** Asserts {@code b}, throwing a PagedFileAssertionException with no message otherwise. */
private static void assertTrue(boolean b) {
    assertTrue(b, "");
}

/** Asserts {@code b}, throwing a PagedFileAssertionException carrying {@code message} otherwise. */
private static void assertTrue(boolean b, @NonNls String message) {
    if (b) {
        return;
    }
    throw new PagedFileAssertionException(message);
}
}