/*
* eXist Open Source Native XML Database
* Copyright (C) 2001-2007 The eXist team
* http://exist-db.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* $Id$
*/
package org.exist.storage.dom;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.text.NumberFormat;
import java.util.ArrayList;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import org.apache.log4j.Logger;
import org.exist.dom.AttrImpl;
import org.exist.dom.DocumentImpl;
import org.exist.dom.ElementImpl;
import org.exist.dom.NodeProxy;
import org.exist.dom.StoredNode;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.exist.numbering.DLNBase;
import org.exist.numbering.NodeId;
import org.exist.stax.EmbeddedXMLStreamReader;
import org.exist.storage.BrokerPool;
import org.exist.storage.BufferStats;
import org.exist.storage.CacheManager;
import org.exist.storage.DBBroker;
import org.exist.storage.NativeBroker;
import org.exist.storage.NativeBroker.NodeRef;
import org.exist.storage.Signatures;
import org.exist.storage.StorageAddress;
import org.exist.storage.btree.BTree;
import org.exist.storage.btree.BTreeCallback;
import org.exist.storage.btree.BTreeException;
import org.exist.storage.btree.DBException;
import org.exist.storage.btree.IndexQuery;
import org.exist.storage.btree.Value;
import org.exist.storage.cache.Cache;
import org.exist.storage.cache.Cacheable;
import org.exist.storage.cache.LRUCache;
import org.exist.storage.journal.LogEntryTypes;
import org.exist.storage.journal.Loggable;
import org.exist.storage.journal.Lsn;
import org.exist.storage.lock.Lock;
import org.exist.storage.lock.ReentrantReadWriteLock;
import org.exist.storage.txn.TransactionException;
import org.exist.storage.txn.Txn;
import org.exist.util.ByteConversion;
import org.exist.util.Configuration;
import org.exist.util.Lockable;
import org.exist.util.ReadOnlyException;
import org.exist.util.hashtable.Object2LongIdentityHashMap;
import org.exist.util.sanity.SanityCheck;
import org.exist.xquery.TerminatedException;
import org.w3c.dom.Node;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
* This is the main storage for XML nodes. Nodes are stored in document order.
* Every document gets its own sequence of pages, which is bound to the writing
* thread to avoid conflicting writes. The page structure is as follows:
* | page header | (tid1 node-data, tid2 node-data, ..., tidn node-data) |
*
* node-data contains the raw binary data of the node. Within a page, a node is
 * identified by a unique id, called tuple id (tid). Every node can thus be
* located by a virtual address pointer, which consists of the page id and the
* tid. Both components are encoded in a long value (with additional bits used
* for optional flags). The address pointer is used to reference nodes from the
* indexes. It should thus remain unchanged during the life-time of a document.
*
* However, XUpdate requests may insert new nodes in the middle of a page. In
* these cases, the page will be split and the upper portion of the page is
* copied to a split page. The record in the original page will be replaced by a
* forward link, pointing to the new location of the node data in the split
* page.
*
* As a consequence, the class has to distinguish three different types of data
* records:
*
* 1) Ordinary record:
* | tuple id | length | data |
*
 * 2) Relocated record:
 * | tuple id | length | address pointer to original location | data |
 *
 * 3) Forward link:
 * | tuple id | address pointer |
*
* tuple id and length each use two bytes (short), address pointers 8 bytes (long).
* The upper two bits of the tuple id are used to indicate the type of the record
* (see {@link org.exist.storage.dom.ItemId}).
*
* @author Wolfgang Meier <wolfgang@exist-db.org>
*/
public class DOMFile extends BTree implements Lockable {
// Logger used for storage statistics output (shared statistics logger name).
protected final static Logger LOGSTATS = Logger.getLogger( NativeBroker.EXIST_STATISTICS_LOGGER );
// On-disk file name of this store and the configuration key under which the
// DOMFile instance registers itself (see constructor).
public static final String FILE_NAME = "dom.dbx";
public static final String CONFIG_KEY_FOR_FILE = "db-connection.dom";
// Record layout sizes (see class comment): tuple id and data length are
// 2-byte shorts; every address pointer (forward link, original location,
// overflow location) is an 8-byte long.
public static final int LENGTH_TID = 2; //sizeof short
public static final int LENGTH_DATA_LENGTH = 2; //sizeof short
public static final int LENGTH_LINK = 8; //sizeof long
public static final int LENGTH_ORIGINAL_LOCATION = LENGTH_LINK;
public static final int LENGTH_FORWARD_LOCATION = LENGTH_LINK;
public static final int LENGTH_OVERFLOW_LOCATION = LENGTH_LINK;
/*
 * Byte ids for the records written to the log file.
 */
public final static byte LOG_CREATE_PAGE = 0x10;
public final static byte LOG_ADD_VALUE = 0x11;
public final static byte LOG_REMOVE_VALUE = 0x12;
public final static byte LOG_REMOVE_EMPTY_PAGE = 0x13;
public final static byte LOG_UPDATE_VALUE = 0x14;
public final static byte LOG_REMOVE_PAGE = 0x15;
public final static byte LOG_WRITE_OVERFLOW = 0x16;
public final static byte LOG_REMOVE_OVERFLOW = 0x17;
public final static byte LOG_INSERT_RECORD = 0x18;
public final static byte LOG_SPLIT_PAGE = 0x19;
public final static byte LOG_ADD_LINK = 0x1A;
public final static byte LOG_ADD_MOVED_REC = 0x1B;
public final static byte LOG_UPDATE_HEADER = 0x1C;
public final static byte LOG_UPDATE_LINK = 0x1D;
static {
    // register log entry types for this db file
    LogEntryTypes.addEntryType(LOG_CREATE_PAGE, CreatePageLoggable.class);
    LogEntryTypes.addEntryType(LOG_ADD_VALUE, AddValueLoggable.class);
    LogEntryTypes.addEntryType(LOG_REMOVE_VALUE, RemoveValueLoggable.class);
    LogEntryTypes.addEntryType(LOG_REMOVE_EMPTY_PAGE, RemoveEmptyPageLoggable.class);
    LogEntryTypes.addEntryType(LOG_UPDATE_VALUE, UpdateValueLoggable.class);
    LogEntryTypes.addEntryType(LOG_REMOVE_PAGE, RemovePageLoggable.class);
    LogEntryTypes.addEntryType(LOG_WRITE_OVERFLOW, WriteOverflowPageLoggable.class);
    LogEntryTypes.addEntryType(LOG_REMOVE_OVERFLOW, RemoveOverflowLoggable.class);
    LogEntryTypes.addEntryType(LOG_INSERT_RECORD, InsertValueLoggable.class);
    LogEntryTypes.addEntryType(LOG_SPLIT_PAGE, SplitPageLoggable.class);
    LogEntryTypes.addEntryType(LOG_ADD_LINK, AddLinkLoggable.class);
    LogEntryTypes.addEntryType(LOG_ADD_MOVED_REC, AddMovedValueLoggable.class);
    LogEntryTypes.addEntryType(LOG_UPDATE_HEADER, UpdateHeaderLoggable.class);
    LogEntryTypes.addEntryType(LOG_UPDATE_LINK, UpdateLinkLoggable.class);
}
// Version stamp checked against the file header when opening (see open()).
public final static short FILE_FORMAT_VERSION_ID = 9;
//Page types
public final static byte LOB = 21;
public final static byte RECORD = 20;
//Data length for overflow pages: a stored length of 0 marks a record whose
//payload is an 8-byte pointer to an overflow page chain.
public final static short OVERFLOW = 0;
public final static long DATA_SYNC_PERIOD = 4200;
// Cache holding the data pages (DOMPage) of this file.
private final Cache dataCache;
private BTreeFileHeader fileHeader;
// Current owner object used as key into 'pages'; per the class comment a
// page sequence is bound to the writing thread.
private Object owner = null;
private Lock lock = null;
// Maps an owner to the page number of its current write page.
private final Object2LongIdentityHashMap<Object> pages = new Object2LongIdentityHashMap<Object>(64);
// Document currently being stored/updated; its metadata receives the split
// count (see splitDataPage).
private DocumentImpl currentDocument = null;
// Reusable loggable to avoid allocating one per appended value (see add()).
private final AddValueLoggable addValueLog = new AddValueLoggable();
/**
 * Create or open the dom.dbx file in the given data directory and register
 * this instance with the configuration under the key returned by
 * {@link #getConfigKeyForFile()}.
 *
 * @param pool the broker pool this file belongs to
 * @param id file id passed through to the BTree base class
 * @param dataDir directory in which dom.dbx is located or created
 * @param config configuration object that receives this instance as a property
 * @throws DBException if the underlying file cannot be created or opened
 */
public DOMFile(BrokerPool pool, byte id, String dataDir, Configuration config) throws DBException {
    super(pool, id, true, pool.getCacheManager(), 0.01);
    lock = new ReentrantReadWriteLock(getFileName());
    fileHeader = (BTreeFileHeader)getFileHeader();
    fileHeader.setPageCount(0);
    fileHeader.setTotalCount(0);
    // Dedicated LRU cache for data pages, registered with the cache manager.
    dataCache = new LRUCache(256, 0.0, 1.0, CacheManager.DATA_CACHE);
    dataCache.setFileName(getFileName());
    cacheManager.registerCache(dataCache);
    final File file = new File(dataDir + File.separatorChar + getFileName());
    setFile(file);
    // Re-open an existing file or create a fresh one.
    if (exists()) {
        open();
    } else {
        if (LOG.isDebugEnabled())
            {LOG.debug("Creating data file: " + file.getName());}
        create();
    }
    config.setProperty(getConfigKeyForFile(), this);
}
/**
 * Remember the given page as the current write page of the present owner.
 * Does nothing if the page is already the current one.
 *
 * @param page the page that becomes the current page
 */
private final void setCurrentPage(DOMPage page) {
    final long newPageNum = page.page.getPageNum();
    if (pages.get(owner) != newPageNum) {
        pages.put(owner, newPageNum);
    }
}
/**
 * Retrieve the last page in the page sequence of the current owner. If the
 * owner has no page yet, a fresh page is allocated, registered in the
 * owner-to-page map, cached, and (when transactions are enabled) journalled.
 *
 * @param transaction the current transaction, may be null
 * @return the current page for the owner
 */
private final DOMPage getCurrentPage(Txn transaction) {
    final long pageNum = pages.get(owner);
    if (pageNum == Page.NO_PAGE) {
        // No page allocated for this owner yet: create and cache one.
        final DOMPage page = new DOMPage();
        pages.put(owner, page.page.getPageNum());
        dataCache.add(page);
        if (isTransactional && transaction != null) {
            // The new page has neither a previous nor a next data page.
            final CreatePageLoggable loggable = new CreatePageLoggable(
                transaction, Page.NO_PAGE, page.getPageNum(), Page.NO_PAGE);
            writeToLog(loggable, page.page);
        }
        return page;
    } else {
        return getDOMPage(pageNum);
    }
}
/**
 * Fetch the DOMPage with the given page number, either from the data page
 * cache or, on a cache miss, by loading it from disk.
 *
 * @param pointer the page number
 * @return the page
 */
protected final DOMPage getDOMPage(long pointer) {
    final DOMPage cached = (DOMPage) dataCache.get(pointer);
    return cached != null ? cached : new DOMPage(pointer);
}
/**
 * Open the underlying file, verifying the expected on-disk format version.
 *
 * @return true if the file was opened successfully
 * @throws DBException if the file cannot be opened
 */
public boolean open() throws DBException {
    return super.open(FILE_FORMAT_VERSION_ID);
}
/**
 * Drop the current-page association of the present owner, e.g. after a
 * document has been completely written.
 */
public void closeDocument() {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    pages.remove(owner);
}
/**
 * @return the on-disk name of this database file ("dom.dbx")
 */
public static String getFileName() {
    return FILE_NAME;
}
/**
 * @return the configuration key under which this file is registered
 */
public static String getConfigKeyForFile() {
    return CONFIG_KEY_FOR_FILE;
}
/**
 * Put the given page into the data page cache.
 *
 * @param page the page to cache
 */
public synchronized final void addToBuffer(DOMPage page) {
    dataCache.add(page);
}
/**
 * @return the cache holding the data pages of this file
 */
protected final Cache getPageBuffer() {
    return dataCache;
}
/**
 * @return the on-disk format version id of this file
 */
public short getFileVersion() {
    return FILE_FORMAT_VERSION_ID;
}
/**
 * Create a new, empty dom.dbx file.
 *
 * @return true if creation succeeded
 * @throws DBException if the file cannot be created
 */
public boolean create() throws DBException {
    // Simplified from 'if (x) return true; else return false;' — the
    // condition is already the boolean result.
    // NOTE(review): -1 is forwarded to BTree.create(short); confirm its
    // meaning (likely "no fixed key size") against the base class.
    return super.create((short) -1);
}
/**
 * Flush pending changes (unless the file is read-only) and close the
 * underlying file.
 *
 * @return always true
 * @throws DBException if flushing or closing fails
 */
public boolean close() throws DBException {
    if (!isReadOnly()) {
        flush();
    }
    super.close();
    return true;
}
/**
 * Close this file, remove it on disk and deregister its data page cache.
 * The caller is expected to hold the write lock.
 */
public void closeAndRemove() {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    super.closeAndRemove();
    cacheManager.deregisterCache(dataCache);
}
/**
 * Set the document currently being stored or updated; page splits are
 * accounted to this document's metadata.
 *
 * @param doc the current document
 */
public void setCurrentDocument(DocumentImpl doc) {
    this.currentDocument = doc;
}
/**
 * Append a value to the current page.
 *
 * This method is called when storing a new document. Each writing thread
 * gets its own sequence of pages for writing a document, so all document
 * nodes are stored in sequential order. A new page will be allocated if the
 * current page is full. If the value is larger than the page size, it will
 * be written to an overflow page and only an 8-byte pointer to that chain
 * is stored in the data page.
 *
 * @param transaction the current transaction, may be null
 * @param value the value to append
 * @return the virtual storage address of the value, or KEY_NOT_FOUND for
 *         a null/empty value
 * @throws ReadOnlyException if the file is read-only
 */
public long add(Txn transaction, byte[] value) throws ReadOnlyException {
    if (!lock.isLockedForWrite())
        {LOG.warn("The file doesn't own a write lock");}
    // Nothing to store: signal failure.
    if (value == null || value.length == 0)
        {return KEY_NOT_FOUND;}
    // overflow value? A record needs tid + length header bytes in addition
    // to the payload, so compare against the page's usable work size.
    if (value.length + LENGTH_TID + LENGTH_DATA_LENGTH > fileHeader.getWorkSize()) {
        if (LOG.isDebugEnabled())
            {LOG.debug("Creating overflow page");}
        final OverflowDOMPage overflowPage = new OverflowDOMPage(transaction);
        overflowPage.write(transaction, value);
        // Store only the overflow chain's first page number in the data page.
        final byte[] pageNum = ByteConversion.longToByte(overflowPage.getPageNum());
        return add(transaction, pageNum, true);
    } else {
        return add(transaction, value, false);
    }
}
/**
 * Append a value to the current page. If overflowPage is true, the value
 * passed in is the 8-byte page number of an already-written overflow chain;
 * the record's length field is then set to OVERFLOW (0) so readers know the
 * payload is a link to the first overflow page.
 *
 * @param transaction the current transaction, may be null
 * @param value the data — or the overflow page pointer — to append
 * @param overflowPage true if value is an overflow page pointer
 * @return the virtual storage address of the value
 * @throws ReadOnlyException if the file is read-only
 */
private long add(Txn transaction, byte[] value, boolean overflowPage) throws ReadOnlyException {
    if (!lock.isLockedForWrite())
        {LOG.warn("The file doesn't own a write lock");}
    final int valueLength = value.length;
    //Always append data to the end of the file
    DOMPage currentPage = getCurrentPage(transaction);
    //Does the value fit into current data page?
    if (currentPage.len + LENGTH_TID + LENGTH_DATA_LENGTH + valueLength > currentPage.data.length) {
        // Page is full: chain a new page behind it and make that current.
        final DOMPage newPage = new DOMPage();
        final DOMFilePageHeader currentPageHeader = currentPage.getPageHeader();
        if (isTransactional && transaction != null) {
            final UpdateHeaderLoggable loggable = new UpdateHeaderLoggable(
                transaction, currentPageHeader.getPreviousDataPage(), currentPage.getPageNum(),
                newPage.getPageNum(), currentPageHeader.getPreviousDataPage(),
                currentPageHeader.getNextDataPage());
            writeToLog(loggable, currentPage.page);
        }
        currentPageHeader.setNextDataPage(newPage.getPageNum());
        newPage.getPageHeader().setPrevDataPage(currentPage.getPageNum());
        currentPage.setDirty(true);
        dataCache.add(currentPage);
        if (isTransactional && transaction != null) {
            // NOTE(review): currentPage cannot be null here — it was
            // dereferenced above — so the null guard below is dead code.
            final CreatePageLoggable loggable = new CreatePageLoggable(
                transaction, currentPage == null ? Page.NO_PAGE : currentPage.getPageNum(),
                newPage.getPageNum(), Page.NO_PAGE);
            writeToLog(loggable, newPage.page);
        }
        currentPage = newPage;
        setCurrentPage(newPage);
    }
    // Write the record: | tid | length | data |
    final DOMFilePageHeader currentPageHeader = currentPage.getPageHeader();
    final short tupleID = currentPageHeader.getNextTupleID();
    if (isTransactional && transaction != null) {
        // addValueLog is a reused instance to avoid per-record allocations.
        addValueLog.clear(transaction, currentPage.getPageNum(), tupleID, value);
        writeToLog(addValueLog, currentPage.page);
    }
    //Save tuple identifier
    ByteConversion.shortToByte(tupleID, currentPage.data, currentPage.len);
    currentPage.len += LENGTH_TID;
    //Save data length; OVERFLOW (0) marks a link to an overflow page chain
    ByteConversion.shortToByte(overflowPage ? OVERFLOW : (short) valueLength,
        currentPage.data, currentPage.len);
    currentPage.len += LENGTH_DATA_LENGTH;
    //Save data
    System.arraycopy(value, 0, currentPage.data, currentPage.len, valueLength);
    currentPage.len += valueLength;
    currentPageHeader.incRecordCount();
    currentPageHeader.setDataLength(currentPage.len);
    currentPage.setDirty(true);
    dataCache.add(currentPage, 2);
    // return pointer from pageNum and offset into page
    return StorageAddress.createPointer((int)currentPage.getPageNum(), tupleID);
}
/**
 * Write the given loggable to the journal and stamp the page's header with
 * the resulting LSN. A TransactionException is logged, not rethrown.
 *
 * @param loggable the journal entry to write
 * @param page the page affected by the logged operation
 */
private void writeToLog(Loggable loggable, Page page) {
    try {
        logManager.writeToLog(loggable);
        page.getPageHeader().setLsn(loggable.getLsn());
    } catch (final TransactionException e) {
        LOG.warn(e.getMessage(), e);
    }
}
/**
 * Store a raw binary resource into the file. The data is always written to
 * a chain of overflow pages; the number of pages used is recorded in the
 * document's metadata.
 *
 * @param transaction the current transaction
 * @param doc the document owning the binary resource
 * @param value binary resource as byte array
 * @return the page number of the first overflow page
 */
public long addBinary(Txn transaction, DocumentImpl doc, byte[] value) {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    final OverflowDOMPage overflow = new OverflowDOMPage(transaction);
    doc.getMetadata().setPageCount(overflow.write(transaction, value));
    return overflow.getPageNum();
}
/**
 * Store a raw binary resource into the file. The data is always written to
 * a chain of overflow pages; the number of pages used is recorded in the
 * document's metadata.
 *
 * @param transaction the current transaction
 * @param doc the document owning the binary resource
 * @param is binary resource as stream
 * @return the page number of the first overflow page
 */
public long addBinary(Txn transaction, DocumentImpl doc, InputStream is) {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    final OverflowDOMPage overflow = new OverflowDOMPage(transaction);
    doc.getMetadata().setPageCount(overflow.write(transaction, is));
    return overflow.getPageNum();
}
/**
 * Return binary data stored with {@link #addBinary(Txn, DocumentImpl, byte[])}.
 *
 * @param pageNum first page of the overflow chain
 * @return the binary content
 */
public byte[] getBinary(long pageNum) {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    return getOverflowValue(pageNum);
}
/**
 * Stream binary data from the given overflow page chain to an output
 * stream. IO errors are logged, not rethrown.
 *
 * @param pageNum first page of the overflow chain
 * @param os destination stream
 */
public void readBinary(long pageNum, OutputStream os) {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    try {
        new OverflowDOMPage(pageNum).streamTo(os);
    } catch (final IOException e) {
        LOG.warn("IO error while loading overflow value", e);
    }
}
/**
 * Insert a new node after the node addressed by the given btree key.
 *
 * @param transaction the current transaction
 * @param doc the document the new node belongs to
 * @param key btree key of the node to insert after
 * @param value serialized value of the new node
 * @return the storage address of the new value, or KEY_NOT_FOUND
 */
public long insertAfter(Txn transaction, DocumentImpl doc, Value key, byte[] value) {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    try {
        final long address = findValue(key);
        if (address != KEY_NOT_FOUND) {
            return insertAfter(transaction, doc, address, value);
        }
        LOG.warn("Couldn't find the value");
    } catch (final BTreeException e) {
        LOG.warn("key not found", e);
    } catch (final IOException e) {
        LOG.warn("IO error", e);
    }
    return KEY_NOT_FOUND;
}
/**
* Insert a new node after the node located at the specified address.
*
* If the previous node is in the middle of a page, the page is split. If
* the node is appended at the end and the page does not have enough room
* for the node, a new page is added to the page sequence.
*
* @param doc the document to which the new node belongs.
* @param address the storage address of the node after which the
* new value should be inserted.
* @param value the value of the new node.
*/
public long insertAfter(Txn transaction, DocumentImpl doc, long address, byte[] value) {
    if (!lock.isLockedForWrite())
        {LOG.warn("The file doesn't own a write lock");}
    // check if we need an overflow page
    boolean isOverflow = false;
    if (LENGTH_TID + LENGTH_DATA_LENGTH + value.length > fileHeader.getWorkSize()) {
        final OverflowDOMPage overflowPage = new OverflowDOMPage(transaction);
        LOG.debug("Creating overflow page: " + overflowPage.getPageNum());
        overflowPage.write(transaction, value);
        // From here on, 'value' is the 8-byte page number of the overflow chain.
        value = ByteConversion.longToByte(overflowPage.getPageNum());
        isOverflow = true;
    }
    // locate the node to insert after
    RecordPos rec = findRecord(address);
    if (rec == null) {
        SanityCheck.TRACE("Page not found");
        return KEY_NOT_FOUND;
    }
    // Skip over the record found so rec.offset points directly behind it.
    final short vlen = ByteConversion.byteToShort(rec.getPage().data, rec.offset);
    rec.offset += LENGTH_DATA_LENGTH;
    if (ItemId.isRelocated(rec.getTupleID()))
        {rec.offset += LENGTH_ORIGINAL_LOCATION;}
    if (vlen == OVERFLOW)
        {rec.offset += LENGTH_OVERFLOW_LOCATION;}
    else
        {rec.offset += vlen;}
    //OK : we now have an offset for the new node
    final int dataLength = rec.getPage().getPageHeader().getDataLength();
    //Can we insert in the middle of the page?
    if (rec.offset < dataLength) {
        //New value fits into the page
        if (dataLength + LENGTH_TID + LENGTH_DATA_LENGTH + value.length <= fileHeader.getWorkSize()
                && rec.getPage().getPageHeader().hasRoom()) {
            // Shift the tail of the page right to make room for the record.
            final int end = rec.offset + LENGTH_TID + LENGTH_DATA_LENGTH + value.length;
            System.arraycopy(rec.getPage().data, rec.offset, rec.getPage().data, end,
                dataLength - rec.offset);
            rec.getPage().len = dataLength + LENGTH_TID + LENGTH_DATA_LENGTH + value.length;
            rec.getPage().getPageHeader().setDataLength(rec.getPage().len);
        //Doesn't fit: split the page
        } else {
            rec = splitDataPage(transaction, doc, rec);
            //Still not enough free space: create a new page
            if (rec.offset + LENGTH_TID + LENGTH_DATA_LENGTH +
                    value.length > fileHeader.getWorkSize() ||
                    !rec.getPage().getPageHeader().hasRoom()) {
                final DOMPage newPage = new DOMPage();
                final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
                LOG.debug("creating additional page: " + newPage.getPageNum() +
                    "; prev = " + rec.getPage().getPageNum() +
                    "; next = " + rec.getPage().getPageHeader().getNextDataPage());
                if (isTransactional && transaction != null) {
                    final CreatePageLoggable loggable = new CreatePageLoggable(
                        transaction, rec.getPage().getPageNum(),
                        newPage.getPageNum(), rec.getPage().getPageHeader().getNextDataPage());
                    writeToLog(loggable, newPage.page);
                }
                //Adjust page links: insert newPage between rec's page and its next
                newPageHeader.setNextDataPage(rec.getPage().getPageHeader().getNextDataPage());
                newPageHeader.setPrevDataPage(rec.getPage().getPageNum());
                if (isTransactional && transaction != null) {
                    final UpdateHeaderLoggable loggable = new UpdateHeaderLoggable(
                        transaction, rec.getPage().getPageHeader().getPreviousDataPage(),
                        rec.getPage().getPageNum(), newPage.getPageNum(),
                        rec.getPage().getPageHeader().getPreviousDataPage(),
                        rec.getPage().getPageHeader().getNextDataPage());
                    writeToLog(loggable, rec.getPage().page);
                }
                rec.getPage().getPageHeader().setNextDataPage(newPage.getPageNum());
                if (newPageHeader.getNextDataPage() != Page.NO_PAGE) {
                    //Link the next page in the chain back to the new page inserted
                    final DOMPage nextPage = getDOMPage(newPageHeader.getNextDataPage());
                    final DOMFilePageHeader nextPageHeader = nextPage.getPageHeader();
                    if (isTransactional && transaction != null) {
                        final UpdateHeaderLoggable loggable = new UpdateHeaderLoggable(
                            transaction, newPage.getPageNum(), nextPage.getPageNum(),
                            nextPageHeader.getNextDataPage(), nextPageHeader.getPreviousDataPage(),
                            nextPageHeader.getNextDataPage());
                        writeToLog(loggable, nextPage.page);
                    }
                    nextPageHeader.setPrevDataPage(newPage.getPageNum());
                    nextPage.setDirty(true);
                    dataCache.add(nextPage);
                }
                rec.getPage().setDirty(true);
                dataCache.add(rec.getPage());
                //Switch record to new page...
                rec.setPage(newPage);
                rec.offset = 0;
                rec.getPage().len = LENGTH_TID + LENGTH_DATA_LENGTH + value.length;
                rec.getPage().getPageHeader().setDataLength(rec.getPage().len);
            //enough space in split page
            } else {
                rec.getPage().len = rec.offset + LENGTH_TID + LENGTH_DATA_LENGTH + value.length;
                rec.getPage().getPageHeader().setDataLength(rec.getPage().len);
            }
        }
    //The value doesn't fit into page : create new page
    } else if (dataLength + LENGTH_TID + LENGTH_DATA_LENGTH + value.length >
            fileHeader.getWorkSize() || !rec.getPage().getPageHeader().hasRoom()) {
        final DOMPage newPage = new DOMPage();
        final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
        LOG.debug("Creating new page: " + newPage.getPageNum());
        if (isTransactional && transaction != null) {
            final CreatePageLoggable loggable = new CreatePageLoggable(
                transaction, rec.getPage().getPageNum(),
                newPage.getPageNum(), rec.getPage().getPageHeader().getNextDataPage());
            writeToLog(loggable, newPage.page);
        }
        // Insert the new page between rec's page and its current next page.
        final long nextPageNum = rec.getPage().getPageHeader().getNextDataPage();
        newPageHeader.setNextDataPage(nextPageNum);
        newPageHeader.setPrevDataPage(rec.getPage().getPageNum());
        if (isTransactional && transaction != null) {
            final DOMFilePageHeader pageHeader = rec.getPage().getPageHeader();
            final UpdateHeaderLoggable loggable =
                new UpdateHeaderLoggable(transaction, pageHeader.getPreviousDataPage(),
                    rec.getPage().getPageNum(), newPage.getPageNum(),
                    pageHeader.getPreviousDataPage(), pageHeader.getNextDataPage());
            writeToLog(loggable, rec.getPage().page);
        }
        rec.getPage().getPageHeader().setNextDataPage(newPage.getPageNum());
        if (nextPageNum != Page.NO_PAGE) {
            // Back-link the former next page to the newly inserted page.
            final DOMPage nextPage = getDOMPage(nextPageNum);
            final DOMFilePageHeader nextPageHeader = nextPage.getPageHeader();
            if (isTransactional && transaction != null) {
                final UpdateHeaderLoggable loggable =
                    new UpdateHeaderLoggable(transaction, newPage.getPageNum(),
                        nextPage.getPageNum(), nextPageHeader.getNextDataPage(),
                        nextPageHeader.getPreviousDataPage(), nextPageHeader.getNextDataPage());
                writeToLog(loggable, nextPage.page);
            }
            nextPageHeader.setPrevDataPage(newPage.getPageNum());
            nextPage.setDirty(true);
            dataCache.add(nextPage);
        }
        rec.getPage().setDirty(true);
        dataCache.add(rec.getPage());
        //Switch record to new page
        rec.setPage(newPage);
        rec.offset = 0;
        rec.getPage().len = LENGTH_TID + LENGTH_DATA_LENGTH + value.length;
        rec.getPage().getPageHeader().setDataLength(rec.getPage().len);
    //Append the value
    } else {
        rec.getPage().len = dataLength + LENGTH_TID + LENGTH_DATA_LENGTH + value.length;
        rec.getPage().getPageHeader().setDataLength(rec.getPage().len);
    }
    // Write the new record | tid | length | data | at rec.offset.
    final short tupleID = rec.getPage().getPageHeader().getNextTupleID();
    if (isTransactional && transaction != null) {
        final Loggable loggable = new InsertValueLoggable(transaction, rec.getPage().getPageNum(), isOverflow, tupleID, value, rec.offset);
        writeToLog(loggable, rec.getPage().page);
    }
    //Write tid
    ByteConversion.shortToByte(tupleID, rec.getPage().data, rec.offset);
    rec.offset += LENGTH_TID;
    //Write value length; OVERFLOW (0) marks an overflow page link
    ByteConversion.shortToByte(isOverflow ? OVERFLOW : (short) value.length,
        rec.getPage().data, rec.offset);
    rec.offset += LENGTH_DATA_LENGTH;
    //Write data
    System.arraycopy(value, 0, rec.getPage().data, rec.offset, value.length);
    rec.offset += value.length;
    rec.getPage().getPageHeader().incRecordCount();
    // Too many tuple ids consumed on this page: ask the document to defragment.
    if (doc != null && rec.getPage().getPageHeader().getCurrentTupleID() >=
            ItemId.DEFRAG_LIMIT) {
        doc.triggerDefrag();
    }
    rec.getPage().setDirty(true);
    dataCache.add(rec.getPage());
    return StorageAddress.createPointer((int)rec.getPage().getPageNum(), tupleID);
}
/**
* Split a data page at the position indicated by the rec parameter.
*
* The portion of the page starting at rec.offset is moved into a new page.
* Every moved record is marked as relocated and a link is stored into the
* original page to point to the new record position.
*
* @param doc
* @param rec
*/
private RecordPos splitDataPage(Txn transaction, DocumentImpl doc, final RecordPos rec) {
if (currentDocument != null)
{currentDocument.getMetadata().incSplitCount();}
//Check if a split is really required. A split is not required if
//all records following the split point are already links to other pages.
//In this case, the new record is just appended to a new page linked to the old one.
boolean requireSplit = false;
for (int pos = rec.offset; pos < rec.getPage().len;) {
final short tupleID = ByteConversion.byteToShort(rec.getPage().data, pos);
pos += LENGTH_TID;
if (!ItemId.isLink(tupleID)) {
requireSplit = true;
break;
}
pos += LENGTH_FORWARD_LOCATION;
}
if (!requireSplit) {
LOG.debug("page: " + rec.getPage().getPageNum() + ": no split required." +
" Next page:" + rec.getPage().getPageHeader().getNextDataPage() +
" Previous page:" +rec.getPage().getPageHeader().getPreviousDataPage());
rec.offset = rec.getPage().len;
return rec;
}
final DOMFilePageHeader pageHeader = rec.getPage().getPageHeader();
//Copy the old data up to the split point into a new array
final int oldDataLen = pageHeader.getDataLength();
final byte[] oldData = rec.getPage().data;
if (isTransactional && transaction != null) {
final Loggable loggable = new SplitPageLoggable(transaction,
rec.getPage().getPageNum(), rec.offset, oldData, oldDataLen);
writeToLog(loggable, rec.getPage().page);
}
rec.getPage().data = new byte[fileHeader.getWorkSize()];
System.arraycopy(oldData, 0, rec.getPage().data, 0, rec.offset);
//The old rec.page now contains a copy of the data up to the split point
rec.getPage().len = rec.offset;
pageHeader.setDataLength(rec.getPage().len);
rec.getPage().setDirty(true);
//Create a first split page
DOMPage firstSplitPage = new DOMPage();
if (isTransactional && transaction != null) {
final Loggable loggable = new CreatePageLoggable(transaction,
rec.getPage().getPageNum(), firstSplitPage.getPageNum(),
Page.NO_PAGE, pageHeader.getCurrentTupleID());
writeToLog(loggable, firstSplitPage.page);
}
DOMPage nextSplitPage = firstSplitPage;
nextSplitPage.getPageHeader().setNextTupleID(pageHeader.getCurrentTupleID());
long backLink;
short splitRecordCount = 0;
LOG.debug("Splitting " + rec.getPage().getPageNum() + " at " + rec.offset
+ ": New page: " + nextSplitPage.getPageNum() +
"; Next page: " + pageHeader.getNextDataPage());
//Start copying records from rec.offset to the new split pages
for (int pos = rec.offset; pos < oldDataLen; splitRecordCount++) {
//Read the current id
final short tupleID = ByteConversion.byteToShort(oldData, pos);
pos += LENGTH_TID;
//This is already a link, so we just copy it
if (ItemId.isLink(tupleID)) {
/* No room in the old page, append a new one */
if (rec.getPage().len + LENGTH_TID + LENGTH_FORWARD_LOCATION > fileHeader.getWorkSize()) {
final DOMPage newPage = new DOMPage();
final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
if (isTransactional && transaction != null) {
Loggable loggable = new CreatePageLoggable(transaction,
rec.getPage().getPageNum(), newPage.getPageNum(),
pageHeader.getNextDataPage(), pageHeader.getCurrentTupleID());
writeToLog(loggable, firstSplitPage.page);
loggable = new UpdateHeaderLoggable(transaction,
pageHeader.getPreviousDataPage(), rec.getPage().getPageNum(),
newPage.getPageNum(), pageHeader.getPreviousDataPage(),
pageHeader.getNextDataPage());
writeToLog(loggable, nextSplitPage.page);
}
newPageHeader.setNextTupleID(pageHeader.getCurrentTupleID());
newPageHeader.setPrevDataPage(rec.getPage().getPageNum());
newPageHeader.setNextDataPage(pageHeader.getNextDataPage());
LOG.debug("Appending page after split: " + newPage.getPageNum());
pageHeader.setNextDataPage(newPage.getPageNum());
pageHeader.setDataLength(rec.getPage().len);
pageHeader.setRecordCount(countRecordsInPage(rec.getPage()));
rec.getPage().cleanUp();
rec.getPage().setDirty(true);
dataCache.add(rec.getPage());
//Switch record to new page...
rec.setPage(newPage);
rec.getPage().len = 0;
dataCache.add(newPage);
}
if (isTransactional && transaction != null) {
final long oldLink = ByteConversion.byteToLong(oldData, pos);
final Loggable loggable = new AddLinkLoggable(transaction,
rec.getPage().getPageNum(), ItemId.getId(tupleID), oldLink);
writeToLog(loggable, rec.getPage().page);
}
ByteConversion.shortToByte(tupleID, rec.getPage().data, rec.getPage().len);
rec.getPage().len += LENGTH_TID;
System.arraycopy(oldData, pos, rec.getPage().data, rec.getPage().len,
LENGTH_FORWARD_LOCATION);
rec.getPage().len += LENGTH_FORWARD_LOCATION;
pos += LENGTH_FORWARD_LOCATION;
continue;
}
//Read data length
final short vlen = ByteConversion.byteToShort(oldData, pos);
pos += LENGTH_DATA_LENGTH;
//If this is an overflow page, the real data length is always
//LENGTH_LINK byte for the page number of the overflow page
final short realLen = (vlen == OVERFLOW ? LENGTH_OVERFLOW_LOCATION : vlen);
//Check if we have room in the current split page
if (nextSplitPage.len + LENGTH_TID + LENGTH_DATA_LENGTH +
LENGTH_ORIGINAL_LOCATION + realLen > fileHeader.getWorkSize()) {
//Not enough room in the split page: append a new page
final DOMPage newPage = new DOMPage();
final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
if (isTransactional && transaction != null) {
Loggable loggable = new CreatePageLoggable(transaction,
nextSplitPage.getPageNum(), newPage.getPageNum(),
Page.NO_PAGE, pageHeader.getCurrentTupleID());
writeToLog(loggable, firstSplitPage.page);
loggable = new UpdateHeaderLoggable(transaction,
nextSplitPage.getPageHeader().getPreviousDataPage(),
nextSplitPage.getPageNum(), newPage.getPageNum(),
nextSplitPage.getPageHeader().getPreviousDataPage(),
nextSplitPage.getPageHeader().getNextDataPage());
writeToLog(loggable, nextSplitPage.page);
}
newPageHeader.setNextTupleID(pageHeader.getCurrentTupleID());
newPageHeader.setPrevDataPage(nextSplitPage.getPageNum());
//No next page ? Well... we might want to enforce the value -pb
LOG.debug("Creating new split page: " + newPage.getPageNum());
nextSplitPage.getPageHeader().setNextDataPage(newPage.getPageNum());
nextSplitPage.getPageHeader().setDataLength(nextSplitPage.len);
nextSplitPage.getPageHeader().setRecordCount(splitRecordCount);
nextSplitPage.cleanUp();
nextSplitPage.setDirty(true);
dataCache.add(nextSplitPage);
dataCache.add(newPage);
nextSplitPage = newPage;
splitRecordCount = 0;
}
/*
* If the record has already been relocated,
* read the original storage address and update the link there.
*/
if (ItemId.isRelocated(tupleID)) {
backLink = ByteConversion.byteToLong(oldData, pos);
pos += LENGTH_ORIGINAL_LOCATION;
final RecordPos originalRecordPos = findRecord(backLink, false);
final long oldLink = ByteConversion.byteToLong(originalRecordPos.getPage().data,
originalRecordPos.offset);
final long forwardLink = StorageAddress.createPointer((int)
nextSplitPage.getPageNum(), ItemId.getId(tupleID));
if (isTransactional && transaction != null) {
final Loggable loggable = new UpdateLinkLoggable(transaction,
originalRecordPos.getPage().getPageNum(), originalRecordPos.offset,
forwardLink, oldLink);
writeToLog(loggable, originalRecordPos.getPage().page);
}
ByteConversion.longToByte(forwardLink, originalRecordPos.getPage().data,
originalRecordPos.offset);
originalRecordPos.getPage().setDirty(true);
dataCache.add(originalRecordPos.getPage());
} else {
backLink = StorageAddress.createPointer((int) rec.getPage().getPageNum(),
ItemId.getId(tupleID));
}
/*
* Save the record to the split page:
*/
if (isTransactional && transaction != null) {
//What does this "log" mean really ? Original ? -pb
final byte[] logData = new byte[realLen];
System.arraycopy(oldData, pos, logData, 0, realLen);
final Loggable loggable = new AddMovedValueLoggable(transaction,
nextSplitPage.getPageNum(), tupleID, logData, backLink);
writeToLog(loggable, nextSplitPage.page);
}
//Set the relocated flag and save the item id
ByteConversion.shortToByte(ItemId.setIsRelocated(tupleID), nextSplitPage.data,
nextSplitPage.len);
nextSplitPage.len += LENGTH_TID;
//Save length field
ByteConversion.shortToByte(vlen, nextSplitPage.data, nextSplitPage.len);
nextSplitPage.len += LENGTH_DATA_LENGTH;
//Save link to the original page
ByteConversion.longToByte(backLink, nextSplitPage.data, nextSplitPage.len);
nextSplitPage.len += LENGTH_ORIGINAL_LOCATION;
//Now save the data
try {
System.arraycopy(oldData, pos, nextSplitPage.data, nextSplitPage.len, realLen);
} catch (final ArrayIndexOutOfBoundsException e) {
SanityCheck.TRACE("pos = " + pos + "; len = " + nextSplitPage.len +
"; currentLen = " + realLen + "; tupleID = " + tupleID +
"; page = " + rec.getPage().getPageNum());
throw e;
}
nextSplitPage.len += realLen;
pos += realLen;
// save a link pointer in the original page if the record has not
// been relocated before.
if (!ItemId.isRelocated(tupleID)) {
// the link doesn't fit into the old page. Append a new page
if (rec.getPage().len + LENGTH_TID + LENGTH_FORWARD_LOCATION > fileHeader.getWorkSize()) {
final DOMPage newPage = new DOMPage();
final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
if (isTransactional && transaction != null) {
Loggable loggable = new CreatePageLoggable(transaction,
rec.getPage().getPageNum(), newPage.getPageNum(),
pageHeader.getNextDataPage(), pageHeader.getCurrentTupleID());
writeToLog(loggable, firstSplitPage.page);
loggable = new UpdateHeaderLoggable(transaction,
pageHeader.getPreviousDataPage(),
rec.getPage().getPageNum(), newPage.getPageNum(),
pageHeader.getPreviousDataPage(), pageHeader.getNextDataPage());
writeToLog(loggable, nextSplitPage.page);
}
newPageHeader.setNextTupleID(pageHeader.getCurrentTupleID());
newPageHeader.setPrevDataPage(rec.getPage().getPageNum());
newPageHeader.setNextDataPage(pageHeader.getNextDataPage());
LOG.debug("Creating new page after split: " + newPage.getPageNum());
pageHeader.setNextDataPage(newPage.getPageNum());
pageHeader.setDataLength(rec.getPage().len);
pageHeader.setRecordCount(countRecordsInPage(rec.getPage()));
rec.getPage().cleanUp();
rec.getPage().setDirty(true);
dataCache.add(rec.getPage());
//switch record to new page...
rec.setPage(newPage);
rec.getPage().len = 0;
dataCache.add(newPage);
}
final long forwardLink = StorageAddress.createPointer(
(int) nextSplitPage.getPageNum(), ItemId.getId(tupleID));
if (isTransactional && transaction != null) {
final Loggable loggable = new AddLinkLoggable(transaction,
rec.getPage().getPageNum(), tupleID, forwardLink);
writeToLog(loggable, rec.getPage().page);
}
ByteConversion.shortToByte(ItemId.setIsLink(tupleID), rec.getPage().data, rec.getPage().len);
rec.getPage().len += LENGTH_TID;
ByteConversion.longToByte(forwardLink, rec.getPage().data, rec.getPage().len);
rec.getPage().len += LENGTH_FORWARD_LOCATION;
}
} //End of for loop: finished copying data
//Link the split pages to the original page
if (nextSplitPage.len == 0) {
LOG.warn("Page " + nextSplitPage.getPageNum() + " is empty. Remove it");
//If nothing has been copied to the last split page, remove it
if (nextSplitPage == firstSplitPage)
{firstSplitPage = null;}
try {
unlinkPages(nextSplitPage.page);
} catch (final IOException e) {
LOG.warn("Failed to remove empty split page: " + e.getMessage(), e);
}
nextSplitPage.setDirty(true);
dataCache.remove(nextSplitPage);
nextSplitPage = null;
} else {
if (isTransactional && transaction != null) {
final Loggable loggable = new UpdateHeaderLoggable(transaction,
nextSplitPage.getPageHeader().getPreviousDataPage(), nextSplitPage.getPageNum(),
pageHeader.getNextDataPage(), nextSplitPage.getPageHeader().getPreviousDataPage(),
nextSplitPage.getPageHeader().getNextDataPage());
writeToLog(loggable, nextSplitPage.page);
}
nextSplitPage.getPageHeader().setDataLength(nextSplitPage.len);
nextSplitPage.getPageHeader().setNextDataPage(pageHeader.getNextDataPage());
nextSplitPage.getPageHeader().setRecordCount(splitRecordCount);
nextSplitPage.cleanUp();
nextSplitPage.setDirty(true);
dataCache.add(nextSplitPage);
if (isTransactional && transaction != null) {
final DOMFilePageHeader fisrtPageHeader = firstSplitPage.getPageHeader();
final Loggable loggable = new UpdateHeaderLoggable(transaction,
rec.getPage().getPageNum(), firstSplitPage.getPageNum(),
fisrtPageHeader.getNextDataPage(), fisrtPageHeader.getPreviousDataPage(),
fisrtPageHeader.getNextDataPage());
writeToLog(loggable, nextSplitPage.page);
}
firstSplitPage.getPageHeader().setPrevDataPage(rec.getPage().getPageNum());
if (nextSplitPage != firstSplitPage) {
firstSplitPage.setDirty(true);
dataCache.add(firstSplitPage);
}
}
final long nextPageNum = pageHeader.getNextDataPage();
if (Page.NO_PAGE != nextPageNum) {
final DOMPage nextPage = getDOMPage(nextPageNum);
if (isTransactional && transaction != null) {
final Loggable loggable = new UpdateHeaderLoggable(transaction,
nextSplitPage.getPageNum(), nextPage.getPageNum(),
Page.NO_PAGE, nextPage.getPageHeader().getPreviousDataPage(),
nextPage.getPageHeader().getNextDataPage());
writeToLog(loggable, nextPage.page);
}
nextPage.getPageHeader().setPrevDataPage(nextSplitPage.getPageNum());
nextPage.setDirty(true);
dataCache.add(nextPage);
}
rec.setPage(getDOMPage(rec.getPage().getPageNum()));
if (firstSplitPage != null) {
if (isTransactional && transaction != null) {
final Loggable loggable = new UpdateHeaderLoggable(transaction,
pageHeader.getPreviousDataPage(), rec.getPage().getPageNum(),
firstSplitPage.getPageNum(), pageHeader.getPreviousDataPage(),
pageHeader.getNextDataPage());
writeToLog(loggable, rec.getPage().page);
}
pageHeader.setNextDataPage(firstSplitPage.getPageNum());
}
pageHeader.setDataLength(rec.getPage().len);
pageHeader.setRecordCount(countRecordsInPage(rec.getPage()));
rec.getPage().cleanUp();
rec.offset = rec.getPage().len;
return rec;
}
/**
 * Counts the records stored in a page by walking its tuples from the
 * start of the data area up to the header's recorded data length.
 *
 * @param page the data page to scan
 * @return the number of records found in the page
 */
private short countRecordsInPage(DOMPage page) {
    final int dataLength = page.getPageHeader().getDataLength();
    short count = 0;
    int pos = 0;
    while (pos < dataLength) {
        final short tupleID = ByteConversion.byteToShort(page.data, pos);
        pos += LENGTH_TID;
        if (ItemId.isLink(tupleID)) {
            // a link record stores only a forward pointer, no value
            pos += LENGTH_FORWARD_LOCATION;
        } else {
            final short vlen = ByteConversion.byteToShort(page.data, pos);
            pos += LENGTH_DATA_LENGTH;
            if (ItemId.isRelocated(tupleID)) {
                // relocated records carry a back-link before the value
                pos += LENGTH_ORIGINAL_LOCATION;
            }
            // overflow values store a page pointer instead of the data itself
            pos += (vlen == OVERFLOW) ? LENGTH_OVERFLOW_LOCATION : vlen;
        }
        count++;
    }
    return count;
}
/**
 * Produces a human-readable dump of the given page for debugging: each
 * tuple id with its flags (L = link, R = relocated) and a short preview
 * of the stored node, followed by summary statistics (record count,
 * current tuple id, data length, position of the last non-zero byte).
 *
 * @param page the data page to dump
 * @return a textual description of the page contents
 */
public String debugPageContents(DOMPage page) {
    final StringBuilder buf = new StringBuilder();
    buf.append("Page " + page.getPageNum() + ": ");
    short count = 0;
    final int dataLength = page.getPageHeader().getDataLength();
    // Walk every tuple stored in the page's data area
    for (int pos = 0; pos < dataLength; count++) {
        buf.append(pos + "/");
        final short tupleID = ByteConversion.byteToShort(page.data, pos);
        pos += LENGTH_TID;
        buf.append(ItemId.getId(tupleID));
        // Flag markers: L = link record, R = relocated record
        if (ItemId.isLink(tupleID)) {
            buf.append("L");
        } else if (ItemId.isRelocated(tupleID)) {
            buf.append("R");
        }
        if (ItemId.isLink(tupleID)) {
            // A link record holds only a forward pointer to the real location
            final long forwardLink = ByteConversion.byteToLong(page.data, pos);
            buf.append(':').append(forwardLink).append(" ");
            pos += LENGTH_FORWARD_LOCATION;
        } else {
            final short valueLength = ByteConversion.byteToShort(page.data, pos);
            pos += LENGTH_DATA_LENGTH;
            if (valueLength < 0) {
                LOG.warn("Illegal length: " + valueLength);
                return buf.append("[Illegal length : " + valueLength + "] ").toString();
                //Probably unable to continue...
            } else if (ItemId.isRelocated(tupleID)) {
                //TODO : output to buffer ?
                // Skip the back-link of a relocated record; its value follows
                pos += LENGTH_ORIGINAL_LOCATION;
            } else {
                buf.append("[");
                // The first value byte encodes the node type signature
                switch (Signatures.getType(page.data[pos])) {
                case Node.ELEMENT_NODE :
                {
                    buf.append("element ");
                    int readOffset = pos;
                    readOffset += 1; // skip the signature byte
                    final int children = ByteConversion.byteToInt(page.data, readOffset);
                    readOffset += ElementImpl.LENGTH_ELEMENT_CHILD_COUNT;
                    final int dlnLen = ByteConversion.byteToShort(page.data, readOffset);
                    readOffset += NodeId.LENGTH_NODE_ID_UNITS;
                    //That might happen during recovery runs : TODO, investigate
                    if (owner == null) {
                        buf.append("(Can't read data, owner is null)");
                    } else {
                        try {
                            final NodeId nodeId = ((NativeBroker)owner).getBrokerPool()
                                .getNodeFactory().createFromData(dlnLen, page.data, readOffset);
                            readOffset += nodeId.size();
                            buf.append("(" + nodeId.toString() + ")");
                            final short attributes = ByteConversion.byteToShort(page.data, readOffset);
                            buf.append(" children: " + children);
                            buf.append(" attributes: " + attributes);
                        } catch (final Exception e) {
                            //TODO : more friendly message. Provide the array of bytes ?
                            buf.append("(Unable to read the node ID at: " + readOffset);
                            buf.append(" children : " + children);
                            //Probably a wrong offset so... don't read it
                            buf.append(" attributes : unknown");
                        }
                    }
                    break;
                }
                case Node.TEXT_NODE:
                case Node.CDATA_SECTION_NODE:
                {
                    if (Signatures.getType(page.data[pos]) == Node.TEXT_NODE)
                        {buf.append("text ");}
                    else
                        {buf.append("CDATA ");}
                    int readOffset = pos;
                    readOffset += 1; // skip the signature byte
                    final int dlnLen = ByteConversion.byteToShort(page.data, readOffset);
                    readOffset += NodeId.LENGTH_NODE_ID_UNITS;
                    //That might happen during recovery runs : TODO, investigate
                    if (owner == null) {
                        buf.append("(Can't read data, owner is null)");
                    } else {
                        try {
                            final NodeId nodeId = ((NativeBroker)owner).getBrokerPool()
                                .getNodeFactory().createFromData(dlnLen, page.data, readOffset);
                            readOffset += nodeId.size();
                            buf.append("(" + nodeId.toString() + ")");
                            // Preview the text content, truncated to keep the dump short
                            final ByteArrayOutputStream os = new ByteArrayOutputStream();
                            os.write(page.data, readOffset, valueLength - (readOffset - pos));
                            String value = new String(os.toByteArray(),UTF_8);
                            if (value.length() > 15) {
                                value = value.substring(0,8) + "..." + value.substring(value.length() - 8);
                            }
                            buf.append(":'" + value + "'");
                        } catch (final Exception e) {
                            //TODO : more friendly message. Provide the array of bytes ?
                            buf.append("(unable to read the node ID at : " + readOffset);
                        }
                    }
                    break;
                }
                case Node.ATTRIBUTE_NODE:
                {
                    //NOTE(review): "[" was already appended before the switch, so
                    //attributes print with doubled brackets — confirm this is intended
                    buf.append("[");
                    buf.append("attribute ");
                    int readOffset = pos;
                    // low 2 bits: id size type; bit 0x10: namespace flag
                    final byte idSizeType = (byte) (page.data[readOffset] & 0x3);
                    final boolean hasNamespace = (page.data[readOffset] & 0x10) == 0x10;
                    readOffset += 1;
                    final int dlnLen = ByteConversion.byteToShort(page.data, readOffset);
                    readOffset += NodeId.LENGTH_NODE_ID_UNITS;
                    //That might happen during recovery runs : TODO, investigate
                    if (owner == null) {
                        buf.append("(can't read data, owner is null)");
                    } else {
                        try {
                            final NodeId nodeId = ((NativeBroker)owner).getBrokerPool()
                                .getNodeFactory().createFromData(dlnLen, page.data, readOffset);
                            readOffset += nodeId.size();
                            buf.append("(" + nodeId.toString() + ")");
                            readOffset += Signatures.getLength(idSizeType);
                            if (hasNamespace) {
                                //Untested
                                final short NSId = ByteConversion.byteToShort(page.data, readOffset);
                                readOffset += AttrImpl.LENGTH_NS_ID;
                                final short prefixLen = ByteConversion.byteToShort(page.data, readOffset);
                                readOffset += AttrImpl.LENGTH_PREFIX_LENGTH + prefixLen;
                                final ByteArrayOutputStream os = new ByteArrayOutputStream();
                                //NOTE(review): readOffset has already been advanced past the
                                //prefix bytes, so this length computation looks wrong — verify
                                os.write(page.data, readOffset, valueLength - (readOffset - prefixLen));
                                String prefix = new String(os.toByteArray(), UTF_8);
                                final String NsURI = ((NativeBroker)owner).getBrokerPool()
                                    .getSymbols().getNamespace(NSId);
                                buf.append(prefix + "{" + NsURI + "}");
                            }
                            // Preview the attribute value, truncated to keep the dump short
                            final ByteArrayOutputStream os = new ByteArrayOutputStream();
                            os.write(page.data, readOffset, valueLength - (readOffset - pos));
                            String value = new String(os.toByteArray(),UTF_8);
                            if (value.length() > 15) {
                                value = value.substring(0,8) + "..." + value.substring(value.length() - 8);
                            }
                            buf.append(":'" + value + "'");
                        } catch (final Exception e) {
                            //TODO : more friendly message. Provide the array of bytes ?
                            buf.append("(unable to read the node ID at : " + readOffset);
                        }
                    }
                    buf.append("] ");
                    break;
                }
                default:
                    buf.append("Unknown node type !");
                }
                buf.append( "] ");
            }
            pos += valueLength;
        }
    }
    buf.append("; records in page: " + count +
        " (header says: " + page.getPageHeader().getRecordCount() + ")");
    buf.append("; currentTupleID: " + page.getPageHeader().getCurrentTupleID());
    buf.append("; data length: " + page.getPageHeader().getDataLength());
    // Report the last non-zero byte to expose trailing garbage beyond the data length
    for (int i = page.data.length ; i > 0 ; i--) {
        if (page.data[i - 1] != 0) {
            buf.append(" (last non-zero byte: " + i + ")");
            break;
        }
    }
    return buf.toString();
}
/**
 * Creates the file header used by this file.
 *
 * @param pageSize the page size to use
 * @return a new btree file header
 */
public FileHeader createFileHeader(int pageSize) {
    // 1024: first BTreeFileHeader constructor argument — its meaning is
    // defined by BTreeFileHeader (presumably a page count or key size; confirm)
    return new BTreeFileHeader(1024, pageSize);
}
/**
 * Releases the given page (chain) back to the underlying paged file.
 * Pure delegation to the superclass implementation.
 *
 * @param page the page to unlink
 * @throws IOException if the page cannot be released
 */
protected void unlinkPages(Page page) throws IOException {
    super.unlinkPages(page);
}
/**
 * Creates a page header of the type used by this file.
 *
 * @return a new DOM file page header
 */
public PageHeader createPageHeader() {
    return new DOMFilePageHeader();
}
/**
 * Collects all index keys that match the given query.
 *
 * @param query the index query to evaluate
 * @return the list of matching keys
 * @throws IOException if an I/O error occurs while reading the index
 * @throws BTreeException if the underlying btree reports an error
 */
public ArrayList<Value> findKeys(IndexQuery query)
    throws IOException, BTreeException {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    final FindCallback collector = new FindCallback(FindCallback.KEYS);
    try {
        query(query, collector);
    } catch (final TerminatedException e) {
        // not expected: nothing terminates this query
        LOG.error("Method terminated");
    }
    return collector.getValues();
}
/**
 * Retrieve the storage address of a node. The node is first looked up in
 * the btree index; if it is not indexed, the nearest indexed ancestor is
 * located and its subtree is streamed until the node is found.
 *
 * @param broker the broker used to stream over the ancestor's subtree
 * @param node the node whose storage address is required
 * @return the storage address, or KEY_NOT_FOUND if the node could not be located
 * @throws IOException if an I/O error occurs
 * @throws BTreeException if neither the node nor any indexed ancestor exists
 */
protected long findValue(DBBroker broker, NodeProxy node)
    throws IOException, BTreeException {
    if (!lock.hasLock())
        {LOG.warn("The file doesn't own a lock");}
    final DocumentImpl doc = node.getDocument();
    final NodeRef nodeRef = new NativeBroker.NodeRef(doc.getDocId(), node.getNodeId());
    // first try to find the node in the index
    final long pointer = findValue(nodeRef);
    if (pointer == KEY_NOT_FOUND) {
        // node not found in index: try to find the nearest available
        // ancestor and traverse it
        NodeId nodeID = node.getNodeId();
        long parentPointer = KEY_NOT_FOUND;
        do {
            nodeID = nodeID.getParentId();
            if (nodeID == null) {
                // NOTE(review): nodeID is null here, so these messages print
                // "null"; node.getNodeId() was probably intended — confirm
                SanityCheck.TRACE("Node " + node.getDocument().getDocId() + ":" +
                    nodeID + " not found.");
                throw new BTreeException("Node " + nodeID + " not found.");
            }
            if (nodeID == NodeId.DOCUMENT_NODE) {
                // reached the document node without an indexed ancestor
                // (identity comparison: assumes DOCUMENT_NODE is a singleton — confirm)
                SanityCheck.TRACE("Node " + node.getDocument().getDocId() + ":" +
                    nodeID + " not found.");
                throw new BTreeException("Node " + nodeID + " not found.");
            }
            final NativeBroker.NodeRef parentRef = new NativeBroker.NodeRef(doc.getDocId(), nodeID);
            try {
                parentPointer = findValue(parentRef);
            } catch (final BTreeException bte) {
                LOG.error("report me", bte);
            }
        } while (parentPointer == KEY_NOT_FOUND);
        try {
            // stream over the ancestor's subtree until the wanted node id shows up
            final NodeProxy parent = new NodeProxy(doc, nodeID, parentPointer);
            final EmbeddedXMLStreamReader cursor = broker.getXMLStreamReader(parent, true);
            while(cursor.hasNext()) {
                final int status = cursor.next();
                if (status != XMLStreamReader.END_ELEMENT) {
                    final NodeId nextId = (NodeId) cursor.getProperty(EmbeddedXMLStreamReader.PROPERTY_NODE_ID);
                    if (nextId.equals(node.getNodeId())) {
                        return cursor.getCurrentPosition();
                    }
                }
            }
            if (LOG.isDebugEnabled())
                {LOG.debug("Node " + node.getNodeId() + " could not be found. Giving up. This is usually not an error.");}
            return KEY_NOT_FOUND;
        } catch (final XMLStreamException e) {
            SanityCheck.TRACE("Node " + node.getDocument().getDocId() + ":" + node.getNodeId() + " not found.");
            throw new BTreeException("Node " + node.getNodeId() + " not found.");
        }
    } else {
        return pointer;
    }
}
/**
 * Finds all values matching the given index query.
 *
 * @param query the index query to evaluate
 * @return the list of matching values
 * @throws IOException if an I/O error occurs while reading the index
 * @throws BTreeException if the underlying btree reports an error
 */
public ArrayList<Value> findValues(IndexQuery query) throws IOException,
    BTreeException {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    final FindCallback collector = new FindCallback(FindCallback.VALUES);
    try {
        query(query, collector);
    } catch (final TerminatedException e) {
        // not expected: nothing terminates this query
        LOG.warn("Method terminated");
    }
    return collector.getValues();
}
/**
 * Flushes the transaction log (when the file is transactional) and then
 * writes all dirty buffers — btree and data cache — to disk.
 *
 * @return true if anything was actually written
 * @throws DBException if flushing fails
 */
public boolean flush() throws DBException {
    //TODO : record transaction as a valuable flush ?
    if (isTransactional) {
        logManager.flushToLog(true);
    }
    boolean flushed = false;
    if (!BrokerPool.FORCE_CORRUPTION) {
        // deliberately evaluate both flushes; neither may be skipped
        final boolean btreeFlushed = super.flush();
        final boolean cacheFlushed = dataCache.flush();
        flushed = btreeFlushed | cacheFlushed;
    }
    return flushed;
}
/**
 * Logs buffer occupation and cache-efficiency statistics for the data
 * page cache, in addition to the superclass's btree statistics.
 */
public void printStatistics() {
    super.printStatistics();
    final NumberFormat percent = NumberFormat.getPercentInstance();
    final NumberFormat number = NumberFormat.getInstance();
    final StringBuilder stats = new StringBuilder();
    stats.append(getFile().getName()).append(" DATA ");
    stats.append("Buffers occupation : ");
    if (dataCache.getBuffers() == 0 && dataCache.getUsedBuffers() == 0) {
        stats.append("N/A");
    } else {
        stats.append(percent.format(dataCache.getUsedBuffers()/(float)dataCache.getBuffers()));
    }
    stats.append(" (" + number.format(dataCache.getUsedBuffers()) + " out of " + number.format(dataCache.getBuffers()) + ")");
    stats.append(" Cache efficiency : ");
    if (dataCache.getHits() == 0 && dataCache.getFails() == 0) {
        stats.append("N/A");
    } else {
        stats.append(percent.format(dataCache.getHits()/(float)(dataCache.getFails() + dataCache.getHits())));
    }
    LOGSTATS.info(stats.toString());
}
/**
 * Returns usage and hit/miss statistics of the data page cache.
 *
 * @return a snapshot of the data cache buffer statistics
 */
public BufferStats getDataBufferStats() {
    return new BufferStats(dataCache.getBuffers(), dataCache.getUsedBuffers(),
        dataCache.getHits(), dataCache.getFails());
}
/**
 * Retrieve the value stored for the given btree key.
 *
 * @param key the key to look up
 * @return the stored value, or null if the key is unknown or an error occurs
 */
public Value get(Value key) {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    try {
        final long pointer = findValue(key);
        if (pointer != KEY_NOT_FOUND) {
            return get(pointer);
        }
        LOG.warn("Value not found : " + key);
    } catch (final BTreeException bte) {
        LOG.error(bte);
    } catch (final IOException ioe) {
        LOG.error(ioe);
    }
    return null;
}
/**
 * Retrieve the stored value for the given node proxy, resolving its
 * storage address via {@link #findValue(DBBroker, NodeProxy)}.
 *
 * @param broker the broker used to resolve the node's address
 * @param node the node to retrieve
 * @return the stored value, or null if it cannot be found or an error occurs
 */
public Value get(DBBroker broker, NodeProxy node) {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    try {
        final long address = findValue(broker, node);
        return address == KEY_NOT_FOUND ? null : get(address);
    } catch (final BTreeException bte) {
        //TODO : throw exception ?
        LOG.warn(bte);
        return null;
    } catch (final IOException ioe) {
        //TODO : throw exception ?
        LOG.warn(ioe);
        return null;
    }
}
/**
 * Retrieve node at virtual address, emitting a trace warning if missing.
 *
 * @param pointer The virtual address
 * @return The node, or null if it cannot be found
 */
public Value get(long pointer) {
    return get(pointer, true);
}
/**
 * Retrieve node at virtual address.
 *
 * @param pointer The virtual address
 * @param warnIfMissing Whether or not a warning should be output
 *  if the node can not be found
 * @return The node, or null if it cannot be found
 */
public Value get(long pointer, boolean warnIfMissing) {
    if (!lock.hasLock())
        {LOG.warn("The file doesn't own a lock");}
    final RecordPos rec = findRecord(pointer);
    if (rec == null) {
        if (warnIfMissing)
            {SanityCheck.TRACE("Object at " + StorageAddress.toString(pointer) + " not found.");}
        //TODO : throw exception ?
        return null;
    }
    // the record starts with its value length
    final short vlen = ByteConversion.byteToShort(rec.getPage().data, rec.offset);
    rec.offset += LENGTH_DATA_LENGTH;
    // relocated records carry a back-link before the value: skip it
    if (ItemId.isRelocated(rec.getTupleID()))
        {rec.offset += LENGTH_ORIGINAL_LOCATION;}
    Value value;
    if (vlen == OVERFLOW) {
        // the value lives on overflow pages; only their address is stored here
        final long pageNo = ByteConversion.byteToLong(rec.getPage().data, rec.offset);
        final byte[] data = getOverflowValue(pageNo);
        value = new Value(data);
    } else {
        value = new Value(rec.getPage().data, rec.offset, vlen);
    }
    value.setAddress(pointer);
    return value;
}
/**
 * Writes a human-readable representation of a leaf key to the writer:
 * the document id (first 4 key bytes) followed by the node id bits.
 * Branch keys are delegated to the superclass.
 *
 * @param writer the target writer
 * @param key the key to dump
 * @param status BRANCH or leaf indicator
 * @throws IOException if writing fails
 */
protected void dumpValue(Writer writer, Value key, int status) throws IOException {
    if (status == BRANCH) {
        super.dumpValue(writer, key, status);
        return;
    }
    if (key.getLength() == 0) {
        return;
    }
    // the first 4 bytes of a leaf key hold the document id
    writer.write(Integer.toString(ByteConversion.byteToInt(key.data(), key.start())));
    writer.write(':');
    try {
        final int bytes = key.getLength() - 4;
        final byte[] data = key.data();
        for (int i = 0; i < bytes; i++) {
            writer.write(DLNBase.toBitString(data[key.start() + 4 + i]));
        }
    } catch (final Exception e) {
        // log with full context and cause instead of printStackTrace()/System.out
        LOG.error(e.getMessage() + ": doc: " +
            Integer.toString(ByteConversion.byteToInt(key.data(), key.start())), e);
    }
}
/**
 * Put a new key/value pair: the value is appended to the data file and
 * its storage address is registered in the btree under the given key.
 *
 * @param transaction the current transaction, may be null
 * @param key the btree key
 * @param value the value bytes to store
 * @return the storage address of the value, or KEY_NOT_FOUND on failure
 * @throws ReadOnlyException if the file is read-only
 */
public long put(Txn transaction, Value key, byte[] value)
    throws ReadOnlyException {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    final long pointer = add(transaction, value);
    try {
        addValue(transaction, key, pointer);
        return pointer;
    } catch (final IOException ioe) {
        //TODO : throw exception ?
        LOG.error(ioe);
    } catch (final BTreeException bte) {
        //TODO : throw exception ?
        LOG.error(bte);
    }
    return KEY_NOT_FOUND;
}
/**
* Physically remove a node. The data of the node will be removed from the
* page and the occupied space is freed.
*/
//Unused ? -pb
//public void remove(Value key) {
//if (!lock.isLockedForWrite())
//LOG.warn("The file doesn't own a write lock");
//remove(null, key);
//}
/**
 * Removes the value registered under the given key: the node data is
 * deleted from its page and the key is removed from the btree.
 *
 * @param transaction the current transaction, may be null
 * @param key the btree key of the value to remove
 */
public void remove(Txn transaction, Value key) {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    try {
        final long pointer = findValue(key);
        if (pointer != KEY_NOT_FOUND) {
            remove(transaction, key, pointer);
        } else {
            //TODO : throw exception ?
            LOG.error("Value not found: " + key);
        }
    } catch (final BTreeException bte) {
        //TODO : throw exception ?
        LOG.warn(bte);
    } catch (final IOException ioe) {
        //TODO : throw exception ?
        LOG.warn(ioe);
    }
}
/**
 * Reads a value that was stored on a chain of overflow pages.
 *
 * @param pointer address of the first overflow page
 * @return the raw value bytes, or null if reading fails
 */
protected byte[] getOverflowValue(long pointer) {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    try {
        return new OverflowDOMPage(pointer).read();
    } catch (final IOException e) {
        //TODO : throw exception ?
        LOG.warn("IO error while loading overflow value", e);
        return null;
    }
}
/**
 * Remove the overflow value chain starting at the given address.
 *
 * @param transaction The current transaction
 * @param pointer The pointer to the value
 */
public void removeOverflowValue(Txn transaction, long pointer) {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    try {
        new OverflowDOMPage(pointer).delete(transaction);
    } catch (final IOException e) {
        LOG.error("IO error while removing overflow value", e);
    }
}
/**
 * Remove the link record at the specified address. The link's bytes are
 * cut out of its page; if the page becomes empty it is removed entirely.
 *
 * @param transaction the current transaction, may be null
 * @param pointer the storage address of the link record
 */
private void removeLink(Txn transaction, long pointer) {
    final RecordPos rec = findRecord(pointer, false);
    final DOMFilePageHeader pageHeader = rec.getPage().getPageHeader();
    if (isTransactional && transaction != null) {
        // log the removed link bytes so the operation can be undone
        final byte[] data = new byte[LENGTH_LINK];
        System.arraycopy(rec.getPage().data, rec.offset, data, 0, LENGTH_LINK);
        //Position the stream at the very beginning of the record
        final RemoveValueLoggable loggable = new RemoveValueLoggable(transaction,
            rec.getPage().getPageNum(), rec.getTupleID(), rec.offset - LENGTH_TID, data, false, 0);
        writeToLog(loggable, rec.getPage().page);
    }
    final int end = rec.offset + LENGTH_LINK;
    //Position the stream at the very beginning of the record
    // shift the remaining page data left, over the removed tuple id + link
    System.arraycopy(rec.getPage().data, end, rec.getPage().data,
        rec.offset - LENGTH_TID, rec.getPage().len - end);
    rec.getPage().len = rec.getPage().len - (LENGTH_TID + LENGTH_LINK);
    if (rec.getPage().len < 0)
        {LOG.warn("Page length < 0");}
    pageHeader.setDataLength(rec.getPage().len);
    pageHeader.decRecordCount();
    if (rec.getPage().len == 0) {
        // page is empty now: remove it from the page chain
        if (pageHeader.getRecordCount() > 0)
            {LOG.warn("Empty page seems to have record!");}
        if (isTransactional && transaction != null) {
            final RemoveEmptyPageLoggable loggable = new RemoveEmptyPageLoggable(
                transaction, rec.getPage().getPageNum(),
                pageHeader.getPreviousDataPage(), pageHeader.getNextDataPage());
            writeToLog(loggable, rec.getPage().page);
        }
        removePage(rec.getPage());
        rec.setPage(null);
    } else {
        rec.getPage().setDirty(true);
        dataCache.add(rec.getPage());
    }
}
/**
* Physically remove a node. The data of the node will be removed from the
* page and the occupied space is freed.
*
* @param pointer
*/
//Seems to be unused -pb
//public void removeNode(long pointer) {
//removeNode(null, pointer);
//}
/**
 * Physically removes the node stored at the given address: any back-link
 * at its original location and any overflow pages are deleted, the value
 * bytes are cut out of the page, and an empty page is removed entirely.
 *
 * @param transaction the current transaction, may be null
 * @param pointer the storage address of the node to remove
 * @throws RuntimeException if the address points at a link record
 */
public void removeNode(Txn transaction, long pointer) {
    if (!lock.isLockedForWrite())
        {LOG.warn("The file doesn't own a write lock");}
    final RecordPos rec = findRecord(pointer);
    //Position the stream at the very beginning of the record
    final int startOffset = rec.offset - LENGTH_TID;
    final DOMFilePageHeader pageHeader = rec.getPage().getPageHeader();
    final short vlen = ByteConversion.byteToShort(rec.getPage().data, rec.offset);
    rec.offset += LENGTH_DATA_LENGTH;
    short realLen = vlen;
    if (ItemId.isLink(rec.getTupleID())) {
        // links are removed through removeLink(), not here
        throw new RuntimeException("Cannot remove link ...");
    }
    boolean isOverflow = false;
    long backLink = 0;
    if (ItemId.isRelocated(rec.getTupleID())) {
        // also remove the link record left at the original location
        backLink = ByteConversion.byteToLong(rec.getPage().data, rec.offset);
        rec.offset += LENGTH_ORIGINAL_LOCATION;
        realLen += LENGTH_ORIGINAL_LOCATION;
        removeLink(transaction, backLink);
    }
    if (vlen == OVERFLOW) {
        // remove overflow value
        isOverflow = true;
        final long overflowLink = ByteConversion.byteToLong(rec.getPage().data, rec.offset);
        rec.offset += LENGTH_OVERFLOW_LOCATION;
        try {
            final OverflowDOMPage overflow = new OverflowDOMPage(overflowLink);
            overflow.delete(transaction);
        } catch (final IOException e) {
            LOG.warn("IO error while removing overflow page", e);
            //TODO : rethrow exception ? -pb
        }
        realLen += LENGTH_OVERFLOW_LOCATION;
    }
    if (isTransactional && transaction != null) {
        // log the removed value bytes so the operation can be undone
        final byte[] data = new byte[vlen == OVERFLOW ? LENGTH_OVERFLOW_LOCATION : vlen];
        System.arraycopy(rec.getPage().data, rec.offset, data, 0,
            vlen == OVERFLOW ? LENGTH_OVERFLOW_LOCATION : vlen);
        final RemoveValueLoggable loggable = new RemoveValueLoggable(transaction,
            rec.getPage().getPageNum(), rec.getTupleID(), startOffset, data, isOverflow, backLink);
        writeToLog(loggable, rec.getPage().page);
    }
    final int dataLength = pageHeader.getDataLength();
    final int end = startOffset + LENGTH_TID + LENGTH_DATA_LENGTH + realLen;
    // remove old value: shift the remaining page data left over the record
    System.arraycopy(rec.getPage().data, end, rec.getPage().data, startOffset, dataLength - end);
    rec.getPage().setDirty(true);
    rec.getPage().len = dataLength - (LENGTH_TID + LENGTH_DATA_LENGTH + realLen);
    if (rec.getPage().len < 0) {
        LOG.error("Page length < 0");
        //TODO : throw exception ? -pb
    }
    // NOTE(review): setDirty(true) is invoked redundantly several times here
    rec.getPage().setDirty(true);
    pageHeader.setDataLength(rec.getPage().len);
    pageHeader.decRecordCount();
    if (rec.getPage().len == 0) {
        // page is empty now: remove it from the page chain
        LOG.debug("Removing page " + rec.getPage().getPageNum());
        if (pageHeader.getRecordCount() > 0)
            {LOG.warn("Empty page seems to have record !");}
        if (isTransactional && transaction != null) {
            final RemoveEmptyPageLoggable loggable = new RemoveEmptyPageLoggable(
                transaction, rec.getPage().getPageNum(),
                rec.getPage().pageHeader.getPreviousDataPage(),
                rec.getPage().pageHeader.getNextDataPage());
            writeToLog(loggable, rec.getPage().page);
        }
        removePage(rec.getPage());
        rec.setPage(null);
    } else {
        rec.getPage().setDirty(true);
        dataCache.add(rec.getPage());
    }
}
/**
* Physically remove a node. The data of the node will be removed from the
* page and the occupied space is freed.
*/
//Seems to be unused -pb
//public void remove(Value key, long pointer) {
//remove(null, key, pointer);
//}
/**
 * Physically removes the node at the given address and then deletes
 * its key from the btree index.
 *
 * @param transaction the current transaction, may be null
 * @param key the btree key registered for the node
 * @param pointer the storage address of the node
 */
public void remove(Txn transaction, Value key, long pointer) {
    removeNode(transaction, pointer);
    try {
        removeValue(transaction, key);
    } catch (final BTreeException e) {
        //TODO : rethrow exception ? -pb
        LOG.error("BTree error while removing node", e);
    } catch (final IOException e) {
        //TODO : rethrow exception ? -pb
        LOG.error("IO error while removing node", e);
    }
}
/**
 * Remove the specified page: unlink it from the doubly-linked page chain,
 * reset its header, and return it to the list of free pages.
 *
 * @param page the page to remove
 */
private void removePage(DOMPage page) {
    if (!lock.isLockedForWrite())
        {LOG.warn("The file doesn't own a write lock");}
    final DOMFilePageHeader pageHeader = page.getPageHeader();
    // bridge the neighbours of the removed page
    if (pageHeader.getNextDataPage() != Page.NO_PAGE) {
        final DOMPage nextPage = getDOMPage(pageHeader.getNextDataPage());
        nextPage.getPageHeader().setPrevDataPage(pageHeader.getPreviousDataPage());
        nextPage.setDirty(true);
        dataCache.add(nextPage);
    }
    if (pageHeader.getPreviousDataPage() != Page.NO_PAGE) {
        final DOMPage previousPage = getDOMPage(pageHeader.getPreviousDataPage());
        previousPage.getPageHeader().setNextDataPage(pageHeader.getNextDataPage());
        previousPage.setDirty(true);
        dataCache.add(previousPage);
    }
    try {
        // reset the header before freeing the page
        pageHeader.setNextDataPage(Page.NO_PAGE);
        pageHeader.setPrevDataPage(Page.NO_PAGE);
        pageHeader.setDataLength(0);
        pageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
        pageHeader.setRecordCount((short) 0);
        unlinkPages(page.page);
        page.setDirty(true);
        dataCache.remove(page);
    } catch (final IOException ioe) {
        LOG.error(ioe);
        //TODO : rethrow exception ? -pb
    }
    if (currentDocument != null)
        {currentDocument.getMetadata().decPageCount();}
}
/**
 * Remove a sequence of pages, starting with the page denoted by the passed
 * address pointer p.
 *
 * @param transaction the current transaction, may be null
 * @param pointer address of the first page of the chain to remove
 */
public void removeAll(Txn transaction, long pointer) {
    if (!lock.isLockedForWrite())
        {LOG.warn("The file doesn't own a write lock");}
    long pageNum = StorageAddress.pageFromPointer(pointer);
    if (pageNum == Page.NO_PAGE) {
        LOG.error("Tried to remove unknown page");
        //TODO : throw exception ? -pb
    }
    // follow the next-page links, freeing every page of the chain
    while (pageNum != Page.NO_PAGE) {
        final DOMPage currentPage = getDOMPage(pageNum);
        final DOMFilePageHeader currentPageHeader = currentPage.getPageHeader();
        if (isTransactional && transaction != null) {
            // log the full page contents so the removal can be undone
            final RemovePageLoggable loggable = new RemovePageLoggable(transaction, pageNum,
                currentPageHeader.getPreviousDataPage(), currentPageHeader.getNextDataPage(),
                currentPage.data, currentPage.len,
                currentPageHeader.getCurrentTupleID(), currentPageHeader.getRecordCount());
            writeToLog(loggable, currentPage.page);
        }
        // remember the successor before the header is reset
        pageNum = currentPageHeader.getNextDataPage();
        try {
            currentPageHeader.setNextDataPage(Page.NO_PAGE);
            currentPageHeader.setPrevDataPage(Page.NO_PAGE);
            currentPageHeader.setDataLength(0);
            currentPageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
            currentPageHeader.setRecordCount((short) 0);
            currentPage.len = 0;
            unlinkPages(currentPage.page);
            currentPage.setDirty(true);
            dataCache.remove(currentPage);
        } catch (final IOException e) {
            LOG.error("Error while removing page: " + e.getMessage(), e);
            //TODO : rethrow the exception ? -pb
        }
    }
}
/**
 * Lists the chain of data pages used by a document, optionally logging
 * the contents of each page.
 *
 * @param doc the document whose pages should be listed
 * @param showPageContents if true, dump each page's contents to the debug log
 * @return a string listing all page numbers used by the document
 */
public String debugPages(DocumentImpl doc, boolean showPageContents) {
    final StringBuilder buf = new StringBuilder();
    buf.append("Pages used by ").append(doc.getURI());
    buf.append("; (docId: ").append(doc.getDocId()).append("): ");
    long pageNum = StorageAddress.pageFromPointer((
        (StoredNode) doc.getFirstChild()).getInternalAddress());
    while (pageNum != Page.NO_PAGE) {
        final DOMPage page = getDOMPage(pageNum);
        dataCache.add(page);
        buf.append(' ').append(pageNum);
        if (showPageContents) {
            LOG.debug(debugPageContents(page));
        }
        pageNum = page.getPageHeader().getNextDataPage();
    }
    return buf.toString();
}
/**
 * Update the value stored under the given key. The new value must have
 * exactly the same length as the stored one.
 *
 * @param transaction the current transaction, may be null
 * @param key the btree key of the value to update
 * @param value the new value bytes
 * @return true if the value was updated, false otherwise
 * @throws ReadOnlyException if the file is read-only
 */
public boolean update(Txn transaction, Value key, byte[] value)
    throws ReadOnlyException {
    try {
        final long pointer = findValue(key);
        if (pointer == KEY_NOT_FOUND) {
            //TODO : transform to error ? -pb
            LOG.warn("Node value not found : " + key);
            return false;
        }
        update(transaction, pointer, value);
    } catch (final BTreeException bte) {
        //TODO : rethrow exception ? -pb
        // pass the exception to the logger instead of printStackTrace()
        LOG.warn("BTree error while updating value for key: " + key, bte);
        return false;
    } catch (final IOException ioe) {
        //TODO : rethrow exception ? -pb
        LOG.warn("IO error while updating value for key: " + key, ioe);
        return false;
    }
    return true;
}
/**
 * Update the key/value pair where the value is found at address pointer.
 * The new value must have exactly the same length as the stored one.
 *
 * @param transaction the current transaction, may be null
 * @param pointer the storage address of the value to overwrite
 * @param value the new value bytes
 * @throws ReadOnlyException if the file is read-only
 * @throws IllegalStateException if the new value's length differs from
 *         the stored value's length
 */
public void update(Txn transaction, long pointer, byte[] value) throws ReadOnlyException {
    if (!lock.isLockedForWrite()) {
        LOG.warn("The file doesn't own a write lock");
    }
    final RecordPos recordPos = findRecord(pointer);
    // the stored record starts with its value length
    final short valueLength = ByteConversion.byteToShort(recordPos.getPage().data, recordPos.offset);
    recordPos.offset += LENGTH_DATA_LENGTH;
    if (ItemId.isRelocated(recordPos.getTupleID())) {
        // skip the back-link of a relocated record
        recordPos.offset += LENGTH_ORIGINAL_LOCATION;
    }
    // Fixed: "Expected" is the stored length, "got" is the new value's
    // length (the original messages had them swapped)
    if (value.length < valueLength) {
        // value is smaller than before
        throw new IllegalStateException("Value too short. Expected: "
            + valueLength + "; got: " + value.length);
    } else if (value.length > valueLength) {
        throw new IllegalStateException("Value too long. Expected: "
            + valueLength + "; got: " + value.length);
    } else {
        if (isTransactional && transaction != null) {
            if (ItemId.getId(recordPos.getTupleID()) < 0) {
                LOG.error("Tuple ID < 0");
                //TODO : throw exception ? -pb
            }
            // log the old bytes so the update can be undone
            final Loggable loggable = new UpdateValueLoggable(transaction,
                recordPos.getPage().getPageNum(), recordPos.getTupleID(),
                value, recordPos.getPage().data, recordPos.offset);
            writeToLog(loggable, recordPos.getPage().page);
        }
        // value length unchanged: overwrite in place
        System.arraycopy(value, 0, recordPos.getPage().data, recordPos.offset, value.length);
    }
    recordPos.getPage().setDirty(true);
}
/**
 * Retrieve the string value of the specified node. This is an optimized low-level method
 * which will directly traverse the stored DOM nodes and collect the string values of
 * the specified root node and all its descendants. By directly scanning the stored
 * node data, we do not need to create a potentially large amount of node objects
 * and thus save memory and time for garbage collection.
 *
 * @param broker the broker used for the fallback BTree lookup
 * @param node the root node whose string value is collected
 * @param addWhitespace if true, a space is inserted between sibling values
 * @return string value of the specified node, or null if the node could not
 *         be found or an error occurred (errors are logged, not rethrown)
 */
public String getNodeValue(DBBroker broker, StoredNode node, boolean addWhitespace) {
if (!lock.hasLock())
{LOG.warn("The file doesn't own a lock");}
try {
long address = node.getInternalAddress();
RecordPos recordPos = null;
// try to directly locate the root node through its storage address
if (StorageAddress.hasAddress(address))
{recordPos = findRecord(address);}
if (recordPos == null) {
// fallback to a BTree lookup if the node could not be found
// by its storage address
address = findValue(broker, new NodeProxy(node));
if (address == BTree.KEY_NOT_FOUND) {
LOG.error("Node value not found: " + node);
//TODO : throw exception ? -pb
return null;
}
recordPos = findRecord(address);
SanityCheck.THROW_ASSERT(recordPos != null, "Node data could not be found!");
//TODO : throw exception ? -pb
}
// we collect the string values in binary format and append them to a ByteArrayOutputStream
final ByteArrayOutputStream os = new ByteArrayOutputStream();
// now traverse the tree
getNodeValue(broker.getBrokerPool(), (DocumentImpl)node.getOwnerDocument(),
os, recordPos, true, addWhitespace);
final byte[] data = os.toByteArray();
// the collected bytes are UTF-8 encoded text
return new String(data, UTF_8);
} catch (final BTreeException e) {
LOG.error("BTree error while reading node value", e);
//TODO : rethrow exception ? -pb
} catch (final Exception e) {
// NOTE(review): this also catches non-IO exceptions despite the message
LOG.error("IO error while reading node value", e);
//TODO : rethrow exception ? -pb
}
//TODO : remove if exceptions thrown...
return null;
}
/**
 * Recursive method to retrieve the string values of the root node
 * and all its descendants.
 *
 * The record position {@code rec} is advanced past each record as it is read,
 * so recursive calls continue at the next sibling/child record. The offset
 * arithmetic below mirrors the on-disk record layout: TID, data length,
 * optional original location (for relocated records), then the node data.
 *
 * @param pool broker pool, used to decode node ids
 * @param doc the owner document (currently unused inside this method)
 * @param os sink receiving the collected UTF-8 value bytes
 * @param rec current record position; mutated as records are consumed
 * @param isTopNode true only for the root of the traversal; attribute and
 *        comment values are emitted only for the top node
 * @param addWhitespace if true, a space is written between element children
 */
private void getNodeValue(BrokerPool pool, DocumentImpl doc,
ByteArrayOutputStream os, RecordPos rec, boolean isTopNode, boolean addWhitespace) {
if (!lock.hasLock())
{LOG.warn("The file doesn't own a lock");}
//Locate the next real node, skipping relocated nodes
boolean foundNext = false;
do {
final DOMFilePageHeader pageHeader = rec.getPage().getPageHeader();
if (rec.offset > pageHeader.getDataLength()) {
// end of page reached, proceed to the next page
final long nextPage = pageHeader.getNextDataPage();
if (nextPage == Page.NO_PAGE) {
SanityCheck.TRACE("Bad link to next page! " +
"Offset: " + rec.offset +
", Len: " + pageHeader.getDataLength() +
", Page info : " + rec.getPage().page.getPageInfo());
//TODO : throw exception ? -pb
return;
}
rec.setPage(getDOMPage(nextPage));
dataCache.add(rec.getPage());
rec.offset = LENGTH_TID;
}
//Position the stream at the very beginning of the record
final short tupleID = ByteConversion.byteToShort(rec.getPage().data, rec.offset - LENGTH_TID);
rec.setTupleID(tupleID);
if (ItemId.isLink(rec.getTupleID())) {
//This is a link: skip it
//We position the offset *after* the next TupleID
rec.offset += (LENGTH_FORWARD_LOCATION + LENGTH_TID);
} else {
//OK: node found
foundNext = true;
}
} while (!foundNext);
// Read the record's data length; OVERFLOW means the data lives on overflow pages.
final short valueLength = ByteConversion.byteToShort(rec.getPage().data, rec.offset);
int realLen = valueLength;
rec.offset += LENGTH_DATA_LENGTH;
//Check if the node was relocated
if (ItemId.isRelocated(rec.getTupleID())) {
rec.offset += LENGTH_ORIGINAL_LOCATION;
}
byte[] data = rec.getPage().data;
int readOffset = rec.offset;
boolean inOverflow = false;
if (valueLength == OVERFLOW) {
//If we have an overflow value, load it from the overflow page
final long p = ByteConversion.byteToLong(data, rec.offset);
data = getOverflowValue(p);
//We position the offset *after* the next TID
rec.offset += LENGTH_OVERFLOW_LOCATION + LENGTH_TID;
realLen = data.length;
readOffset = 0;
inOverflow = true;
}
// check the type of the node
final short type = Signatures.getType(data[readOffset]);
readOffset += StoredNode.LENGTH_SIGNATURE_LENGTH;
//Switch on the node type
switch (type) {
case Node.ELEMENT_NODE: {
// Element layout: child count, node-id length, node id, attribute count.
final int children = ByteConversion.byteToInt(data, readOffset);
readOffset += ElementImpl.LENGTH_ELEMENT_CHILD_COUNT;
final int dlnLen = ByteConversion.byteToShort(data, readOffset);
readOffset += NodeId.LENGTH_NODE_ID_UNITS;
final int nodeIdLen = pool.getNodeFactory().lengthInBytes(dlnLen, data, readOffset);
readOffset += nodeIdLen;
final short attributes = ByteConversion.byteToShort(data, readOffset);
//Ignore the following NS data which are of no use
//We position the offset *after* the next TID
rec.offset += realLen + LENGTH_TID;
// Separate values of multiple non-attribute children with spaces.
final boolean extraWhitespace = addWhitespace && (children - attributes) > 1;
for (int i = 0; i < children; i++) {
//recursive call : we ignore attributes children
getNodeValue(pool, doc, os, rec, false, addWhitespace);
if (extraWhitespace) {
os.write((byte) ' ');
}
}
return;
}
case Node.TEXT_NODE:
case Node.CDATA_SECTION_NODE: {
// Text/CDATA layout: node-id length, node id, then the raw text bytes.
final int dlnLen = ByteConversion.byteToShort(data, readOffset);
readOffset += NodeId.LENGTH_NODE_ID_UNITS;
final int nodeIdLen = pool.getNodeFactory().lengthInBytes(dlnLen, data, readOffset);
readOffset += nodeIdLen;
os.write(data, readOffset, realLen -
(StoredNode.LENGTH_SIGNATURE_LENGTH + NodeId.LENGTH_NODE_ID_UNITS + nodeIdLen));
break;
}
case Node.PROCESSING_INSTRUCTION_NODE: {
// PI layout: node id, 4-byte target length, target, then the PI data.
final int dlnLen = ByteConversion.byteToShort(data, readOffset);
readOffset += NodeId.LENGTH_NODE_ID_UNITS;
final int nodeIdLen = pool.getNodeFactory().lengthInBytes(dlnLen, data, readOffset);
readOffset += nodeIdLen;
final int targetLen = ByteConversion.byteToInt(data, readOffset);
readOffset += 4 + targetLen;
os.write(
data,
readOffset,
realLen - (StoredNode.LENGTH_SIGNATURE_LENGTH + NodeId.LENGTH_NODE_ID_UNITS + nodeIdLen + targetLen + 4));
break;
}
case Node.ATTRIBUTE_NODE: {
// Attribute values only contribute when the attribute itself is the queried node.
if (isTopNode) {
final int start = readOffset - StoredNode.LENGTH_SIGNATURE_LENGTH;
// Low 2 bits of the signature encode the qname id size; bit 0x10 flags a namespace.
final byte idSizeType = (byte) (data[start] & 0x3);
final boolean hasNamespace = (data[start] & 0x10) == 0x10;
final int dlnLen = ByteConversion.byteToShort(data, readOffset);
readOffset += NodeId.LENGTH_NODE_ID_UNITS;
final int nodeIdLen = pool.getNodeFactory().lengthInBytes(dlnLen, data, readOffset);
readOffset += nodeIdLen;
readOffset += Signatures.getLength(idSizeType);
if (hasNamespace) {
readOffset += AttrImpl.LENGTH_NS_ID; // skip namespace id
final short prefixLen = ByteConversion.byteToShort(data, readOffset);
readOffset += AttrImpl.LENGTH_PREFIX_LENGTH;
readOffset += prefixLen; // skip prefix
}
os.write(data, readOffset, realLen - (readOffset - start));
}
break;
}
case Node.COMMENT_NODE:
{
// Comment text only contributes when the comment itself is the queried node.
if (isTopNode) {
final int dlnLen = ByteConversion.byteToShort(data, readOffset);
readOffset += NodeId.LENGTH_NODE_ID_UNITS;
final int nodeIdLen = pool.getNodeFactory().lengthInBytes(dlnLen, data, readOffset);
readOffset += nodeIdLen;
os.write(data, readOffset, realLen - (StoredNode.LENGTH_SIGNATURE_LENGTH + NodeId.LENGTH_NODE_ID_UNITS + nodeIdLen));
}
break;
}
}
if (!inOverflow) {
//If it isn't an overflow value, add the value length to the current offset
//We position the offset *after* the next TID
rec.offset += realLen + LENGTH_TID;
}
}
/**
 * Find a record by its storage address, following any forwarding links
 * to the record's current location.
 *
 * @param pointer storage address (page number + tuple id) of the record
 * @return the record position in the page, or null if it could not be found
 */
protected RecordPos findRecord(long pointer) {
return findRecord(pointer, true);
}
/**
 * Find a record within the page or the pages linked to it.
 *
 * @param pointer storage address (page number + tuple id) of the record
 * @param skipLinks if true, forwarding links are followed to the real record;
 *        if false, the link record itself is returned
 * @return the record position in the page, or null if it could not be found
 */
protected RecordPos findRecord(long pointer, boolean skipLinks) {
    if (!lock.hasLock()) {
        LOG.warn("The file doesn't own a lock");
    }
    long pageNo = StorageAddress.pageFromPointer(pointer);
    short tid = StorageAddress.tidFromPointer(pointer);
    while (pageNo != Page.NO_PAGE) {
        final DOMPage currentPage = getDOMPage(pageNo);
        dataCache.add(currentPage);
        final RecordPos position = currentPage.findRecord(tid);
        if (position == null) {
            // Record not on this page: try the next page in the chain.
            pageNo = currentPage.getPageHeader().getNextDataPage();
            if (pageNo == currentPage.getPageNum()) {
                SanityCheck.TRACE("Circular link to next page on " + pageNo);
                //TODO : throw exception ?
                return null;
            }
        } else if (!position.isLink() || !skipLinks) {
            // Either a real record, or the caller wants the link itself.
            return position;
        } else {
            // Follow the forwarding link to the record's relocated position.
            final long forwardLink = ByteConversion.byteToLong(currentPage.data, position.offset);
            pageNo = StorageAddress.pageFromPointer(forwardLink);
            tid = StorageAddress.tidFromPointer(forwardLink);
        }
    }
    //TODO : throw exception ? -pb
    return null;
}
/** Returns the lock guarding access to this file. */
@Override
public Lock getLock() {
return lock;
}
/**
 * Set the current object owning this file.
 *
 * @param ownerObject The new ownerObject value
 */
public synchronized final void setOwnerObject(Object ownerObject) {
    // A null owner is almost certainly a caller bug; log it, but keep the
    // original behavior of assigning it anyway.
    if (null == ownerObject) {
        LOG.error("setOwnerObject(null)");
    }
    owner = ownerObject;
}
/*
* ---------------------------------------------------------------------------------
* Methods used by recovery and transaction management
* ---------------------------------------------------------------------------------
*/
/**
 * A logged operation must be redone if its LSN is newer than the LSN
 * last written to the page.
 */
private boolean requiresRedo(Loggable loggable, DOMPage page) {
    final long pageLsn = page.getPageHeader().getLsn();
    return loggable.getLsn() > pageLsn;
}
/**
 * Redo the creation of a page: reinitialize the page to an empty RECORD page
 * and restore its prev/next links from the log record.
 */
protected void redoCreatePage(CreatePageLoggable loggable) {
final DOMPage newPage = getDOMPage(loggable.newPage);
final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
// Redo only if the page has never been written or the change post-dates the page's LSN.
if (newPageHeader.getLsn() == Lsn.LSN_INVALID || requiresRedo(loggable, newPage)) {
try {
reuseDeleted(newPage.page);
newPageHeader.setStatus(RECORD);
newPageHeader.setDataLength(0);
newPageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
newPageHeader.setRecordCount((short) 0);
newPage.len = 0;
newPage.data = new byte[fileHeader.getWorkSize()];
newPageHeader.setPrevDataPage(Page.NO_PAGE);
if (loggable.nextTID != ItemId.UNKNOWN_ID)
{newPageHeader.setNextTupleID(loggable.nextTID);}
newPageHeader.setLsn(loggable.getLsn());
newPage.setDirty(true);
// Restore the page chain links recorded in the log entry.
if (loggable.nextPage == Page.NO_PAGE)
{newPageHeader.setNextDataPage(Page.NO_PAGE);}
else
{newPageHeader.setNextDataPage(loggable.nextPage);}
if (loggable.prevPage == Page.NO_PAGE)
{newPageHeader.setPrevDataPage(Page.NO_PAGE);}
else
{newPageHeader.setPrevDataPage(loggable.prevPage);}
} catch (final IOException e) {
LOG.error("Failed to redo " + loggable.dump() + ": "
+ e.getMessage(), e);
//TODO : throw exception ?
}
}
dataCache.add(newPage);
}
/**
 * Undo the creation of a page: clear the page header, unlink the page from
 * the file and drop it from the cache.
 */
protected void undoCreatePage(CreatePageLoggable loggable) {
final DOMPage page = getDOMPage(loggable.newPage);
final DOMFilePageHeader pageHeader = page.getPageHeader();
try {
pageHeader.setNextDataPage(Page.NO_PAGE);
pageHeader.setPrevDataPage(Page.NO_PAGE);
pageHeader.setDataLength(0);
pageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
pageHeader.setRecordCount((short) 0);
page.len = 0;
unlinkPages(page.page);
page.setDirty(true);
dataCache.remove(page);
} catch (final IOException e) {
LOG.warn("Error while removing page: " + e.getMessage(), e);
//TODO : exception ?
}
}
/**
 * Redo the append of a value record (TID + length + data) at the end of a page.
 */
protected void redoAddValue(AddValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
// Redo only if the page was written before and the change post-dates its LSN.
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
try {
ByteConversion.shortToByte(loggable.tid, page.data, page.len);
page.len += LENGTH_TID;
// save data length
// overflow pages have length 0
final short vlen = (short) loggable.value.length;
ByteConversion.shortToByte(vlen, page.data, page.len);
page.len += LENGTH_DATA_LENGTH;
// save data
System.arraycopy(loggable.value, 0, page.data, page.len, vlen);
page.len += vlen;
pageHeader.incRecordCount();
pageHeader.setDataLength(page.len);
page.setDirty(true);
pageHeader.setNextTupleID(loggable.tid);
pageHeader.setLsn(loggable.getLsn());
dataCache.add(page, 2);
} catch (final ArrayIndexOutOfBoundsException e) {
// Log page diagnostics before propagating the failure.
LOG.warn("page: " + page.getPageNum() +
"; len = " + page.len +
"; value = " + loggable.value.length);
throw e;
}
}
}
/**
 * Undo the append of a value record by splicing it out of the page data.
 */
protected void undoAddValue(AddValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
final RecordPos pos = page.findRecord(ItemId.getId(loggable.tid));
SanityCheck.ASSERT(pos != null, "Record not found!");
//TODO : throw exception ? -pb
//Position the stream at the very beginning of the record
final int startOffset = pos.offset - LENGTH_TID;
//Get the record length
final short vlen = ByteConversion.byteToShort(page.data, pos.offset);
//End offset
final int end = startOffset + LENGTH_TID + LENGTH_DATA_LENGTH + vlen;
final int dlen = pageHeader.getDataLength();
//Remove old value
System.arraycopy(page.data, end, page.data, startOffset, dlen - end);
page.len = dlen - (LENGTH_TID + LENGTH_DATA_LENGTH + vlen);
if (page.len < 0) {
LOG.error("page length < 0");
//TODO : exception ?
}
pageHeader.setDataLength(page.len);
pageHeader.decRecordCount();
page.setDirty(true);
}
/**
 * Redo an in-place value update recorded by an UpdateValueLoggable:
 * overwrite the record's data bytes with the new value.
 */
protected void redoUpdateValue(UpdateValueLoggable loggable) {
    final DOMPage page = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader ph = page.getPageHeader();
    // Redo only if the page was written before and the change post-dates its LSN.
    if (ph.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
        final RecordPos rec = page.findRecord(ItemId.getId(loggable.tid));
        SanityCheck.THROW_ASSERT(rec != null,
            "tid " + ItemId.getId(loggable.tid) +
            " not found on page " + page.getPageNum() +
            "; contents: " + debugPageContents(page));
        // Skip the stored value-length field. (The previous code also read this
        // field with ByteConversion.byteToShort and discarded the result; the
        // side-effect-free call was removed.)
        rec.offset += LENGTH_DATA_LENGTH;
        // Relocated records carry the original location before the data.
        if (ItemId.isRelocated(rec.getTupleID()))
            {rec.offset += LENGTH_ORIGINAL_LOCATION;}
        System.arraycopy(loggable.value, 0, rec.getPage().data, rec.offset, loggable.value.length);
        rec.getPage().getPageHeader().setLsn(loggable.getLsn());
        rec.getPage().setDirty(true);
        dataCache.add(rec.getPage());
    }
}
/**
 * Undo an in-place value update by restoring the old data bytes.
 */
protected void undoUpdateValue(UpdateValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final RecordPos rec = page.findRecord(ItemId.getId(loggable.tid));
SanityCheck.THROW_ASSERT(rec != null,
"tid " + ItemId.getId(loggable.tid) +
" not found on page " + page.getPageNum() +
"; contents: " + debugPageContents(page));
// The stored length must match the logged old value exactly.
final short vlen = ByteConversion.byteToShort(rec.getPage().data, rec.offset);
SanityCheck.THROW_ASSERT(vlen == loggable.oldValue.length);
rec.offset += LENGTH_DATA_LENGTH;
if (ItemId.isRelocated(rec.getTupleID()))
{rec.offset += LENGTH_ORIGINAL_LOCATION;}
System.arraycopy(loggable.oldValue, 0, page.data, rec.offset, loggable.oldValue.length);
page.getPageHeader().setLsn(loggable.getLsn());
page.setDirty(true);
dataCache.add(page);
}
/**
 * Redo the removal of a value record (or a forwarding link) by splicing it
 * out of the page data.
 */
protected void redoRemoveValue(RemoveValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
// Redo only if the page was written before and the change post-dates its LSN.
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
final RecordPos pos = page.findRecord(ItemId.getId(loggable.tid));
SanityCheck.ASSERT(pos != null,
"Record not found: " + ItemId.getId(loggable.tid) + ": " +
page.page.getPageInfo() + "\n" +
debugPageContents(page));
//Position the stream at the very beginning of the record
final int startOffset = pos.offset - LENGTH_TID;
if (ItemId.isLink(loggable.tid)) {
// A link record consists only of TID + forward location.
final int end = pos.offset + LENGTH_FORWARD_LOCATION;
System.arraycopy(page.data, end, page.data, startOffset, page.len - end);
page.len = page.len - (LENGTH_DATA_LENGTH + LENGTH_FORWARD_LOCATION);
} else {
// get the record length
short l = ByteConversion.byteToShort(page.data, pos.offset);
if (ItemId.isRelocated(loggable.tid)) {
pos.offset += LENGTH_ORIGINAL_LOCATION;
l += LENGTH_ORIGINAL_LOCATION;
}
if (l == OVERFLOW) {
l += LENGTH_OVERFLOW_LOCATION;
}
// end offset
final int end = startOffset + LENGTH_TID + LENGTH_DATA_LENGTH + l;
final int dlen = pageHeader.getDataLength();
// remove old value
System.arraycopy(page.data, end, page.data, startOffset, dlen - end);
page.setDirty(true);
page.len = dlen - (LENGTH_TID + LENGTH_DATA_LENGTH + l);
}
if (page.len < 0) {
LOG.error("page length < 0");
//TODO : throw exception ? -pb
}
pageHeader.setDataLength(page.len);
pageHeader.decRecordCount();
pageHeader.setLsn(loggable.getLsn());
page.setDirty(true);
dataCache.add(page);
}
}
/**
 * Undo the removal of a value record: shift the following data right to make
 * room, then re-insert the logged TID, length and old data at its old offset.
 */
protected void undoRemoveValue(RemoveValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
int offset = loggable.offset;
final short vlen = (short) loggable.oldData.length;
if (offset < pageHeader.getDataLength()) {
// make room for the removed value
int required;
if (ItemId.isLink(loggable.tid))
{required = LENGTH_TID + LENGTH_FORWARD_LOCATION;}
else
{required = LENGTH_TID + LENGTH_DATA_LENGTH + vlen;}
if (ItemId.isRelocated(loggable.tid))
{required += LENGTH_ORIGINAL_LOCATION;}
final int end = offset + required;
try {
System.arraycopy(page.data, offset, page.data, end, pageHeader.getDataLength() - offset);
} catch(final ArrayIndexOutOfBoundsException e) {
LOG.warn(e);
SanityCheck.TRACE("Error while copying data on page " + page.getPageNum() +
"; tid: " + ItemId.getId(loggable.tid) + "; required: " + required +
"; offset: " + offset + "; end: " + end +
"; len: " + (pageHeader.getDataLength() - offset) +
"; avail: " + page.data.length + "; work: " + fileHeader.getWorkSize());
}
}
//save TID
ByteConversion.shortToByte(loggable.tid, page.data, offset);
offset += LENGTH_TID;
if (ItemId.isLink(loggable.tid)) {
// Link records store only the forward location after the TID.
System.arraycopy(loggable.oldData, 0, page.data, offset, LENGTH_FORWARD_LOCATION);
page.len += (LENGTH_TID + LENGTH_FORWARD_LOCATION);
} else {
// save data length
// overflow pages have length 0
if (loggable.isOverflow) {
ByteConversion.shortToByte(OVERFLOW, page.data, offset);
} else {
ByteConversion.shortToByte(vlen, page.data, offset);
}
offset += LENGTH_DATA_LENGTH;
if (ItemId.isRelocated(loggable.tid)) {
// Relocated records store the back link to the original location.
ByteConversion.longToByte(loggable.backLink, page.data, offset);
offset += LENGTH_ORIGINAL_LOCATION;
page.len += LENGTH_ORIGINAL_LOCATION;
}
// save data
System.arraycopy(loggable.oldData, 0, page.data, offset, vlen);
page.len += (LENGTH_TID + LENGTH_DATA_LENGTH + vlen);
}
pageHeader.incRecordCount();
pageHeader.setDataLength(page.len);
page.setDirty(true);
dataCache.add(page, 2);
}
/**
 * Redo the removal of an empty page, if the page has not already
 * seen this change.
 */
protected void redoRemoveEmptyPage(RemoveEmptyPageLoggable loggable) {
    final DOMPage emptyPage = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = emptyPage.getPageHeader();
    final boolean mustRedo =
        header.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, emptyPage);
    if (mustRedo) {
        removePage(emptyPage);
    }
}
/**
 * Undo the removal of an empty page: revive the page and relink it between
 * its logged previous and next pages.
 */
protected void undoRemoveEmptyPage(RemoveEmptyPageLoggable loggable) {
try {
final DOMPage newPage = getDOMPage(loggable.pageNum);
final DOMFilePageHeader newPageHeader = newPage.getPageHeader();
reuseDeleted(newPage.page);
// Relink with the previous page, if any.
if (loggable.prevPage == Page.NO_PAGE) {
newPageHeader.setPrevDataPage(Page.NO_PAGE);
} else {
final DOMPage oldPage = getDOMPage(loggable.prevPage);
final DOMFilePageHeader oldPageHeader = oldPage.getPageHeader();
newPageHeader.setPrevDataPage(oldPage.getPageNum());
oldPageHeader.setNextDataPage(newPage.getPageNum());
oldPage.setDirty(true);
dataCache.add(oldPage);
}
// Relink with the next page, if any.
if (loggable.nextPage == Page.NO_PAGE) {
newPageHeader.setNextDataPage(Page.NO_PAGE);
} else {
final DOMPage oldPage = getDOMPage(loggable.nextPage);
final DOMFilePageHeader oldPageHeader = oldPage.getPageHeader();
oldPageHeader.setPrevDataPage(newPage.getPageNum());
newPageHeader.setNextDataPage(loggable.nextPage);
oldPage.setDirty(true);
dataCache.add(oldPage);
}
newPageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
newPage.setDirty(true);
dataCache.add(newPage);
} catch (final IOException e) {
LOG.error("Error during undo: " + e.getMessage(), e);
//TODO : throw exception ? -pb
}
}
/**
 * Redo the removal of a data page: clear its header, unlink it and drop it
 * from the cache.
 */
protected void redoRemovePage(RemovePageLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
try {
pageHeader.setNextDataPage(Page.NO_PAGE);
pageHeader.setPrevDataPage(Page.NO_PAGE);
// NOTE(review): setDataLen and setDataLength look like different header
// fields (base vs. DOM header) — confirm this pairing is intentional.
pageHeader.setDataLen(fileHeader.getWorkSize());
pageHeader.setDataLength(0);
pageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
pageHeader.setRecordCount((short) 0);
page.len = 0;
unlinkPages(page.page);
page.setDirty(true);
dataCache.remove(page);
} catch (final IOException e) {
LOG.warn("Error while removing page: " + e.getMessage(), e);
//TODO : throw exception ? -pb
}
}
}
/**
 * Undo the removal of a data page by restoring its logged header fields
 * and old contents.
 */
protected void undoRemovePage(RemovePageLoggable loggable) {
try {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
reuseDeleted(page.page);
pageHeader.setStatus(RECORD);
pageHeader.setNextDataPage(loggable.nextPage);
pageHeader.setPrevDataPage(loggable.prevPage);
pageHeader.setNextTupleID(ItemId.getId(loggable.oldTid));
pageHeader.setRecordCount(loggable.oldRecCnt);
pageHeader.setDataLength(loggable.oldLen);
// Restore the page's previous raw contents.
System.arraycopy(loggable.oldData, 0, page.data, 0, loggable.oldLen);
page.len = loggable.oldLen;
page.setDirty(true);
dataCache.add(page);
} catch (final IOException e) {
LOG.warn("Failed to undo " + loggable.dump() + ": " + e.getMessage(), e);
//TODO : throw exception ? -pb
}
}
/**
 * Redo the write of an overflow page: revive the page, restore its next-page
 * link and rewrite the logged value.
 */
protected void redoWriteOverflow(WriteOverflowPageLoggable loggable) {
try {
final Page page = getPage(loggable.pageNum);
page.read();
final PageHeader pageHeader = page.getPageHeader();
reuseDeleted(page);
pageHeader.setStatus(RECORD);
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
if (loggable.nextPage == Page.NO_PAGE) {
pageHeader.setNextPage(Page.NO_PAGE);
} else {
pageHeader.setNextPage(loggable.nextPage);
}
pageHeader.setLsn(loggable.getLsn());
writeValue(page, loggable.value);
}
} catch (final IOException e) {
LOG.warn("Failed to redo " + loggable.dump() + ": " + e.getMessage(), e);
//TODO : throw exception ? -pb
}
}
/**
 * Undo the write of an overflow page by unlinking the page again.
 */
protected void undoWriteOverflow(WriteOverflowPageLoggable loggable) {
    try {
        final Page overflowPage = getPage(loggable.pageNum);
        overflowPage.read();
        unlinkPages(overflowPage);
    } catch (final IOException e) {
        LOG.warn("Failed to undo " + loggable.dump() + ": " + e.getMessage(), e);
        //TODO : throw exception ? -pb
    }
}
/**
 * Redo the removal of an overflow page by unlinking it, if the page has not
 * already seen this change.
 */
protected void redoRemoveOverflow(RemoveOverflowLoggable loggable) {
    try {
        final Page page = getPage(loggable.pageNum);
        page.read();
        final PageHeader pageHeader = page.getPageHeader();
        if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
            unlinkPages(page);
        }
    } catch (final IOException e) {
        // Fixed message: this is the redo path (it previously said "undo").
        LOG.warn("Failed to redo " + loggable.dump() + ": " + e.getMessage(), e);
        //TODO : throw exception ? -pb
    }
}
/**
 * Undo the removal of an overflow page: revive it, restore its next-page
 * link and rewrite the old data.
 */
protected void undoRemoveOverflow(RemoveOverflowLoggable loggable) {
    try {
        final Page page = getPage(loggable.pageNum);
        page.read();
        final PageHeader pageHeader = page.getPageHeader();
        reuseDeleted(page);
        pageHeader.setStatus(RECORD);
        if (loggable.nextPage == Page.NO_PAGE) {
            pageHeader.setNextPage(Page.NO_PAGE);
        } else {
            pageHeader.setNextPage(loggable.nextPage);
        }
        writeValue(page, loggable.oldData);
    } catch (final IOException e) {
        // Fixed message: this is the undo path (it previously said "redo").
        LOG.warn("Failed to undo " + loggable.dump() + ": " + e.getMessage(), e);
        //TODO : throw exception ? -pb
    }
}
/**
 * Redo the insertion of a value record at a given offset: shift any following
 * data right, then write TID, length and data.
 */
protected void redoInsertValue(InsertValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
final int dlen = pageHeader.getDataLength();
int offset = loggable.offset;
// insert in the middle of the page?
if (offset < dlen) {
final int end = offset + LENGTH_TID + LENGTH_DATA_LENGTH + loggable.value.length;
try {
System.arraycopy(page.data, offset, page.data, end, dlen - offset);
} catch(final ArrayIndexOutOfBoundsException e) {
LOG.error(e);
SanityCheck.TRACE("Error while copying data on page " + page.getPageNum() +
"; tid: " + loggable.tid +
"; offset: " + offset +
"; end: " + end +
"; len: " + (dlen - offset));
}
}
// writing tid
ByteConversion.shortToByte(loggable.tid, page.data, offset);
offset += LENGTH_TID;
page.len += LENGTH_TID;
// writing value length
ByteConversion.shortToByte(loggable.isOverflow() ?
OVERFLOW : (short) loggable.value.length, page.data, offset);
offset += LENGTH_DATA_LENGTH;
page.len += LENGTH_DATA_LENGTH;
// writing data
System.arraycopy(loggable.value, 0, page.data, offset, loggable.value.length);
offset += loggable.value.length;
page.len += loggable.value.length;
pageHeader.incRecordCount();
pageHeader.setDataLength(page.len);
pageHeader.setNextTupleID(ItemId.getId(loggable.tid));
page.setDirty(true);
dataCache.add(page);
}
}
/**
 * Undo the insertion of a value record (or link) by splicing it out of the
 * page data again.
 */
protected void undoInsertValue(InsertValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
if (ItemId.isLink(loggable.tid)) {
final int end = loggable.offset + LENGTH_FORWARD_LOCATION;
//Position the stream at the very beginning of the record
System.arraycopy(page.data, end, page.data, loggable.offset - LENGTH_TID, page.len - end);
// NOTE(review): uses LENGTH_DATA_LENGTH here where undoAddLink uses
// LENGTH_TID — equivalent only if both constants have the same value; verify.
page.len = page.len - (LENGTH_DATA_LENGTH + LENGTH_FORWARD_LOCATION);
} else {
// get the record length
final int offset = loggable.offset + LENGTH_TID;
//TOUNDERSTAND Strange : in the lines above, the offset seems to be positionned *after* the TID
short l = ByteConversion.byteToShort(page.data, offset);
if (ItemId.isRelocated(loggable.tid)) {
l += LENGTH_ORIGINAL_LOCATION;
}
if (l == OVERFLOW)
{l += LENGTH_OVERFLOW_LOCATION;}
// end offset
final int end = loggable.offset + (LENGTH_TID + LENGTH_DATA_LENGTH + l);
final int dlen = pageHeader.getDataLength();
// remove value
try {
System.arraycopy(page.data, end, page.data, loggable.offset, dlen - end);
} catch (final ArrayIndexOutOfBoundsException e) {
LOG.warn(e);
SanityCheck.TRACE("Error while copying data on page " + page.getPageNum() +
"; tid: " + loggable.tid +
"; offset: " + loggable.offset +
"; end: " + end +
"; len: " + (dlen - end) +
"; dataLength: " + dlen);
}
page.len = dlen - (LENGTH_TID + LENGTH_DATA_LENGTH + l);
}
if (page.len < 0)
{LOG.warn("page length < 0");}
pageHeader.setDataLength(page.len);
pageHeader.decRecordCount();
pageHeader.setLsn(loggable.getLsn());
page.setDirty(true);
dataCache.add(page);
}
/**
 * Redo a page split: truncate the page at the recorded split offset,
 * keeping only the data before the split point.
 */
protected void redoSplitPage(SplitPageLoggable loggable) {
    final DOMPage splitPage = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = splitPage.getPageHeader();
    if (header.getLsn() == Lsn.LSN_INVALID || !requiresRedo(loggable, splitPage)) {
        return;
    }
    // Copy the retained prefix into a fresh, full-size buffer.
    final byte[] previousData = splitPage.data;
    splitPage.data = new byte[fileHeader.getWorkSize()];
    System.arraycopy(previousData, 0, splitPage.data, 0, loggable.splitOffset);
    splitPage.len = loggable.splitOffset;
    if (splitPage.len < 0) {
        LOG.error("page length < 0");
    }
    header.setDataLength(splitPage.len);
    header.setRecordCount(countRecordsInPage(splitPage));
    splitPage.setDirty(true);
    dataCache.add(splitPage);
}
/**
 * Undo a page split by restoring the page's previous data and length.
 */
protected void undoSplitPage(SplitPageLoggable loggable) {
    final DOMPage splitPage = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = splitPage.getPageHeader();
    splitPage.data = loggable.oldData;
    splitPage.len = loggable.oldLen;
    if (splitPage.len < 0) {
        LOG.error("page length < 0");
    }
    header.setDataLength(splitPage.len);
    header.setLsn(loggable.getLsn());
    splitPage.setDirty(true);
    dataCache.add(splitPage);
}
/**
 * Redo the append of a forwarding link (TID flagged as link + forward address)
 * at the end of a page.
 */
protected void redoAddLink(AddLinkLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
ByteConversion.shortToByte(ItemId.setIsLink(loggable.tid), page.data, page.len);
page.len += LENGTH_TID;
ByteConversion.longToByte(loggable.link, page.data, page.len);
page.len += LENGTH_FORWARD_LOCATION;
pageHeader.setNextTupleID(ItemId.getId(loggable.tid));
pageHeader.setDataLength(page.len);
pageHeader.setLsn(loggable.getLsn());
pageHeader.incRecordCount();
page.setDirty(true);
dataCache.add(page);
}
}
/**
 * Undo the append of a forwarding link by splicing it out of the page data.
 */
protected void undoAddLink(AddLinkLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
final RecordPos rec = page.findRecord(loggable.tid);
final int end = rec.offset + LENGTH_FORWARD_LOCATION;
//Position the stream at the very beginning of the record
System.arraycopy(page.data, end, page.data, rec.offset - LENGTH_TID, page.len - end);
page.len = page.len - (LENGTH_TID + LENGTH_FORWARD_LOCATION);
if (page.len < 0)
{LOG.error("page length < 0");}
pageHeader.setDataLength(page.len);
pageHeader.decRecordCount();
pageHeader.setLsn(loggable.getLsn());
page.setDirty(true);
dataCache.add(page);
}
/**
 * Redo an update of a forwarding link's target address.
 */
protected void redoUpdateLink(UpdateLinkLoggable loggable) {
    final DOMPage linkPage = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = linkPage.getPageHeader();
    if (header.getLsn() == Lsn.LSN_INVALID || !requiresRedo(loggable, linkPage)) {
        return;
    }
    ByteConversion.longToByte(loggable.link, linkPage.data, loggable.offset);
    header.setLsn(loggable.getLsn());
    linkPage.setDirty(true);
    dataCache.add(linkPage);
}
/**
 * Undo an update of a forwarding link by restoring the old target address.
 */
protected void undoUpdateLink(UpdateLinkLoggable loggable) {
    final DOMPage linkPage = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = linkPage.getPageHeader();
    ByteConversion.longToByte(loggable.oldLink, linkPage.data, loggable.offset);
    header.setLsn(loggable.getLsn());
    linkPage.setDirty(true);
    dataCache.add(linkPage);
}
/**
 * Redo the append of a relocated value record (TID flagged as relocated +
 * length + back link + data) at the end of a page.
 */
protected void redoAddMovedValue(AddMovedValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
if (pageHeader.getLsn() != Lsn.LSN_INVALID && requiresRedo(loggable, page)) {
try {
ByteConversion.shortToByte(ItemId.setIsRelocated(loggable.tid), page.data, page.len);
page.len += LENGTH_TID;
final short vlen = (short) loggable.value.length;
// save data length
// overflow pages have length 0
ByteConversion.shortToByte(vlen, page.data, page.len);
page.len += LENGTH_DATA_LENGTH;
ByteConversion.longToByte(loggable.backLink, page.data, page.len);
page.len += LENGTH_FORWARD_LOCATION;
// save data
System.arraycopy(loggable.value, 0, page.data, page.len, vlen);
page.len += vlen;
//TOUNDERSTAND : why 2 occurences of ph.incRecordCount(); ?
// NOTE(review): incRecordCount() is called twice in this redo but only one
// record is appended — looks like a double increment; verify before changing.
pageHeader.incRecordCount();
pageHeader.setDataLength(page.len);
pageHeader.setNextTupleID(ItemId.getId(loggable.tid));
pageHeader.incRecordCount();
pageHeader.setLsn(loggable.getLsn());
page.setDirty(true);
dataCache.add(page, 2);
} catch (final ArrayIndexOutOfBoundsException e) {
LOG.error("page: " + page.getPageNum()
+ "; len = " + page.len +
"; value = " + loggable.value.length);
throw e;
}
}
}
/**
 * Undo the append of a relocated value record by splicing it (TID + length +
 * back link + data) out of the page data.
 */
protected void undoAddMovedValue(AddMovedValueLoggable loggable) {
final DOMPage page = getDOMPage(loggable.pageNum);
final DOMFilePageHeader pageHeader = page.getPageHeader();
final RecordPos rec = page.findRecord(ItemId.getId(loggable.tid));
SanityCheck.ASSERT(rec != null,
"Record with tid " + ItemId.getId(loggable.tid) + " not found: " +
debugPageContents(page));
// get the record's length
final short vlen = ByteConversion.byteToShort(page.data, rec.offset);
final int end = rec.offset + LENGTH_DATA_LENGTH + LENGTH_ORIGINAL_LOCATION + vlen;
final int dlen = pageHeader.getDataLength();
// remove value
try {
//Position the stream at the very beginning of the record
System.arraycopy(page.data, end, page.data, rec.offset - LENGTH_TID, dlen - end);
} catch (final ArrayIndexOutOfBoundsException e) {
LOG.error(e);
SanityCheck.TRACE("Error while copying data on page " + page.getPageNum() +
"; tid: " + loggable.tid +
"; offset: " + (rec.offset - LENGTH_TID) +
"; end: " + end + "; len: " + (dlen - end));
}
page.len = dlen - (LENGTH_TID + LENGTH_DATA_LENGTH + LENGTH_ORIGINAL_LOCATION + vlen);
if (page.len < 0) {
LOG.error("page length < 0");
//TODO : throw exception ? -pb
}
pageHeader.setDataLength(page.len);
pageHeader.decRecordCount();
pageHeader.setLsn(loggable.getLsn());
page.setDirty(true);
dataCache.add(page);
}
/**
 * Redo an update of a page header's previous/next data-page links.
 *
 * Only links that were actually set in the log record (not NO_PAGE) are
 * applied. Skipped entirely when the page never reached disk before this
 * log record or already reflects it.
 *
 * @param loggable carries the target page number and the new prev/next links
 */
protected void redoUpdateHeader(UpdateHeaderLoggable loggable) {
    final DOMPage page = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = page.getPageHeader();
    // guard clause: nothing to replay for this page
    if (header.getLsn() == Lsn.LSN_INVALID || !requiresRedo(loggable, page)) {
        return;
    }
    if (loggable.nextPage != Page.NO_PAGE) {
        header.setNextDataPage(loggable.nextPage);
    }
    if (loggable.prevPage != Page.NO_PAGE) {
        header.setPrevDataPage(loggable.prevPage);
    }
    header.setLsn(loggable.getLsn());
    page.setDirty(true);
    dataCache.add(page, 2);
}
/**
 * Undo an update of a page header's previous/next data-page links by
 * restoring the old link values recorded in the log entry.
 *
 * @param loggable carries the target page number and the old prev/next links
 */
protected void undoUpdateHeader(UpdateHeaderLoggable loggable) {
    final DOMPage page = getDOMPage(loggable.pageNum);
    final DOMFilePageHeader header = page.getPageHeader();
    // restore both chain pointers unconditionally
    header.setNextDataPage(loggable.oldNext);
    header.setPrevDataPage(loggable.oldPrev);
    header.setLsn(loggable.getLsn());
    page.setDirty(true);
    dataCache.add(page, 2);
}
/**
 * Page header for DOM file data pages. On top of the inherited BTree
 * header fields it persists: record count, used data length, next/prev
 * data-page links (for chaining the pages of one document) and the last
 * tuple id handed out on this page.
 *
 * On-disk layout, appended directly after the BTree header (the order in
 * read()/write() is the serialization contract):
 *   records (2 bytes) | dataLength (4) | nextDataPage (8) |
 *   previousDataPage (8) | tupleID (2)
 */
protected final class DOMFilePageHeader extends BTreePageHeader {
protected int dataLength = 0;
protected long nextDataPage = Page.NO_PAGE;
protected long previousDataPage = Page.NO_PAGE;
protected short tupleID = ItemId.UNKNOWN_ID;
protected short records = 0;
public final static short LENGTH_RECORDS_COUNT = 2; //sizeof short
public final static int LENGTH_DATA_LENGTH = 4; //sizeof int
// NOTE(review): the two pointer-size constants are declared long while the
// sibling constants are short/int; harmless — the compound assignments in
// read()/write() narrow implicitly — but inconsistent.
public final static long LENGTH_NEXT_PAGE_POINTER = 8; //sizeof long
public final static long LENGTH_PREV_PAGE_POINTER = 8; //sizeof long
public final static short LENGTH_CURRENT_TID = 2; //sizeof short
public DOMFilePageHeader() {
super();
}
/** Deserializes a header from the given buffer, starting at offset. */
public DOMFilePageHeader(byte[] data, int offset) throws IOException {
super(data, offset);
}
/** Decrements the record count. No underflow guard (see TODO). */
public void decRecordCount() {
//TODO : check negative value ? -pb
--records;
}
/** Returns the last tuple id handed out, without advancing it. */
public short getCurrentTupleID() {
//TODO : overflow check ? -pb
return tupleID;
}
/**
 * Advances to and returns the next tuple id for this page.
 * @throws RuntimeException when the id space of the page is exhausted
 */
public short getNextTupleID() {
if (++tupleID == ItemId.ID_MASK)
{throw new RuntimeException("No spare ids on page");}
return tupleID;
}
/** True while the page can still hand out at least one more tuple id. */
public boolean hasRoom() {
return tupleID < ItemId.MAX_ID;
}
/**
 * Sets the tuple-id counter, e.g. after a redo or a page clean-up.
 * @throws RuntimeException if the given id exceeds ItemId.MAX_ID
 */
public void setNextTupleID(short tupleID) {
if (tupleID > ItemId.MAX_ID)
{throw new RuntimeException("TupleID overflow! TupleID = " + tupleID);}
this.tupleID = tupleID;
}
/** Returns the number of payload bytes in use on this page. */
public int getDataLength() {
return dataLength;
}
/** Returns the page number of the next data page, or Page.NO_PAGE. */
public long getNextDataPage() {
return nextDataPage;
}
/** Returns the page number of the previous data page, or Page.NO_PAGE. */
public long getPreviousDataPage() {
return previousDataPage;
}
/** Returns the number of records stored on this page. */
public short getRecordCount() {
return records;
}
/** Increments the record count. */
public void incRecordCount() {
records++;
}
/**
 * Reads the header fields from the buffer; field order must mirror
 * write(). Returns the offset just past the header.
 */
public int read(byte[] data, int offset) throws IOException {
offset = super.read(data, offset);
records = ByteConversion.byteToShort(data, offset);
offset += LENGTH_RECORDS_COUNT;
dataLength = ByteConversion.byteToInt(data, offset);
offset += LENGTH_DATA_LENGTH;
nextDataPage = ByteConversion.byteToLong(data, offset);
offset += LENGTH_NEXT_PAGE_POINTER;
previousDataPage = ByteConversion.byteToLong(data, offset);
offset += LENGTH_PREV_PAGE_POINTER;
tupleID = ByteConversion.byteToShort(data, offset);
return offset + LENGTH_CURRENT_TID;
}
/**
 * Writes the header fields into the buffer; field order must mirror
 * read(). Returns the offset just past the header.
 */
public int write(byte[] data, int offset) throws IOException {
offset = super.write(data, offset);
ByteConversion.shortToByte(records, data, offset);
offset += LENGTH_RECORDS_COUNT;
ByteConversion.intToByte(dataLength, data, offset);
offset += LENGTH_DATA_LENGTH;
ByteConversion.longToByte(nextDataPage, data, offset);
offset += LENGTH_NEXT_PAGE_POINTER;
ByteConversion.longToByte(previousDataPage, data, offset);
offset += LENGTH_PREV_PAGE_POINTER;
ByteConversion.shortToByte(tupleID, data, offset);
return offset + LENGTH_CURRENT_TID;
}
/**
 * Sets the used payload length. Lengths exceeding the page work size are
 * only logged, not rejected (see TODO).
 */
public void setDataLength(int dataLength) {
if (dataLength > fileHeader.getWorkSize()) {
LOG.error("data too long for file header !");
//TODO :throw exception ? -pb
}
this.dataLength = dataLength;
}
/** Links this page to the next data page in the chain. */
public void setNextDataPage(long page) {
nextDataPage = page;
}
/** Links this page to the previous data page in the chain. */
public void setPrevDataPage(long page) {
previousDataPage = page;
}
/** Overwrites the record count, e.g. when initialising a fresh page. */
public void setRecordCount(short recs) {
records = recs;
}
}
/**
 * In-memory, cacheable image of one data page of the DOM file. Combines
 * the low-level {@link Page}, its {@link DOMFilePageHeader} and the raw
 * payload bytes (excluding the on-disk page header).
 *
 * Fix in this revision: {@link #equals(Object)} previously cast its
 * argument unconditionally and therefore threw ClassCastException for
 * non-DOMPage arguments and NullPointerException for null, violating the
 * Object.equals contract. It now returns false in those cases.
 */
protected final class DOMPage implements Cacheable {
    // the raw working data (without page header) of this page
    byte[] data;
    // the current size of the used data
    int len = 0;
    // the low-level page
    Page page;
    DOMFilePageHeader pageHeader;
    // fields required by Cacheable
    int refCount = 0;
    int timestamp = 0;
    // has the page been saved or is it dirty?
    boolean saved = true;
    // set to true if the page has been removed from the cache
    boolean invalidated = false;

    /** Allocates a fresh page from the file and wraps it with an empty buffer. */
    public DOMPage() {
        page = createNewPage();
        pageHeader = (DOMFilePageHeader) page.getPageHeader();
        data = new byte[fileHeader.getWorkSize()];
        len = 0;
    }

    /**
     * Loads the page stored at the given page number.
     *
     * @param pos page number to load
     */
    public DOMPage(long pos) {
        try {
            page = getPage(pos);
            load(page);
        } catch (final IOException ioe) {
            LOG.error(ioe);
            ioe.printStackTrace();
            //TODO :throw exception ? -pb
        }
    }

    /** Wraps an already-fetched low-level page. */
    public DOMPage(Page page) {
        this.page = page;
        load(page);
    }

    /**
     * Allocates a free page and initialises its header as an empty record
     * page with no chain links. Returns null if the allocation fails (the
     * error is logged).
     */
    protected Page createNewPage() {
        try {
            final Page page = getFreePage();
            final DOMFilePageHeader pageHeader = (DOMFilePageHeader) page.getPageHeader();
            pageHeader.setStatus(RECORD);
            pageHeader.setDirty(true);
            pageHeader.setNextDataPage(Page.NO_PAGE);
            pageHeader.setPrevDataPage(Page.NO_PAGE);
            pageHeader.setNextPage(Page.NO_PAGE);
            pageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
            pageHeader.setDataLength(0);
            pageHeader.setRecordCount((short) 0);
            if (currentDocument != null)
                {currentDocument.getMetadata().incPageCount();}
            return page;
        } catch (final IOException ioe) {
            LOG.error(ioe);
            return null;
        }
    }

    /**
     * Scans the page's records for the given tuple id.
     *
     * Record layout per entry: tid (2 bytes); then either an 8-byte
     * forward location (link records) or a 2-byte value length followed
     * by an optional 8-byte original location (relocated records), the
     * value bytes, and an 8-byte overflow pointer when the length equals
     * OVERFLOW.
     *
     * @param targetId the tuple id to look for
     * @return the record position, or null if the id is not on this page
     */
    public RecordPos findRecord(short targetId) {
        final int dlen = pageHeader.getDataLength();
        RecordPos rec = null;
        for (int pos = 0; pos < dlen;) {
            final short tupleID = ByteConversion.byteToShort(data, pos);
            pos += LENGTH_TID;
            if (ItemId.matches(tupleID, targetId)) {
                if (ItemId.isLink(tupleID)) {
                    rec = new RecordPos(pos, this, tupleID, true);
                } else {
                    rec = new RecordPos(pos, this, tupleID);
                }
                break;
            } else if (ItemId.isLink(tupleID)) {
                pos += LENGTH_FORWARD_LOCATION;
            } else {
                final short vlen = ByteConversion.byteToShort(data, pos);
                pos += LENGTH_DATA_LENGTH;
                if (vlen < 0) {
                    // corrupt length field: log context and keep scanning
                    LOG.error("page = " + page.getPageNum() +
                        "; pos = " + pos +
                        "; vlen = " + vlen +
                        "; tupleID = " + tupleID +
                        "; target = " + targetId);
                }
                if (ItemId.isRelocated(tupleID)) {
                    pos += LENGTH_ORIGINAL_LOCATION + vlen;
                } else {
                    pos += vlen;
                }
                if (vlen == OVERFLOW) {
                    pos += LENGTH_OVERFLOW_LOCATION;
                }
            }
        }
        return rec;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.exist.storage.cache.Cacheable#getKey()
     */
    public long getKey() {
        return page.getPageNum();
    }

    /*
     * (non-Javadoc)
     *
     * @see org.exist.storage.cache.Cacheable#getReferenceCount()
     */
    public int getReferenceCount() {
        return refCount;
    }

    /** Decrements the reference count, never going below zero. */
    public int decReferenceCount() {
        //TODO : check if the decrementation is allowed ? -pb
        return refCount > 0 ? --refCount : 0;
    }

    /** Increments the reference count, capped at Cacheable.MAX_REF. */
    public int incReferenceCount() {
        //TODO : check uf the incrementation is allowed ? -pb
        if (refCount < Cacheable.MAX_REF)
            {++refCount;}
        return refCount;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.exist.storage.cache.Cacheable#setReferenceCount(int)
     */
    public void setReferenceCount(int count) {
        refCount = count;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.exist.storage.cache.Cacheable#setTimestamp(int)
     */
    public void setTimestamp(int timestamp) {
        this.timestamp = timestamp;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.exist.storage.cache.Cacheable#getTimestamp()
     */
    public int getTimestamp() {
        return timestamp;
    }

    public DOMFilePageHeader getPageHeader() {
        return pageHeader;
    }

    public long getPageNum() {
        return page.getPageNum();
    }

    public boolean isDirty() {
        return !saved;
    }

    /** Flags the page (and its low-level header) as dirty or clean. */
    public void setDirty(boolean dirty) {
        saved = !dirty;
        page.getPageHeader().setDirty(dirty);
    }

    /**
     * Reads the page payload from disk into this object's buffer. An
     * empty read yields a fresh zero-length buffer of the work size.
     */
    private void load(Page page) {
        try {
            data = page.read();
            pageHeader = (DOMFilePageHeader) page.getPageHeader();
            len = pageHeader.getDataLength();
            if (data.length == 0) {
                data = new byte[fileHeader.getWorkSize()];
                len = 0;
                return;
            }
        } catch (final IOException ioe) {
            LOG.error(ioe);
            ioe.printStackTrace();
        }
        saved = true;
    }

    /**
     * Writes the page back to disk if its low-level header is flagged
     * dirty; no-op otherwise. IO errors are logged, not propagated.
     */
    public void write() {
        if (page == null)
            {return;}
        try {
            if (!pageHeader.isDirty())
                {return;}
            pageHeader.setDataLength(len);
            writeValue(page, data);
            setDirty(false);
        } catch (final IOException ioe) {
            LOG.error(ioe);
            //TODO : thow exception ? -pb
        }
    }

    /** Returns a hex dump of the page contents for debugging. */
    public String dumpPage() {
        return "Contents of page " + page.getPageNum() + ": " + hexDump(data);
    }

    /**
     * Flushes the page if dirty; optionally forces the journal so the
     * log reaches disk before the data (write-ahead rule).
     *
     * @return true if the page actually needed writing
     */
    public boolean sync(boolean syncJournal) {
        if (isDirty()) {
            write();
            if (isTransactional && syncJournal && logManager.lastWrittenLsn() < pageHeader.getLsn())
                {logManager.flushToLog(true);}
            return true;
        }
        return false;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.exist.storage.cache.Cacheable#allowUnload()
     */
    public boolean allowUnload() {
        return true;
    }

    /**
     * Two DOMPages are equal when they wrap the same underlying page.
     * Returns false for null or non-DOMPage arguments per the
     * Object.equals contract (previously threw).
     * NOTE(review): hashCode() is still inherited from Object, as before.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof DOMPage)) {
            // also covers obj == null
            return false;
        }
        final DOMPage other = (DOMPage) obj;
        return page.equals(other.page);
    }

    public void invalidate() {
        invalidated = true;
    }

    public boolean isInvalidated() {
        return invalidated;
    }

    /**
     * Walk through the page after records have been removed. Set the tid
     * counter to the next spare id that can be used for following
     * insertions.
     */
    public void cleanUp() {
        final int dlen = pageHeader.getDataLength();
        short maxTupleID = 0;
        short recordCount = 0;
        for (int pos = 0; pos < dlen; recordCount++) {
            final short tupleID = ByteConversion.byteToShort(data, pos);
            pos += LENGTH_TID;
            if (ItemId.getId(tupleID) > ItemId.MAX_ID) {
                LOG.error(debugPageContents(this));
                throw new RuntimeException("TupleID overflow in page " + getPageNum());
            }
            if (ItemId.getId(tupleID) > maxTupleID) {
                maxTupleID = ItemId.getId(tupleID);
            }
            // skip over the record body, mirroring the layout in findRecord()
            if (ItemId.isLink(tupleID)) {
                pos += LENGTH_FORWARD_LOCATION;
            } else {
                final short vlen = ByteConversion.byteToShort(data, pos);
                pos += LENGTH_DATA_LENGTH;
                if (ItemId.isRelocated(tupleID)) {
                    pos += vlen == OVERFLOW ?
                        LENGTH_ORIGINAL_LOCATION + LENGTH_OVERFLOW_LOCATION :
                        LENGTH_ORIGINAL_LOCATION + vlen;
                } else {
                    pos += vlen == OVERFLOW ? LENGTH_OVERFLOW_LOCATION : vlen;
                }
            }
        }
        pageHeader.setNextTupleID(maxTupleID);
    }
}
/**
 * Represents an overflow page. Overflow pages are created if the node
 * data exceeds the size of a single page. An overflow value is stored as
 * a chain of pages linked through their next-page pointers.
 *
 * @author wolf
 *
 */
protected final class OverflowDOMPage {
// first page of the chain; subsequent pages are reached via nextPage pointers
Page firstPage = null;
/** Starts a new overflow chain with a freshly allocated page. */
public OverflowDOMPage(Txn transaction) {
firstPage = createNewPage();
LOG.debug("Creating overflow page: " + firstPage.getPageNum());
}
/** Opens an existing overflow chain starting at the given page number. */
public OverflowDOMPage(long first) throws IOException {
firstPage = getPage(first);
}
/**
 * Allocates a free page and initialises its header as an empty record
 * page (same initialisation as DOMPage.createNewPage). Returns null on
 * allocation failure (the error is logged).
 */
protected Page createNewPage() {
try {
final Page page = getFreePage();
final DOMFilePageHeader pageHeader = (DOMFilePageHeader) page.getPageHeader();
pageHeader.setStatus(RECORD);
pageHeader.setDirty(true);
pageHeader.setNextDataPage(Page.NO_PAGE);
pageHeader.setPrevDataPage(Page.NO_PAGE);
pageHeader.setNextPage(Page.NO_PAGE);
pageHeader.setNextTupleID(ItemId.UNKNOWN_ID);
pageHeader.setDataLength(0);
pageHeader.setRecordCount((short) 0);
if (currentDocument != null)
{currentDocument.getMetadata().incPageCount();}
return page;
} catch (final IOException ioe) {
LOG.error(ioe);
return null;
}
}
// Write binary resource from inputstream
/**
 * Streams the input into a chain of overflow pages using two swapping
 * buffers: a chunk is only written out once the NEXT read proves more
 * data follows, so the next-page pointer can be set before the page is
 * flushed. Returns the number of pages written.
 */
public int write(Txn transaction, InputStream is) {
int pageCount = 0;
Page currentPage = firstPage;
try {
// Transfer bytes from inputstream to db
final int chunkSize = fileHeader.getWorkSize();
final byte[] buf = new byte[chunkSize];
final byte[] altbuf = new byte[chunkSize];
byte[] currbuf = buf;
// fullbuf, when non-null, holds a complete chunk awaiting write-out
byte[] fullbuf = null;
boolean isaltbuf = false;
int len;
int basebuf = 0;
int basemax = chunkSize;
boolean emptyPage = true;
while((len = is.read(currbuf, basebuf, basemax))!=-1) {
emptyPage=false;
// We are going to use a buffer swapping technique
if(fullbuf != null) {
// a full chunk is pending and more data has arrived: flush it
// to the current page and chain in a new page for what follows
final Value value = new Value(fullbuf, 0, chunkSize);
Page nextPage = createNewPage();
currentPage.getPageHeader().setNextPage(nextPage.getPageNum());
if (isTransactional && transaction != null) {
final long nextPageNum = nextPage.getPageNum();
final Loggable loggable = new WriteOverflowPageLoggable(
transaction, currentPage.getPageNum(),
nextPageNum , value);
writeToLog(loggable, currentPage);
}
writeValue(currentPage, value);
pageCount++;
currentPage = nextPage;
fullbuf=null;
}
// Let's swap the buffer
basebuf += len;
if(basebuf == chunkSize) {
// current buffer filled: park it in fullbuf and refill the other one
fullbuf = currbuf;
currbuf = (isaltbuf)? buf : altbuf;
isaltbuf = !isaltbuf;
basebuf = 0;
basemax = chunkSize;
} else {
basemax -= len;
}
}
// Detecting a zero byte stream
if(emptyPage) {
// NOTE(review): the pre-allocated first page is only detached here,
// not returned to the free list — presumably reclaimed elsewhere; verify.
currentPage.setPageNum(Page.NO_PAGE);
currentPage.getPageHeader().setNextPage(Page.NO_PAGE);
} else {
// Just in the limit of a page
if (fullbuf != null) {
basebuf = chunkSize;
currbuf = fullbuf;
}
// write the final (possibly partial) chunk and terminate the chain
final Value value = new Value(currbuf, 0, basebuf);
currentPage.getPageHeader().setNextPage(Page.NO_PAGE);
if (isTransactional && transaction != null) {
final long nextPageNum = Page.NO_PAGE;
final Loggable loggable = new WriteOverflowPageLoggable(
transaction, currentPage.getPageNum(), nextPageNum , value);
writeToLog(loggable, currentPage);
}
writeValue(currentPage, value);
pageCount++;
}
// TODO what if remaining length == 0 ?
} catch (final IOException ex) {
LOG.error("IO error while writing overflow page", ex);
//TODO : throw exception ? -pb
}
return pageCount;
}
/**
 * Writes an in-memory byte array into the overflow chain, one work-size
 * chunk per page, allocating and linking pages as needed. Returns the
 * number of pages written.
 * NOTE(review): for a zero-length array the loop never runs, so the
 * first page's next pointer is left untouched — assumed unreachable by
 * callers; confirm.
 */
public int write(Txn transaction, byte[] data) {
int pageCount = 0;
try {
Page currentPage = firstPage;
int remaining = data.length;
int pos = 0;
while (remaining > 0) {
final int chunkSize = remaining > fileHeader.getWorkSize() ?
fileHeader.getWorkSize() : remaining;
remaining -= chunkSize;
final Value value = new Value(data, pos, chunkSize);
Page nextPage;
if (remaining > 0) {
nextPage = createNewPage();
currentPage.getPageHeader().setNextPage(nextPage.getPageNum());
} else {
// last chunk: terminate the chain
nextPage = null;
currentPage.getPageHeader().setNextPage(Page.NO_PAGE);
}
if (isTransactional && transaction != null) {
final Loggable loggable = new WriteOverflowPageLoggable(
transaction, currentPage.getPageNum(),
remaining > 0 ? nextPage.getPageNum() : Page.NO_PAGE, value);
writeToLog(loggable, currentPage);
}
writeValue(currentPage, value);
pos += chunkSize;
currentPage = nextPage;
++pageCount;
}
} catch (final IOException e) {
LOG.warn("IO error while writing overflow page", e);
//TODO : throw exception ? -pb
}
return pageCount;
}
/** Materialises the whole overflow chain into a single byte array. */
public byte[] read() {
final ByteArrayOutputStream os = new ByteArrayOutputStream();
streamTo(os);
return os.toByteArray();
}
/**
 * Streams every page of the chain, in order, into the given output
 * stream. An IO error aborts the walk after logging (partial output).
 */
public void streamTo(OutputStream os) {
Page page = firstPage;
int count = 0;
while (page != null) {
try {
final byte[] chunk = page.read();
os.write(chunk);
final long nextPageNumber = page.getPageHeader().getNextPage();
page = (nextPageNumber == Page.NO_PAGE) ? null : getPage(nextPageNumber);
} catch (final IOException e) {
LOG.error("IO error while loading overflow page "
+ firstPage.getPageNum() + "; read: " + count, e);
//TODO : too soft ? throw the exception ?
break;
}
++count;
}
}
/**
 * Removes every page of the chain, logging each page's contents first
 * (when transactional) so the delete can be undone during recovery.
 */
public void delete(Txn transaction) throws IOException {
Page page = firstPage;
while (page != null) {
LOG.debug("Removing overflow page " + page.getPageNum());
final long nextPageNumber = page.getPageHeader().getNextPage();
if (isTransactional && transaction != null) {
final byte[] chunk = page.read();
final Loggable loggable = new RemoveOverflowLoggable(transaction,
page.getPageNum(), nextPageNumber, chunk);
writeToLog(loggable, page);
}
unlinkPages(page);
page = (nextPageNumber == Page.NO_PAGE) ? null : getPage(nextPageNumber);
}
}
/** Returns the page number of the first page in the chain. */
public long getPageNum() {
return firstPage.getPageNum();
}
}
/**
 * BTree traversal callback that accumulates either the visited keys or
 * the values the key pointers resolve to, depending on the mode chosen
 * at construction time.
 */
private final class FindCallback implements BTreeCallback {
    public final static int KEYS = 1;
    public final static int VALUES = 0;

    int mode;
    ArrayList<Value> values = new ArrayList<Value>();

    /** @param mode either {@link #KEYS} or {@link #VALUES} */
    public FindCallback(int mode) {
        this.mode = mode;
    }

    /** Returns everything collected so far. */
    public ArrayList<Value> getValues() {
        return values;
    }

    /**
     * Invoked for each matching index entry; collects according to mode.
     * Returns true to continue the traversal, false on an unknown mode.
     */
    public boolean indexInfo(Value value, long pointer) {
        if (mode == KEYS) {
            // collect the raw key itself
            values.add(value);
            return true;
        }
        if (mode == VALUES) {
            // resolve the pointer to its record and copy out the stored value
            final RecordPos recordPos = findRecord(pointer);
            final short valueLength =
                ByteConversion.byteToShort(recordPos.getPage().data, recordPos.offset);
            values.add(new Value(recordPos.getPage().data,
                recordPos.offset + LENGTH_DATA_LENGTH, valueLength));
            return true;
        }
        return false;
    }
}
}