/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* $Id: HashFiler.java 571938 2007-09-02 10:14:13Z vgritsenko $
*/
package org.apache.xindice.core.filer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.xindice.core.DBException;
import org.apache.xindice.core.FaultCodes;
import org.apache.xindice.core.data.Key;
import org.apache.xindice.core.data.Record;
import org.apache.xindice.core.data.RecordSet;
import org.apache.xindice.core.data.Value;
import org.apache.xindice.util.Configuration;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
/**
 * HashFiler is a Filer implementation based on the Paged class. By
 * extending Paged, HashFiler inherits the ability to maintain Record
 * metadata such as creation and modification time. It also provides
 * additional flexibility in how it retrieves blocks of data and
 * allocates Record space.
 *
 * <br/>
 * HashFiler has the following configuration attributes:
* <ul>
* <li><strong>pagesize</strong>: Size of the page used by the paged file.
* The default page size is 4096 bytes. This parameter can only be set
* before the paged file is created; once created, it cannot be
* changed.</li>
* <li><strong>pagecount</strong>: This parameter has a special meaning
* for HashFiler: it determines the size of the hash table's main
* storage, i.e. the number of pages the filer is created with. The
* default is 1024. Note that if this is made too small, the
* efficiency of the hash table suffers.</li>
* <li><strong>maxkeysize</strong>: Maximum allowed size of the key.
* Default maximum key size is 256 bytes.</li>
* <li><strong>max-descriptors</strong>: Defines the maximum number of
* simultaneously open file descriptors this paged file can have.
* Several descriptors are needed to provide multithreaded access
* to the underlying file. Too large a number will limit the number of
* collections you can open. The default value is 16
* (DEFAULT_DESCRIPTORS_MAX).</li>
* </ul>
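*
* <br/>
* A configuration sketch using the attributes above (the enclosing
* element and its exact placement within the collection configuration
* are illustrative):
* <pre>
* &lt;filer class="org.apache.xindice.core.filer.HashFiler"
*        pagesize="4096" pagecount="1024"
*        maxkeysize="256" max-descriptors="16"/&gt;
* </pre>
*
* <br/>
* A minimal usage sketch of the Filer API (error handling and
* Configuration setup omitted):
* <pre>
* HashFiler filer = new HashFiler();
* filer.setLocation(dbRoot, "records"); // backing file: records.tbl
* filer.create();
* filer.open();
* filer.writeRecord(new Key("hello"), new Value("world"));
* Record r = filer.readRecord(new Key("hello"));
* filer.close();
* </pre>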
*
* @version $Revision: 571938 $, $Date: 2007-09-02 06:14:13 -0400 (Sun, 02 Sep 2007) $
*/
public class HashFiler extends Paged
implements Filer {
private static final Log log = LogFactory.getLog(HashFiler.class);
/**
* Record page status
*/
protected static final byte RECORD = 1;
private HashFileHeader fileHeader;
public HashFiler() {
super();
fileHeader = (HashFileHeader) getFileHeader();
}
public void setLocation(File root, String location) {
setFile(new File(root, location + ".tbl"));
}
public String getName() {
return "HashFiler";
}
public void setConfig(Configuration config) {
super.setConfig(config);
// Since pageCount is used as a hash table size, all pageCount pages
// are considered used; so set totalCount to pageCount.
fileHeader.setTotalCount(fileHeader.getPageCount());
}
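/**
 * Locates the page containing the record for the given key. The key's
 * hash selects the chain head page in the main hash table; the collision
 * chain is then followed until a RECORD page whose stored hash and key
 * match is found, or the chain ends.
 *
 * @return the record's page, or null if the key is not present
 */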
private Page seekRecordPage(Key key) throws IOException {
int hash = key.hashCode();
long pageNum = hash % fileHeader.getPageCount();
Page p = getPage(pageNum);
synchronized (p) {
while (true) {
HashPageHeader ph = (HashPageHeader) p.getPageHeader();
if (ph.getStatus() == RECORD && ph.getKeyHash() == hash && p.getKey().equals(key)) {
return p;
}
pageNum = ph.getNextCollision();
if (pageNum == NO_PAGE) {
return null;
}
p = getPage(pageNum);
}
}
}
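/**
 * Reads the Record (value plus metadata) for the given key.
 *
 * @return the Record, or null if the key is not present
 */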
public Record readRecord(Key key) throws DBException {
return readRecord(key, false);
}
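/**
 * Reads the Record for the given key. If metaOnly is true, only the
 * creation/modification metadata is read and the Record's value is
 * left null.
 *
 * @return the Record, or null if the key is not present or an error occurred
 */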
public Record readRecord(Key key, boolean metaOnly) throws DBException {
if (key == null || key.getLength() == 0) {
return null;
}
checkOpened();
try {
Page startPage = seekRecordPage(key);
if (startPage != null) {
Value v = metaOnly ? null : readValue(startPage);
HashPageHeader sph = (HashPageHeader) startPage.getPageHeader();
HashMap meta = new HashMap(3);
meta.put(Record.CREATED, new Long(sph.getCreated()));
meta.put(Record.MODIFIED, new Long(sph.getModified()));
return new Record(key, v, meta);
}
} catch (Exception e) {
if (log.isWarnEnabled()) {
log.warn("ignored exception", e);
}
}
return null;
}
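/**
 * Locates, or allocates, the page a record with the given key should be
 * written to. The collision chain starting at the key's chain head page
 * is searched for either an existing page for this key or a free
 * (UNUSED or DELETED) page; if the end of the chain is reached, a new
 * page is allocated and linked in. The page's status and timestamps
 * are updated before it is returned.
 */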
private Page seekInsertionPage(Key key) throws IOException {
// Calculate hash and retrieve chain head page
int hash = key.hashCode();
Page p = getPage(hash % fileHeader.getPageCount());
// Synchronize by chain head page
synchronized (p) {
HashPageHeader ph;
while (true) {
ph = (HashPageHeader) p.getPageHeader();
if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
|| (ph.getStatus() == RECORD && ph.getKeyHash() == hash && p.getKey().equals(key))) {
// Found a free page, or an existing page for this same key
break;
}
// Check the chain
long pageNum = ph.getNextCollision();
if (pageNum == NO_PAGE) {
// Reached end of chain, add new page
Page np = getFreePage();
ph.setNextCollision(np.getPageNum());
p.write();
p = np;
ph = (HashPageHeader) p.getPageHeader();
ph.setNextCollision(NO_PAGE);
break;
}
// Go to the next page in chain
p = getPage(pageNum);
}
// At this point, p is the insertion page; update its status and timestamps
long t = System.currentTimeMillis();
if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
// This is a new Record
fileHeader.incRecordCount();
ph.setCreated(t);
}
ph.setModified(t);
ph.setStatus(RECORD);
}
return p;
}
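/**
 * Writes the value for the given key, creating a new record if the key
 * is not yet present and overwriting the existing record otherwise.
 *
 * @return a Record holding the key, value, and creation/modification metadata
 */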
public Record writeRecord(Key key, Value value) throws DBException {
// Check that key is not larger than space on the page
if (key == null || key.getLength() == 0 || key.getLength() > fileHeader.getPageSize() - fileHeader.getPageHeaderSize()) {
throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
}
if (value == null) {
throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
}
checkOpened();
Page p = null;
try {
p = seekInsertionPage(key);
p.setKey(key);
writeValue(p, value);
} catch (Exception e) {
// FIXME This is not enough: at this point, a new page could already have been linked into the collision chain
if (p != null) {
p.getPageHeader().setStatus(DELETED);
try {
p.write();
} catch (IOException ignored) {
// Double exception
}
}
throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Exception: " + e, e);
}
flush();
HashPageHeader ph = (HashPageHeader) p.getPageHeader();
HashMap meta = new HashMap(3);
meta.put(Record.CREATED, new Long(ph.getCreated()));
meta.put(Record.MODIFIED, new Long(ph.getModified()));
return new Record(key, value, meta);
}
/**
* Mark pages in primary store as 'DELETED', and let Paged handle all
* overflow pages.
*/
protected void unlinkPages(Page page) throws IOException {
// Handle the page if it's in primary space by setting its status to
// DELETED and freeing any overflow pages linked to it.
if (page.getPageNum() < fileHeader.getPageCount()) {
long nextPage = page.getPageHeader().getNextPage();
page.getPageHeader().setStatus(DELETED);
page.getPageHeader().setNextPage(NO_PAGE);
page.write();
// If there are no chained pages, we are done.
if (nextPage == NO_PAGE) {
return;
}
// Free the chained pages from the page that was just removed
page = getPage(nextPage);
}
super.unlinkPages(page);
}
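/**
 * Deletes the record with the given key: the record's page is unlinked
 * from its collision chain (the predecessor is relinked when the page
 * is not the chain head) and then released via unlinkPages().
 *
 * @return true if a record was found and deleted
 */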
public boolean deleteRecord(Key key) throws DBException {
if (key == null || key.getLength() == 0) {
return false;
}
checkOpened();
try {
int hash = key.hashCode();
long pageNum = hash % fileHeader.getPageCount();
Page page = getPage(pageNum);
synchronized (page) {
HashPageHeader prevHead = null;
HashPageHeader pageHead;
Page prev = null;
while (true) {
pageHead = (HashPageHeader) page.getPageHeader();
if (pageHead.getStatus() == RECORD && pageHead.getKeyHash() == hash && page.getKey().equals(key)) {
break;
}
pageNum = pageHead.getNextCollision();
if (pageNum == NO_PAGE) {
return false;
}
prev = page;
prevHead = pageHead;
page = getPage(pageNum);
}
if (prev != null) {
prevHead.setNextCollision(pageHead.getNextCollision());
pageHead.setNextCollision(NO_PAGE);
prev.write();
}
unlinkPages(page);
}
fileHeader.decRecordCount();
flush();
return true;
} catch (Exception e) {
if (log.isWarnEnabled()) {
log.warn("ignored exception", e);
}
}
return false;
}
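/** Returns the number of records stored in this filer. */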
public long getRecordCount() throws DBException {
checkOpened();
return fileHeader.getRecordCount();
}
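/** Returns a RecordSet enumerating all records in this filer. */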
public RecordSet getRecordSet() throws DBException {
checkOpened();
return new HashFilerRecordSet();
}
/**
 * HashFilerRecordSet iterates over the main hash table pages and their
 * collision chains directly, without using a BTree. All keys are
 * collected eagerly when the record set is constructed.
 */
private class HashFilerRecordSet implements RecordSet {
private List keys = new ArrayList();
private Iterator iter;
public HashFilerRecordSet() {
try {
long pageNum = 0;
// Iterate over main hash table...
while (pageNum < fileHeader.getPageCount()) {
Page p = getPage(pageNum);
HashPageHeader ph = (HashPageHeader) p.getPageHeader();
if (ph.getStatus() == RECORD) {
keys.add(p.getKey());
}
// ... and over collision chains
while (ph.getNextCollision() != NO_PAGE) {
long pn = ph.getNextCollision();
p = getPage(pn);
ph = (HashPageHeader) p.getPageHeader();
if (ph.getStatus() == RECORD) {
keys.add(p.getKey());
}
}
pageNum++;
}
iter = keys.iterator();
} catch (Exception e) {
if (log.isWarnEnabled()) {
log.warn("ignored exception", e);
}
}
}
public synchronized Key getNextKey() {
return (Key) iter.next();
}
public synchronized Record getNextRecord() throws DBException {
return readRecord((Key) iter.next(), false);
}
public synchronized Value getNextValue() throws DBException {
return getNextRecord().getValue();
}
public synchronized boolean hasMoreRecords() {
return iter.hasNext();
}
}
////////////////////////////////////////////////////////////////////
public FileHeader createFileHeader() {
return new HashFileHeader();
}
public PageHeader createPageHeader() {
return new HashPageHeader();
}
/**
* HashFileHeader
*/
private final class HashFileHeader extends FileHeader {
private long totalBytes;
public HashFileHeader() {
super();
// For hash filer, totalCount >= pageCount. See setConfig().
setTotalCount(getPageCount());
}
protected synchronized void read(RandomAccessFile raf) throws IOException {
super.read(raf);
totalBytes = raf.readLong();
}
protected synchronized void write(RandomAccessFile raf) throws IOException {
super.write(raf);
raf.writeLong(totalBytes);
}
/** The total number of bytes in use by the file */
public synchronized void setTotalBytes(long totalBytes) {
this.totalBytes = totalBytes;
setDirty();
}
/** The total number of bytes in use by the file */
public synchronized long getTotalBytes() {
return totalBytes;
}
/** Adjust total number of bytes in use by the file */
public synchronized void addTotalBytes(int count) {
totalBytes += count;
// Mark the header dirty, as the other mutators of this header do,
// so the updated byte count is written out on flush
setDirty();
}
}
/**
* HashPageHeader
*/
protected final class HashPageHeader extends PageHeader {
private long created = 0;
private long modified = 0;
private long nextCollision = NO_PAGE;
public HashPageHeader() {
}
public HashPageHeader(DataInput dis) throws IOException {
super(dis);
}
public synchronized void read(DataInput dis) throws IOException {
super.read(dis);
if (getStatus() == UNUSED) {
return;
}
created = dis.readLong();
modified = dis.readLong();
nextCollision = dis.readLong();
}
public synchronized void write(DataOutput dos) throws IOException {
super.write(dos);
dos.writeLong(created);
dos.writeLong(modified);
dos.writeLong(nextCollision);
}
public synchronized void setRecordLen(int recordLen) {
fileHeader.addTotalBytes(recordLen - getRecordLen());
super.setRecordLen(recordLen);
}
/** UNIX-time when this record was created */
public synchronized void setCreated(long created) {
this.created = created;
setDirty();
}
/** UNIX-time when this record was created */
public synchronized long getCreated() {
return created;
}
/** UNIX-time when this record was last modified */
public synchronized void setModified(long modified) {
this.modified = modified;
setDirty();
}
/** UNIX-time when this record was last modified */
public synchronized long getModified() {
return modified;
}
/** The next page for a Record collision (if any) */
public synchronized void setNextCollision(long nextCollision) {
this.nextCollision = nextCollision;
setDirty();
}
/** The next page for a Record collision (if any) */
public synchronized long getNextCollision() {
return nextCollision;
}
}
}