/* HeliDB -- A simple database for Java, http://www.helidb.org
* Copyright (C) 2008, 2009 Karl Gustafsson
*
* This file is a part of HeliDB.
*
* HeliDB is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* HeliDB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.helidb.resources.perf;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.IdentityHashMap;
import java.util.Map;
import org.entityfs.Directory;
import org.entityfs.ReadWritableFile;
import org.entityfs.fs.FSRWFileSystemBuilder;
import org.entityfs.support.log.LogAdapterHolder;
import org.entityfs.util.io.ReadWritableFileAdapter;
import org.helidb.Database;
import org.helidb.backend.DatabaseBackendFactory;
import org.helidb.backend.heapcrs.ConstantRecordSizeHeapBackendFactory;
import org.helidb.backend.index.bplus.BPlusTreeIndexBackendFactory;
import org.helidb.impl.txn.sc.ShadowCopyTransactionalDatabase;
import org.helidb.impl.txn.sc.SingleFileAndProxiedFileManager;
import org.helidb.impl.txn.sc.SingleFileManager;
import org.helidb.lang.hasher.IntegerToIntegerHasher;
import org.helidb.lang.serializer.IntegerNullSerializer;
import org.helidb.lang.serializer.IntegerSerializer;
import org.helidb.lang.serializer.LongSerializer;
import org.helidb.test.support.FileSupport;
import org.helidb.util.bplus.FileBackedNodeRepositoryBuilder;
import org.helidb.util.bplus.FixedSizeNodeSizeStrategy;
import org.helidb.util.bplus.LruCacheNodeRepositoryBuilder;
import org.helidb.util.bplus.NodeRepositoryBuilder;
/**
 * Performance test configuration for a {@link ShadowCopyTransactionalDatabase}
 * backed by a constant record size heap backend with a B+ tree index, with an
 * optional LRU node cache in front of the B+ tree's node repository.
 * <p>
 * {@link #setup(LogAdapterHolder)} creates the configured number of databases,
 * each with its own temporary database file, index file and temporary
 * directory; {@link #tearDown()} closes the databases and deletes the files.
 */
public class ShadowCopyTransactionalDatabaseCRSHeapWBPlusTreeIndexConfiguration extends AbstractTestConfiguration implements CRSTestConfiguration
{
	// Maximum number of nodes in the B+ tree's LRU node cache. A value <= 0
	// disables the cache (see setup()).
	private final int m_bTreeLruNodeCacheSize;
	// The size of each B+ tree node, in bytes.
	private final int m_bTreeNodeSize;
	private Collection<ShadowCopyTransactionalDatabase<Integer, Long, Long>> m_databases;
	// Maps each database to its backing files: { database file, temp directory,
	// index file }. Keyed by reference identity via an IdentityHashMap rather
	// than by System.identityHashCode(db): identity hash codes are not
	// guaranteed to be unique, so Integer keys could silently collide, leaking
	// one database's files and deleting another's twice in tearDown().
	private Map<ShadowCopyTransactionalDatabase<Integer, Long, Long>, File[]> m_dbFiles;

	/**
	 * Create the configuration.
	 * @param noOfDatabases The number of databases to create in setup().
	 * @param noOfBaseRecords The number of records inserted before the test.
	 * @param noOfAdditionalRecords The number of records inserted by the test.
	 * @param bTreeLruNodeCacheSize The max size of the B+ tree LRU node cache.
	 * A value <= 0 disables the cache.
	 * @param bTreeNodeSize The B+ tree node size, in bytes.
	 */
	public ShadowCopyTransactionalDatabaseCRSHeapWBPlusTreeIndexConfiguration(int noOfDatabases, int noOfBaseRecords, int noOfAdditionalRecords, int bTreeLruNodeCacheSize, int bTreeNodeSize)
	{
		super(noOfDatabases, noOfBaseRecords, noOfAdditionalRecords);
		m_bTreeLruNodeCacheSize = bTreeLruNodeCacheSize;
		m_bTreeNodeSize = bTreeNodeSize;
	}

	public String getGraphFileNamePrefix()
	{
		return "sct_crsh_bpti_" + m_bTreeNodeSize + "_" + m_bTreeLruNodeCacheSize + "_";
	}

	public String getDatabaseImplementationName()
	{
		return "ShadowCopyTxnDatabase";
	}

	public String[] getBackendImplementationNames()
	{
		return new String[] { "BPlusTreeIndexBackend", "CRSHeapBackend" };
	}

	public String getAdditionalInfo()
	{
		return "nd sz: " + m_bTreeNodeSize + (m_bTreeLruNodeCacheSize > 0 ? " cache: " + m_bTreeLruNodeCacheSize + "nds" : "");
	}

	public Collection<? extends Database<Integer, Long>> getDatabases()
	{
		return m_databases;
	}

	public String getHeader()
	{
		return "Shadow copy transactional database with constant record size heap backend with B+ tree index. B+ Tree LRU node cache " + m_bTreeLruNodeCacheSize + " entries. B+ Tree node size " + m_bTreeNodeSize + " bytes";
	}

	/**
	 * Create the configured number of databases. Each database gets a
	 * temporary database file, a temporary index file and a temporary
	 * directory for the shadow copies. All created files are recorded in
	 * m_dbFiles so that tearDown() can delete them.
	 */
	public void setup(LogAdapterHolder lah)
	{
		final int noOfDatabases = getNoOfDatabases();
		m_databases = new ArrayList<ShadowCopyTransactionalDatabase<Integer, Long, Long>>(noOfDatabases);
		m_dbFiles = new IdentityHashMap<ShadowCopyTransactionalDatabase<Integer, Long, Long>, File[]>(noOfDatabases);
		for (int i = 0; i < noOfDatabases; i++)
		{
			File f = FileSupport.createTempFile();
			ReadWritableFile ff = new ReadWritableFileAdapter(f);
			File indf = FileSupport.createTempFile();
			ReadWritableFile indff = new ReadWritableFileAdapter(indf);
			File tmpDir = FileSupport.createTempDirectory();
			Directory tmpDirD = new FSRWFileSystemBuilder().setRoot(tmpDir).create().getRootDirectory();
			NodeRepositoryBuilder<Integer> nrb = new FileBackedNodeRepositoryBuilder<Integer, Long>().setNodeSizeStrategy(new FixedSizeNodeSizeStrategy(m_bTreeNodeSize)).setKeySerializer(IntegerNullSerializer.INSTANCE).setValueSerializer(LongSerializer.INSTANCE);
			if (m_bTreeLruNodeCacheSize > 0)
			{
				// Put an LRU cache in front of the file backed node repository.
				nrb = new LruCacheNodeRepositoryBuilder<Integer, Long>().setProxiedBuilder(nrb).setMaxCacheSize(m_bTreeLruNodeCacheSize);
			}
			DatabaseBackendFactory<Integer, Long, Long> baf = new ConstantRecordSizeHeapBackendFactory<Integer, Long>(IntegerSerializer.INSTANCE, LongSerializer.INSTANCE, 0, 8192, lah);
			baf = new BPlusTreeIndexBackendFactory<Integer, Long, Integer, Long>(baf, nrb, IntegerToIntegerHasher.INSTANCE, lah);
			ShadowCopyTransactionalDatabase<Integer, Long, Long> db = new ShadowCopyTransactionalDatabase<Integer, Long, Long>(new SingleFileAndProxiedFileManager(new SingleFileManager(ff, tmpDirD), indff, tmpDirD), baf, false, lah);
			m_dbFiles.put(db, new File[] { f, tmpDir, indf });
			m_databases.add(db);
		}
	}

	/**
	 * Close all databases created by setup() and delete their backing files.
	 */
	public void tearDown()
	{
		for (ShadowCopyTransactionalDatabase<Integer, Long, Long> db : m_databases)
		{
			db.close();
			// Look up the file array once instead of once per file.
			for (File f : m_dbFiles.get(db))
			{
				deleteFile(f);
			}
		}
		m_dbFiles = null;
		m_databases = null;
	}
}