/* Open Source Java Caching Service
 * Copyright (C) 2002 Frank Karlström
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* The author can be contacted by email: fjankk@users.sourceforge.net
*/
package org.fjank.jcache.persistence;
import java.io.File;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Iterator;
import javax.util.jcache.CacheAttributes;
import javax.util.jcache.DiskCacheException;
import org.fjank.jcache.CacheObject;
import org.fjank.jcache.DiskCacheObject;
import EDU.oswego.cs.dl.util.concurrent.ReadWriteLock;
import EDU.oswego.cs.dl.util.concurrent.WriterPreferenceReadWriteLock;
/**
 * Class implementing a simple disk persistence solution.
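 * <p>
 * A minimal usage sketch; the attributes, cache object, and key are
 * assumed to have been created elsewhere:
 * <pre>
 * DiskCache diskCache = new DiskCache(attributes);
 * boolean stored = diskCache.update(cacheObject);
 * CacheObject restored = diskCache.getObject(key);
 * diskCache.close();
 * </pre>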
*/
public class DiskCache implements Serializable {
/**
* Indicates whether the cache is 'alive', defined as having been
* initialized, but not yet disposed.
*/
private boolean alive;
/** the name of the cache */
private final String cacheName;
    /** the cache attributes, used to limit the size of the disk cache. */
private final CacheAttributes cattr;
    /** the adapter which contains the actual data. */
private transient CacheFileAdapter dataFile;
/** the filename */
private final String fileName;
/** the adapter which contains the keys and positions */
private transient CacheFileAdapter keyFile;
/** a map of the keys */
private HashMap keyHash;
    /** the directory which holds the cache files. */
private final File rafDir;
/**
* Each instance of a Disk cache should use this lock to synchronize reads
     * and writes to the underlying storage mechanism.
*/
private final ReadWriteLock storageLock = new WriterPreferenceReadWriteLock();
/**
     * The current size of the disk cache, in bytes.
*/
private int currentSize;
    /**
     * Creates a new DiskCache object.
     *
     * @param attributes the attributes for this disk cache.
     *
     * @throws DiskCacheException if any exceptions occur.
     */
public DiskCache(final CacheAttributes attributes) throws DiskCacheException {
this.cacheName = "Fjanks FKache";
String rootDirName = attributes.getDiskPath();
this.cattr = attributes;
this.fileName = cacheName;
rafDir = new File(rootDirName);
rafDir.mkdirs();
dataFile = new CacheFileAdapter(new File(rafDir, fileName + ".data"));
keyFile = new CacheFileAdapter(new File(rafDir, fileName + ".key"));
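        // A non-empty key file means a previous cache exists on disk;
        // recover its key map. If no keys could be recovered, the data
        // file contents are unreachable and may be discarded.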
if (keyFile.length() > 0) {
loadKeysFromFile();
if (keyHash.size() == 0) {
dataFile.reset();
}
} else {
keyHash = new HashMap();
if (dataFile.length() > 0) {
dataFile.reset();
}
}
alive = true;
}
/**
* gets an object from the disk cache
*
* @param key the key for the object
*
* @return an object from the disk cache
*
* @throws DiskCacheException if exceptions occur.
*/
    private CacheObject doGet(final Serializable key) throws DiskCacheException {
        try {
            storageLock.readLock().acquire();
        } catch (InterruptedException e) {
            throw new DiskCacheException("The read was interrupted.");
        }
        // The lock is only released when the acquire above succeeded.
        try {
            if (!alive) {
                return null;
            }
            return readElement(key);
        } finally {
            storageLock.readLock().release();
        }
    }
    /**
     * Updates the disk cache.
     *
     * @param key the key to store the object under.
     * @param data the serialized object to write.
     */
    private void doUpdate(final Object key, final byte[] data) {
        final DiskElementDescriptor ded = new DiskElementDescriptor();
        ded.init(dataFile.length(), data);
        try {
            storageLock.writeLock().acquire();
        } catch (InterruptedException e) {
            // Interrupted before the lock was obtained; the update is skipped.
            return;
        }
        try {
            if (!alive) {
                return;
            }
            // If an entry already exists for this key and the new data fits
            // into its old slot, reuse the old position instead of appending.
            final DiskElementDescriptor old =
                (DiskElementDescriptor) keyHash.put(key, ded);
            if ((old != null) && (ded.len <= old.len)) {
                ded.pos = old.pos;
            }
            dataFile.write(data, ded.pos);
        } finally {
            storageLock.writeLock().release();
        }
    }
/**
* gets an object from the diskCache
*
* @param key the key for the object
*
* @return an object from the diskCache
*
* @throws DiskCacheException if exceptions occur.
*/
public final CacheObject getObject(final Serializable key)
throws DiskCacheException {
if (!alive) {
return null;
}
return doGet(key);
}
    /**
     * Loads the keys from an existing cache file.
     *
     * @throws DiskCacheException if exceptions occur.
     */
    private void loadKeysFromFile() throws DiskCacheException {
        try {
            storageLock.readLock().acquire();
        } catch (InterruptedException e) {
            throw new DiskCacheException("Loading of the keys was interrupted.");
        }
        try {
            keyHash = (HashMap) keyFile.readObject(0);
            if (keyHash == null) {
                keyHash = new HashMap();
            }
        } finally {
            storageLock.readLock().release();
        }
    }
/**
     * Closes the disk cache and defragments the disk files.
*/
    public void close() {
        try {
            storageLock.writeLock().acquire();
        } catch (InterruptedException e) {
            // Interrupted before the lock was obtained; the cache is left open.
            return;
        }
        try {
            if (!alive) {
                return;
            }
            optimizeFile();
            dataFile.close();
            dataFile = null;
            keyFile.close();
            keyFile = null;
        } catch (DiskCacheException e) {
            // The cache is shutting down anyway; the error will resurface
            // when the cache files are opened the next time.
        } finally {
            alive = false;
            storageLock.writeLock().release();
        }
    }
    /**
     * Defragments the cache files.
     *
     * @throws DiskCacheException if exceptions occur.
     *
     * @todo optimize this so it can be done online.
     */
private void optimizeFile() throws DiskCacheException {
HashMap keyHashTemp = new HashMap();
CacheFileAdapter dataFileTemp =
new CacheFileAdapter(new File(rafDir, fileName + "Temp.data"));
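        // Copy every live element into the temporary data file, building a
        // new key map whose descriptors point at the compacted positions.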
Iterator itr = keyHash.keySet().iterator();
while (itr.hasNext()) {
Serializable key = (Serializable) itr.next();
CacheObject tempDe = readElement(key);
DiskElementDescriptor de = dataFileTemp.appendObject(tempDe);
keyHashTemp.put(key, de);
}
dataFileTemp.close();
dataFile.close();
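        // Swap the compacted temporary file in for the old data file.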
File oldData = new File(rafDir, fileName + ".data");
if (oldData.exists()) {
oldData.delete();
}
File newData = new File(rafDir, fileName + "Temp.data");
File newFileName = new File(rafDir, fileName + ".data");
if (newData.exists()) {
newData.renameTo(newFileName);
}
keyHash = keyHashTemp;
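        // Persist the compacted key map so it can be recovered at startup.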
keyFile.reset();
if (keyHash.size() > 0) {
keyFile.writeObject(keyHash, 0);
}
}
/**
* reads an element from the diskcache
*
* @param key the key for the diskobject
*
* @return an element from the diskcache
*
* @throws DiskCacheException if exceptions occur.
*/
CacheObject readElement(final Serializable key)
throws DiskCacheException {
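        // The descriptor records the position and length of the serialized
        // object within the data file.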
DiskElementDescriptor ded = (DiskElementDescriptor) keyHash.get(key);
if (ded != null) {
Serializable readObject = dataFile.readObject(ded.pos);
return ((DiskCacheObject) readObject).getCacheObject();
}
throw new DiskCacheException("The object " + key
+ " was not found in the diskCache.");
}
    /**
     * Adds the provided element to the cache.
     *
     * @param cacheElement the element to add.
     *
     * @return true if the element was written; false if writing it would
     *         exceed the configured maximum disk size.
     */
public final boolean update(final CacheObject cacheElement) {
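        // Serialize up front so the size check can happen before anything
        // is written to disk.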
byte[] data = CacheFileAdapter.serialize(new DiskCacheObject(cacheElement));
        final int newSize = currentSize + data.length;
        final int maxSize = cattr.getDiskSize() * 1024 * 1024;
        if (newSize > maxSize) {
            return false;
        }
        doUpdate(cacheElement.getKey(), data);
        currentSize = newSize;
return true;
}
    /**
     * Removes all objects in this disk cache. Just a quick and dirty
     * implementation to make things work.
     *
     * @throws DiskCacheException if the removal was not successful.
     *
     * @todo make this operation asynchronous to speed up flushing.
     */
    public void removeAll() throws DiskCacheException {
        try {
            storageLock.writeLock().acquire();
        } catch (InterruptedException e) {
            throw new DiskCacheException("The removal was interrupted.");
        }
        try {
            if (dataFile != null) {
                dataFile.reset();
            }
            if (keyFile != null) {
                keyFile.reset();
            }
            // Clear the in-memory key map so stale descriptors cannot
            // resolve against the reset data file.
            if (keyHash != null) {
                keyHash.clear();
            }
            currentSize = 0;
        } finally {
            storageLock.writeLock().release();
        }
    }
}