/*
* JBoss, the OpenSource J2EE webOS
*
* Distributable under LGPL license.
* See terms of license at gnu.org.
*/
package org.jboss.cache;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.cache.buddyreplication.BuddyManager;
import org.jboss.cache.buddyreplication.GravitateResult;
import org.jboss.cache.config.Configuration;
import org.jboss.cache.config.Option;
import org.jboss.cache.factories.ComponentRegistry;
import org.jboss.cache.factories.InterceptorChainFactory;
import org.jboss.cache.factories.annotations.ComponentName;
import org.jboss.cache.factories.annotations.Inject;
import org.jboss.cache.interceptors.Interceptor;
import org.jboss.cache.invocation.NodeInvocationDelegate;
import org.jboss.cache.invocation.RemoteCacheInvocationDelegate;
import org.jboss.cache.loader.CacheLoaderManager;
import org.jboss.cache.lock.IsolationLevel;
import org.jboss.cache.lock.LockStrategyFactory;
import org.jboss.cache.lock.LockingException;
import org.jboss.cache.lock.NodeLock;
import org.jboss.cache.lock.TimeoutException;
import org.jboss.cache.marshall.Marshaller;
import org.jboss.cache.marshall.MethodCall;
import org.jboss.cache.marshall.MethodCallFactory;
import org.jboss.cache.marshall.MethodDeclarations;
import org.jboss.cache.marshall.NodeData;
import org.jboss.cache.notifications.Notifier;
import org.jboss.cache.notifications.event.NodeModifiedEvent;
import org.jboss.cache.optimistic.DataVersion;
import org.jboss.cache.transaction.GlobalTransaction;
import org.jboss.cache.transaction.OptimisticTransactionEntry;
import org.jboss.cache.transaction.TransactionEntry;
import org.jboss.cache.transaction.TransactionTable;
import org.jboss.cache.util.CachePrinter;
import org.jgroups.Address;
import javax.management.MBeanServerFactory;
import javax.transaction.Status;
import javax.transaction.SystemException;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* The default implementation class of {@link org.jboss.cache.Cache} and {@link org.jboss.cache.CacheSPI}. This class
* has its roots in the legacy (JBoss Cache 1.x.x) org.jboss.cache.TreeCache class.
* <p/>
* You should not use this class directly, or attempt to cast {@link org.jboss.cache.Cache} or {@link org.jboss.cache.CacheSPI}
* interfaces directly to this class.
*
* @author Bela Ban
* @author Ben Wang
* @author <a href="mailto:manik@jboss.org">Manik Surtani (manik@jboss.org)</a>
* @author Brian Stansberry
* @author Daniel Huang (dhuang@jboss.org)
* @see org.jboss.cache.Cache
*/
public class CacheImpl
{
/**
 * CacheImpl log.
 */
private Log log = LogFactory.getLog(CacheImpl.class);
/**
 * Root node.
 */
private NodeSPI root;
/**
 * Cache's region manager.
 */
private RegionManager regionManager = null;
/**
 * Maintains mapping of transactions (keys) and Modifications/Undo-Operations
 */
private TransactionTable transactionTable;
/**
 * Set<Fqn> of Fqns of the topmost node of internal regions that should
 * not included in standard state transfers.
 */
private final Set<Fqn> internalFqns = new HashSet<Fqn>();
/**
 * Marshaller if register to handle marshalling
 */
private Marshaller marshaller = null;
/**
 * Used to get the Transaction associated with the current thread
 */
private TransactionManager transactionManager = null;
/**
 * Cache loader manager.
 */
private CacheLoaderManager cacheLoaderManager;
/**
 * The current lifecycle state.
 */
CacheStatus cacheStatus;
/**
 * Buddy Manager
 */
private BuddyManager buddyManager;
/**
 * CacheSPI delegate used for calls originating from remote (clustered) peers;
 * registered under the explicit component name "remoteDelegate" (see injectDependencies).
 */
private RemoteCacheInvocationDelegate remoteDelegate;
/**
 * Cache notifier handler class.
 */
private Notifier notifier;
/**
 * Configuration this cache was constructed with; also seeds the component registry.
 */
private final Configuration configuration;
/**
 * Registry that constructs, wires, starts and stops this cache's internal components.
 */
private final ComponentRegistry componentRegistry;
/**
 * Factory for creating node implementations; root type is derived from it in correctRootNodeType().
 */
private NodeFactory nodeFactory;
/**
 * The CacheSPI facade for this cache; also supplies the current InvocationContext.
 */
private CacheSPI spi;
/**
 * Head of the interceptor chain; used here for debug printing at start().
 */
private Interceptor interceptorChain;
// Cached trace-enabled flag consulted before log.trace() calls. NOTE(review):
// never assigned anywhere in this chunk - presumably set from log.isTraceEnabled()
// elsewhere in the file; confirm, otherwise trace logging below is dead code.
private boolean trace;
/**
 * Hook to shut down the cache when the JVM exits.
 */
private Thread shutdownHook;
/**
 * A flag that the shutdown hook sets before calling cache.stop(). Allows stop() to identify if it has been called
 * from a shutdown hook.
 */
private boolean invokedFromShutdownHook;
/**
 * Handles cluster communication; null-checked before use in startManualComponents().
 */
private RPCManager rpcManager;
/**
 * Constructs an uninitialized CacheImpl with a default {@link Configuration}.
 */
protected CacheImpl()
{
this(new Configuration());
}
/**
 * Constructs an uninitialized CacheImpl.
 *
 * @param configuration configuration to use; also passed to the per-instance component registry
 */
protected CacheImpl(Configuration configuration)
{
this.configuration = configuration;
this.componentRegistry = new ComponentRegistry(configuration);
// lifecycle begins at INSTANTIATED; create()/start() advance it from here
this.cacheStatus = CacheStatus.INSTANTIATED;
}
/**
 * Exposes the component registry to subclasses and tests.
 */
protected ComponentRegistry getComponentRegistry()
{
return componentRegistry;
}
// NOTE: The RemoteCacheInvocationDelegate is a bit tricky since it is a subclass for CacheInvocationDelegate and hence
// also an implementation of CacheSPI. Components requesting a CacheSPI may inadvertently get a RemoteCacheInvocationDelegate
// To work around this, I'm explicitly naming the RCID as a "remoteDelegate". Perhaps all components should be named.
/**
 * Dependency-injection callback invoked by the {@link ComponentRegistry} when wiring
 * this cache; simply stores references to the collaborating components.
 */
@Inject
private void injectDependencies(Notifier notifier, RegionManager regionManager, TransactionManager transactionManager, Marshaller marshaller,
TransactionTable transactionTable, NodeFactory nodeFactory,
CacheSPI spi, @ComponentName("remoteDelegate")RemoteCacheInvocationDelegate remoteDelegate,
Interceptor interceptorChain, BuddyManager buddyManager, RPCManager rpcManager)
{
this.notifier = notifier;
this.regionManager = regionManager;
this.transactionManager = transactionManager;
this.transactionTable = transactionTable;
this.nodeFactory = nodeFactory;
this.spi = spi;
this.remoteDelegate = remoteDelegate;
this.marshaller = marshaller;
this.interceptorChain = interceptorChain;
this.buddyManager = buddyManager;
this.rpcManager = rpcManager;
}
/**
 * Returns the configuration this cache was created with.
 */
public Configuration getConfiguration()
{
return configuration;
}
/**
 * Returns the CacheImpl implementation version.
 */
public String getVersion()
{
return Version.printVersion();
}
/**
 * Returns the root node.
 */
public NodeSPI getRoot()
{
return root;
}
/**
 * Returns the transaction table.
 */
public TransactionTable getTransactionTable()
{
return transactionTable;
}
/**
 * Sets the cache locking isolation level.
 * NOTE(review): this delegates to a static setter on LockStrategyFactory, so it
 * appears to affect lock strategy creation JVM-wide rather than only this
 * instance - confirm this is intended.
 */
private void setIsolationLevel(IsolationLevel level)
{
LockStrategyFactory.setIsolationLevel(level);
}
/**
 * Returns the transaction manager in use.
 */
public TransactionManager getTransactionManager()
{
return transactionManager;
}
/**
 * Lifecycle method. This is like initialize.
 * Transitions the cache to the CREATED state; on failure the status becomes FAILED.
 *
 * @throws CacheException if creation fails
 */
public void create() throws CacheException
{
if (!cacheStatus.createAllowed())
{
// a FAILED cache must be destroyed before it can be re-created;
// any other disallowed state (e.g. already created) is a no-op
if (cacheStatus.needToDestroyFailedCache())
destroy();
else
return;
}
try
{
internalCreate();
}
catch (Throwable t)
{
// marks the status FAILED and rethrows as an unchecked type
handleLifecycleTransitionFailure(t);
}
}
/**
 * Marks the cache as FAILED, then rethrows the given problem as one of the
 * declared unchecked types. Any Throwable that is not already a
 * CacheException, RuntimeException or Error is wrapped in a CacheException.
 *
 * @param t the failure raised during a lifecycle transition
 * @throws RuntimeException if t is (or is wrapped into) a runtime exception
 * @throws Error if t is an Error
 */
private void handleLifecycleTransitionFailure(Throwable t)
      throws RuntimeException, Error
{
   cacheStatus = CacheStatus.FAILED;
   // rethrow as the most specific unchecked type available
   if (t instanceof CacheException) throw (CacheException) t;
   if (t instanceof RuntimeException) throw (RuntimeException) t;
   if (t instanceof Error) throw (Error) t;
   throw new CacheException(t);
}
/**
 * The actual create implementation: wires components and fixes up the root node type.
 *
 * @throws CacheException
 */
private void internalCreate() throws CacheException
{
// Include our clusterName in our log category
configureLogCategory();
// construct and inject all registered components
componentRegistry.wire();
// the root node implementation depends on the configured locking scheme
correctRootNodeType();
setIsolationLevel(configuration.getIsolationLevel());
cacheStatus = CacheStatus.CREATED;
}
/**
 * Creates a new root node if one does not exist, or if the existing one does not match the type according to the configuration.
 */
private void correctRootNodeType()
{
// create a new root temporarily.
NodeSPI tempRoot = nodeFactory.createRootDataNode();
// if we don't already have a root or the new (temp) root is of a different class (optimistic vs pessimistic) to
// the current root, then we use the new one.
Class currentRootType = root == null ? null : ((NodeInvocationDelegate) root).getDelegationTarget().getClass();
Class tempRootType = ((NodeInvocationDelegate) tempRoot).getDelegationTarget().getClass();
if (!tempRootType.equals(currentRootType)) root = tempRoot;
}
/**
 * Lifecyle method. Transitions the cache to STARTED, first destroying a FAILED
 * cache and/or running create() as required by the current state.
 *
 * @throws CacheException if startup fails; status moves to FAILED
 */
public void start() throws CacheException
{
if (!cacheStatus.startAllowed())
{
// a FAILED cache must be destroyed (back to DESTROYED) before restarting
if (cacheStatus.needToDestroyFailedCache())
destroy(); // this will take us back to DESTROYED
// states prior to CREATED need a create() first; anything else is a no-op
if (cacheStatus.needCreateBeforeStart())
create();
else
return;
}
try
{
internalStart();
}
catch (Throwable t)
{
// marks the status FAILED and rethrows as an unchecked type
handleLifecycleTransitionFailure(t);
}
}
/**
 * Unregisters components whose construction may have depended on a configuration
 * that has since changed, so that the registry rebuilds them from the current
 * configuration on the next start.
 */
private void removeConfigurationDependentComponents()
{
   Class[] configSensitive = {
         Interceptor.class,
         Marshaller.class,
         TransactionManager.class,
         BuddyManager.class,
         CacheLoaderManager.class
   };
   for (Class componentType : configSensitive)
   {
      componentRegistry.unregisterComponent(componentType);
   }
}
/**
 * The actual start implementation. Rewires components, starts them, validates the
 * configuration, preloads from any cache loader and finally flips the status to STARTED.
 *
 * @throws CacheException
 * @throws IllegalArgumentException if the configured cache mode is unrecognized
 */
private void internalStart() throws CacheException, IllegalArgumentException
{
// re-wire all dependencies in case stuff has changed since the cache was created
// remove any components whose construction may have depended upon a configuration that may have changed.
removeConfigurationDependentComponents();
// this will recreate any missing components based on the current config
componentRegistry.updateDependencies();
componentRegistry.wireDependencies(root);
cacheStatus = CacheStatus.STARTING;
// start all internal components
componentRegistry.start();
if (log.isDebugEnabled())
log.debug("Interceptor chain is:\n" + CachePrinter.printInterceptorChain(interceptorChain));
// optimistic locking requires transactions; warn loudly if no TM is available
if (configuration.getNodeLockingScheme() == Configuration.NodeLockingScheme.OPTIMISTIC && transactionManager == null)
{
log.fatal("No transaction manager lookup class has been defined. Transactions cannot be used and thus OPTIMISTIC locking cannot be used! Expect errors!!");
}
// the locking scheme may have changed since create(); ensure the root node matches it
correctRootNodeType();
switch (configuration.getCacheMode())
{
case LOCAL:
log.debug("cache mode is local, will not create the channel");
break;
case REPL_SYNC:
case REPL_ASYNC:
case INVALIDATION_ASYNC:
case INVALIDATION_SYNC:
// reconfigure log category so that the instance name is reflected as well.
configureLogCategory();
break;
default:
throw new IllegalArgumentException("cache mode " + configuration.getCacheMode() + " is invalid");
}
// rpcManager/buddyManager must start only after everything else is up
startManualComponents();
//now attempt to preload the cache from the loader - Manik
if (cacheLoaderManager != null)
{
cacheLoaderManager.preloadCache();
}
// start any eviction threads.
if (regionManager.isUsingEvictions())
{
regionManager.startEvictionThread();
}
notifier.notifyCacheStarted(spi, spi.getInvocationContext());
addShutdownHook();
log.info("JBoss Cache version: " + getVersion());
cacheStatus = CacheStatus.STARTED;
}
/**
 * Starts components that cannot be handled by the registry's generic start(),
 * because they may only run after ALL other components are up.
 */
private void startManualComponents()
{
// these 2 components need to be started manually since they can only be started after ALL other components have started.
// i.e., rpcManager's start() method may do state transfers. State transfers will rely on the interceptor chain being started.
// the interceptor chain cannot start until the rpcManager is started. And similarly, the buddyManager relies on the
// rpcManager being started.
if (rpcManager != null) rpcManager.start();
if (buddyManager != null)
{
buddyManager.init();
if (buddyManager.isEnabled())
{
// exclude the buddy backup subtree from standard state transfers
internalFqns.add(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN);
}
}
}
/**
 * Registers a JVM shutdown hook that stops this cache, according to the configured
 * {@link Configuration.ShutdownHookBehavior}: DEFAULT registers a hook only when no
 * MBeanServer is present (i.e. we do not appear to be inside an application server,
 * which would manage the lifecycle itself); REGISTER always does; any other value never does.
 */
private void addShutdownHook()
{
   // Program to the List interface rather than the raw ArrayList the factory returns.
   List<?> servers = MBeanServerFactory.findMBeanServer(null);
   boolean registerShutdownHook =
         (configuration.getShutdownHookBehavior() == Configuration.ShutdownHookBehavior.DEFAULT && servers.isEmpty())
               || configuration.getShutdownHookBehavior() == Configuration.ShutdownHookBehavior.REGISTER;
   if (registerShutdownHook)
   {
      if (trace)
         log.trace("Registering a shutdown hook. Configured behavior = " + configuration.getShutdownHookBehavior());
      shutdownHook = new Thread()
      {
         @Override
         public void run()
         {
            try
            {
               // let stop() know it must not try to deregister this very hook
               invokedFromShutdownHook = true;
               CacheImpl.this.stop();
            }
            finally
            {
               invokedFromShutdownHook = false;
            }
         }
      };
      Runtime.getRuntime().addShutdownHook(shutdownHook);
   }
   else
   {
      if (trace)
         log.trace("Not registering a shutdown hook. Configured behavior = " + configuration.getShutdownHookBehavior());
   }
}
/**
 * Lifecycle method. Tears the cache down to DESTROYED, stopping it first when
 * required. Always ends in the DESTROYED state, even if teardown fails.
 */
public void destroy()
{
if (!cacheStatus.destroyAllowed())
{
if (cacheStatus.needStopBeforeDestroy())
{
try
{
stop();
}
catch (CacheException e)
{
// best-effort: proceed with destroy even if the stop failed
log.warn("Needed to call stop() before destroying but stop() " +
"threw exception. Proceeding to destroy", e);
}
}
else
return;
}
try
{
internalDestroy();
}
finally
{
// We always progress to destroyed
cacheStatus = CacheStatus.DESTROYED;
}
}
/**
 * The actual destroy implementation: releases component references and resets
 * the component registry.
 */
private void internalDestroy()
{
   cacheStatus = CacheStatus.DESTROYING;
   cacheLoaderManager = null;
   // The rest of these should have already been taken care of in stop,
   // but we do it here as well in case stop failed.
   // Guard against a null rpcManager, consistent with startManualComponents():
   // destroy() may be reached before dependencies were ever injected, and an
   // NPE here would mask the original failure.
   if (rpcManager != null) rpcManager.stop();
   transactionManager = null;
   componentRegistry.reset();
}
/**
 * Lifecycle method. Transitions the cache to STOPPED; a no-op when stopping is
 * not permitted in the current state.
 */
public void stop()
{
if (!cacheStatus.stopAllowed())
{
return;
}
// Trying to stop() from FAILED is valid, but may not work
boolean failed = cacheStatus == CacheStatus.FAILED;
try
{
internalStop();
}
catch (Throwable t)
{
if (failed)
{
log.warn("Attempted to stop() from FAILED state, " +
"but caught exception; try calling destroy()", t);
}
// marks the status FAILED and rethrows as an unchecked type
handleLifecycleTransitionFailure(t);
}
}
/**
 * The actual stop implementation: deregisters the shutdown hook, stops registry
 * components, fires the cache-stopped notification and clears the in-memory tree.
 */
private void internalStop()
{
cacheStatus = CacheStatus.STOPPING;
// if this is called from a source other than the shutdown hook, deregister the shutdown hook.
if (!invokedFromShutdownHook && shutdownHook != null) Runtime.getRuntime().removeShutdownHook(shutdownHook);
componentRegistry.stop();
if (notifier != null)
{
// notify before listeners are dropped so they observe the stop
notifier.notifyCacheStopped(spi, spi.getInvocationContext());
notifier.removeAllCacheListeners();
}
// unset transaction manager reference
transactionManager = null;
cacheStatus = CacheStatus.STOPPED;
// empty in-memory state
root.clearDataDirect();
root.removeChildrenDirect();
}
/**
 * Returns the current lifecycle state of this cache.
 */
public CacheStatus getCacheStatus()
{
return cacheStatus;
}
/**
 * Returns a Set<Fqn> of Fqns of the topmost node of internal regions that
 * should not included in standard state transfers. Will include
 * {@link BuddyManager#BUDDY_BACKUP_SUBTREE} if buddy replication is
 * enabled.
 *
 * @return an unmodifiable Set<Fqn>. Will not return <code>null</code>.
 */
public Set<Fqn> getInternalFqns()
{
return Collections.unmodifiableSet(internalFqns);
}
/**
 * Evicts the node at <code>subtree</code> along with all descendant nodes.
 * Evictions suppress node-event notifications and create no undo operations
 * (see the flags passed to _remove below).
 *
 * @param subtree Fqn indicating the uppermost node in the
 *                portion of the cache that should be evicted.
 * @throws CacheException
 */
protected void _evictSubtree(Fqn subtree) throws CacheException
{
if (!exists(subtree))
{
return;// node does not exist. Maybe it has been recursively removed.
}
if (trace)
{
log.trace("_evictSubtree(" + subtree + ")");
}
// Recursively remove any children
Set children = spi.getChildrenNames(subtree);
if (children != null)
{
for (Object s : children)
{
Fqn<Object> tmp = new Fqn<Object>(subtree, s);
_remove(null, // no tx
tmp,
false, // no undo ops
true, // no nodeEvent
true);// is an eviction
}
}
// Remove the root node of the subtree
_remove(null, subtree, false, true, true);
}
// ----------- End Marshalling and State Transfer -----------------------
/**
 * Returns the node at the given Fqn; called both externally and internally.
 */
public Node _get(Fqn<?> fqn) throws CacheException
{
return findNode(fqn);
}
/**
 * Returns the raw data map of the node, or null if the node does not exist;
 * called both externally and internally.
 */
public Map _getData(Fqn<?> fqn)
{
NodeSPI n = findNode(fqn);
if (n == null) return null;
return n.getDataDirect();
}
/**
 * Returns a copy of the node's key set, or null if the node does not exist.
 */
public Set _getKeys(Fqn<?> fqn) throws CacheException
{
NodeSPI n = findNode(fqn);
if (n == null)
{
return null;
}
Set keys = n.getKeysDirect();
// defensive copy so callers cannot mutate the node's internal key set
return new HashSet(keys);
}
/**
 * Returns the value mapped to the given key in the given node, or null if the node is not found.
 *
 * @param sendNodeEvent if true, node-visited notifications are fired before and after the lookup
 */
public Object _get(Fqn<?> fqn, Object key, boolean sendNodeEvent) throws CacheException
{
InvocationContext ctx = spi.getInvocationContext();
if (trace)
{
log.trace(new StringBuffer("_get(").append("\"").append(fqn).append("\", \"").append(key).append("\", \"").
append(sendNodeEvent).append("\")"));
}
if (sendNodeEvent) notifier.notifyNodeVisited(fqn, true, ctx);
NodeSPI n = findNode(fqn);
if (n == null)
{
log.trace("node not found");
return null;
}
if (sendNodeEvent) notifier.notifyNodeVisited(fqn, false, ctx);
return n.getDirect(key);
}
/**
 * Checks whether a given node exists in current in-memory state of the cache.
 * Does not acquire any locks in doing so (result may be dirty read). Does
 * not attempt to load nodes from a cache loader (may return false if a
 * node has been evicted).
 *
 * @param fqn The fully qualified name of the node
 * @return boolean Whether or not the node exists
 */
public boolean exists(String fqn)
{
// parse the String form and delegate to the Fqn-based overload
return exists(Fqn.fromString(fqn));
}
/**
 * Checks whether a given node exists in current in-memory state of the cache.
 * Does not acquire any locks in doing so (result may be dirty read). Does
 * not attempt to load nodes from a cache loader (may return false if a
 * node has been evicted).
 *
 * @param fqn The fully qualified name of the node
 * @return boolean Whether or not the node exists
 */
public boolean exists(Fqn<?> fqn)
{
   // a peek that skips deleted (and invalid) nodes suffices for an existence check
   return peek(fqn, false) != null;
}
/**
 * Same as {@link #peek(Fqn, boolean, boolean)} with includeInvalidNodes set to false.
 */
public NodeSPI peek(Fqn<?> fqn, boolean includeDeletedNodes)
{
return peek(fqn, includeDeletedNodes, false);
}
/**
 * Walks the in-memory tree directly (no locks, no interceptors, no cache loader)
 * and returns the node at the given Fqn, or null if any element of the path is
 * missing - or is deleted/invalid and the corresponding flag excludes it.
 *
 * @param fqn path to look up; null or empty addresses the root itself
 * @param includeDeletedNodes if false, a deleted node on the path yields null
 * @param includeInvalidNodes if false, an invalid node on the path yields null
 */
public NodeSPI peek(Fqn<?> fqn, boolean includeDeletedNodes, boolean includeInvalidNodes)
{
   if (fqn == null || fqn.size() == 0) return root;
   NodeSPI current = root;
   for (int i = 0, size = fqn.size(); i < size; i++)
   {
      current = current.getChildDirect(fqn.get(i));
      if (current == null) return null;
      if (!includeDeletedNodes && current.isDeleted()) return null;
      if (!includeInvalidNodes && !current.isValid()) return null;
   }
   return current;
}
/**
 * Convenience overload of {@link #exists(Fqn, Object)} taking a String-form Fqn.
 *
 * @param fqn
 * @param key
 */
public boolean exists(String fqn, Object key)
{
return exists(Fqn.fromString(fqn), key);
}
/**
 * Checks whether a given key exists in the given node. Does not interact with the interceptor stack.
 *
 * @param fqn The fully qualified name of the node
 * @param key the key to look for
 * @return boolean Whether or not the key exists in the node
 */
public boolean exists(Fqn<?> fqn, Object key)
{
// peek skips deleted nodes; a missing node means the key cannot exist
NodeSPI n = peek(fqn, false);
return n != null && n.getKeysDirect().contains(key);
}
/**
 * Returns the names of the (non-deleted) children of the node at the given Fqn:
 * null if the node does not exist, an immutable empty set if it has no children,
 * otherwise a mutable set of last-path-elements.
 */
@SuppressWarnings("unchecked")
public <E> Set<E> _getChildrenNames(Fqn<E> fqn) throws CacheException
{
   NodeSPI n = findNode(fqn);
   if (n == null) return null;
   Map childrenMap = n.getChildrenMapDirect();
   // check emptiness before allocating the result set - the original allocated
   // a HashSet that was simply discarded on this (common) early-return path
   if (childrenMap == null || childrenMap.isEmpty()) return Collections.emptySet();
   Set<E> childNames = new HashSet<E>();
   // prune deleted children - JBCACHE-1136
   for (Object c : childrenMap.values())
   {
      NodeSPI child = (NodeSPI) c;
      if (!child.isDeleted())
      {
         childNames.add((E) child.getFqn().getLastElement());
      }
   }
   return childNames;
}
/**
 * Returns true if the FQN exists and the node has children.
 */
public boolean hasChild(Fqn fqn)
{
if (fqn == null) return false;
NodeSPI n = findNode(fqn);
return n != null && n.hasChildrenDirect();
}
/**
 * Returns a debug string with few details.
 */
public String toString()
{
return toString(false);
}
/**
 * Returns a debug string.
 *
 * @param details if false, a one-line summary (node and lock counts); if true,
 *                a recursive dump of every first-level subtree's contents
 */
public String toString(boolean details)
{
   StringBuffer sb = new StringBuffer();
   if (!details)
   {
      sb.append(getClass().getName()).append(" [").append(getNumberOfNodes()).append(" nodes, ");
      sb.append(getNumberOfLocksHeld()).append(" locks]");
      return sb.toString();
   }
   // detailed dump; an absent root yields the empty string
   if (root != null)
   {
      for (Object child : root.getChildrenDirect())
      {
         ((NodeSPI) child).print(sb, 0);
         sb.append("\n");
      }
   }
   return sb.toString();
}
/**
 * Prints information about the contents of the nodes in the cache's current
 * in-memory state. Does not load any previously evicted nodes from a
 * cache loader, so evicted nodes will not be included.
 */
public String printDetails()
{
StringBuffer sb = new StringBuffer();
root.printDetails(sb, 0);
sb.append("\n");
return sb.toString();
}
/**
 * Returns lock information for each first-level subtree, one entry per line.
 */
public String printLockInfo()
{
StringBuffer sb = new StringBuffer("\n");
int indent = 0;
for (Object n : root.getChildrenDirect())
{
((NodeSPI) n).getLock().printLockInfo(sb, indent);
sb.append("\n");
}
return sb.toString();
}
/**
 * Returns the number of read or write locks held across the entire cache.
 * Walks the tree without acquiring locks, so the result is a snapshot approximation.
 */
public int getNumberOfLocksHeld()
{
return numLocks(root);
}
/**
 * Recursively counts locked nodes in the subtree rooted at n (inclusive).
 * A null node contributes zero.
 */
private int numLocks(NodeSPI n)
{
   if (n == null) return 0;
   int total = n.getLock().isLocked() ? 1 : 0;
   // getChildrenDirect(true): presumably includes deleted children - confirm
   for (Object child : n.getChildrenDirect(true))
   {
      total += numLocks((NodeSPI) child);
   }
   return total;
}
/**
 * Returns an <em>approximation</em> of the total number of nodes in the
 * cache. Since this method doesn't acquire any locks, the number might be
 * incorrect, or the method might even throw a
 * ConcurrentModificationException
 */
public int getNumberOfNodes()
{
// subtract 1 so the root node itself is not counted
return numNodes(root) - 1;
}
/**
 * Counts the nodes in the subtree rooted at the given node, including the node
 * itself. Note: a null argument still counts as one node, which callers such as
 * getNumberOfNodes() rely on when subtracting the root.
 */
private int numNodes(NodeSPI node)
{
   if (node == null) return 1;
   int total = 1; // the node itself
   for (Object child : node.getChildrenDirect())
   {
      total += numNodes((NodeSPI) child);
   }
   return total;
}
/**
 * Returns an <em>approximation</em> of the total number of attributes in
 * the cache. Since this method doesn't acquire any locks, the number might
 * be incorrect, or the method might even throw a
 * ConcurrentModificationException
 */
public int getNumberOfAttributes()
{
return numAttributes(root);
}
/**
 * Returns an <em>approximation</em> of the total number of attributes in
 * this sub cache.
 *
 * @see #getNumberOfAttributes
 */
public int getNumberOfAttributes(Fqn fqn)
{
// findNode may return null for a missing fqn; see numAttributes for how that is handled
return numAttributes(findNode(fqn));
}
/**
 * Recursively counts the attributes (data map entries) in the subtree rooted at n.
 * A null node contributes zero - previously this threw a NullPointerException when
 * getNumberOfAttributes(Fqn) was called for a non-existent node; the guard also
 * makes it consistent with the null handling in numLocks() and numNodes().
 */
private int numAttributes(NodeSPI n)
{
   if (n == null) return 0;
   int count = n.getDataDirect().size();
   for (Object child : n.getChildrenDirect())
   {
      count += numAttributes((NodeSPI) child);
   }
   return count;
}
/* --------------------- Callbacks -------------------------- */
/* ----- These are VERSIONED callbacks to facilitate JBCACHE-843. Also see docs/design/DataVersion.txt --- */
// In each of the versioned callbacks below, the DataVersion argument is unused at
// this level; the call simply delegates to the unversioned variant. Presumably the
// version is consumed elsewhere (e.g. by optimistic-locking interceptors) - confirm.
public void _putForExternalRead(GlobalTransaction gtx, Fqn fqn, Object key, Object value, DataVersion dv) throws CacheException
{
_putForExternalRead(gtx, fqn, key, value);
}
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops, DataVersion dv) throws CacheException
{
_put(tx, fqn, data, create_undo_ops, false, dv);
}
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops, boolean erase_contents, DataVersion dv) throws CacheException
{
_put(tx, fqn, data, create_undo_ops, erase_contents);
}
public Object _put(GlobalTransaction tx, Fqn fqn, Object key, Object value, boolean create_undo_ops, DataVersion dv) throws CacheException
{
return _put(tx, fqn, key, value, create_undo_ops);
}
public boolean _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, DataVersion dv) throws CacheException
{
return _remove(tx, fqn, create_undo_ops, true);
}
public Object _remove(GlobalTransaction tx, Fqn fqn, Object key, boolean create_undo_ops, DataVersion dv) throws CacheException
{
return _remove(tx, fqn, key, create_undo_ops);
}
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, DataVersion dv) throws CacheException
{
_removeData(tx, fqn, create_undo_ops, true);
}
/* ----- End VERSIONED callbacks - Now for the NORMAL callbacks. -------- */
/**
 * Internal put method.
 * Does the real work. Needs to acquire locks if accessing nodes, depending on
 * the value of <tt>locking</tt>. If run inside a transaction, needs to (a) add
 * newly acquired locks to {@link TransactionEntry}'s lock list, (b) add nodes
 * that were created to {@link TransactionEntry}'s node list and (c) create
 * {@link Modification}s and add them to {@link TransactionEntry}'s modification
 * list and (d) create compensating modifications to undo the changes in case
 * of a rollback
 *
 * @param fqn String form of the node path; parsed and delegated to the Fqn overload
 * @param data
 * @param create_undo_ops If true, undo operations will be created (default is true).
 *                        Otherwise they will not be created (used by rollback()).
 */
public void _put(GlobalTransaction tx, String fqn, Map data, boolean create_undo_ops)
throws CacheException
{
_put(tx, Fqn.fromString(fqn), data, create_undo_ops);
}
/**
 * Internal put method; delegates with erase_contents set to false (existing
 * data is merged with, not replaced by, the supplied map).
 *
 * @param fqn
 * @param data
 * @param create_undo_ops If true, undo operations will be created (default is true).
 *                        Otherwise they will not be created (used by rollback()).
 */
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops)
throws CacheException
{
_put(tx, fqn, data, create_undo_ops, false);
}
/**
 * Internal put method.
 * Does the real work. Needs to acquire locks if accessing nodes, depending on
 * the value of <tt>locking</tt>. If run inside a transaction, needs to (a) add
 * newly acquired locks to {@link TransactionEntry}'s lock list, (b) add nodes
 * that were created to {@link TransactionEntry}'s node list and (c) create
 * {@link Modification}s and add them to {@link TransactionEntry}'s modification
 * list and (d) create compensating modifications to undo the changes in case
 * of a rollback
 *
 * @param fqn
 * @param data
 * @param create_undo_ops If true, undo operations will be created (default is true).
 *                        Otherwise they will not be created (used by rollback()).
 * @param erase_contents  Clear the existing hashmap before putting the new data into it
 */
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops, boolean erase_contents)
throws CacheException
{
if (trace)
{
log.trace("_put(" + tx + ", \"" + fqn + "\", " + data + " undo=" + create_undo_ops + " erase=" + erase_contents + ")");
}
InvocationContext ctx = spi.getInvocationContext();
// suppress notifications when this put is itself a rollback compensation
boolean isRollback = checkIsRollingBack(ctx.getTransaction());
NodeSPI n = findNodeCheck(tx, fqn, isRollback);
Map rawData = n.getDataDirect();
if (!isRollback) notifier.notifyNodeModified(fqn, true, NodeModifiedEvent.ModificationType.PUT_MAP, rawData, ctx);
// create a compensating method call (reverting the effect of
// this modification) and put it into the TX's undo list.
if (tx != null && create_undo_ops)
{
// erase and set to previous hashmap contents
MethodCall undo_op = MethodCallFactory.create(MethodDeclarations.putDataEraseMethodLocal_id, tx, fqn, new HashMap(rawData), false, true);
transactionTable.addUndoOperation(tx, undo_op);
}
if (erase_contents)
n.clearDataDirect();
n.putAllDirect(data);
if (!isRollback)
notifier.notifyNodeModified(fqn, false, NodeModifiedEvent.ModificationType.PUT_MAP, n.getDataDirect(), ctx);
}
/**
 * Internal put method; parses the String-form Fqn and delegates.
 *
 * @return Previous value (if any)
 */
public Object _put(GlobalTransaction tx, String fqn, Object key, Object value, boolean create_undo_ops)
throws CacheException
{
return _put(tx, Fqn.fromString(fqn), key, value, create_undo_ops);
}
/**
 * Returns true if the given transaction is marked for rollback, actively rolling
 * back, or already rolled back; false for a null transaction or when the status
 * cannot be determined.
 *
 * @param tx transaction to inspect; may be null
 */
private boolean checkIsRollingBack(Transaction tx)
{
   if (tx == null) return false;
   try
   {
      // Read the status exactly once: the original called tx.getStatus() up to
      // three times, so a status transition between calls could be misclassified,
      // and each call may be a round trip into the transaction manager.
      int status = tx.getStatus();
      return status == Status.STATUS_ROLLEDBACK
            || status == Status.STATUS_ROLLING_BACK
            || status == Status.STATUS_MARKED_ROLLBACK;
   }
   catch (Exception e)
   {
      // can't get a hold of a transaction - probably no tx rolling back
      return false;
   }
}
/**
 * Internal put method. Writes a single key/value pair into the node at fqn,
 * firing pre/post modification notifications (unless called as part of a
 * rollback) and registering a compensating undo operation for transactions.
 *
 * @return Previous value (if any)
 */
public Object _put(GlobalTransaction tx, Fqn fqn, Object key, Object value, boolean create_undo_ops)
throws CacheException
{
if (trace)
{
log.trace(new StringBuffer("_put(").append(tx).append(", \"").
append(fqn).append("\", k=").append(key).append(", v=").append(value).append(")"));
}
// doesn't matter where we get this from - whether from the spi or the remote delegate - since they both refer to a single
// invocation context container instance.
InvocationContext ctx = spi.getInvocationContext();
// if this is a rollback then don't fire notifications.
boolean isRollback = checkIsRollingBack(ctx.getTransaction());
NodeSPI n = findNodeCheck(tx, fqn, isRollback);
Map rawData = n.getDataDirect();
if (!isRollback)
notifier.notifyNodeModified(fqn, true, NodeModifiedEvent.ModificationType.PUT_DATA, rawData, ctx);
Object old_value = n.putDirect(key, value);
// create a compensating method call (reverting the effect of
// this modification) and put it into the TX's undo list.
if (tx != null && create_undo_ops)
{
MethodCall undo_op;
if (old_value == null)
{
// the key did not exist before: undo by removing it
undo_op = MethodCallFactory.create(MethodDeclarations.removeKeyMethodLocal_id, tx, fqn, key, false);
}
else
{
// the key existed: undo by restoring the previous value
undo_op = MethodCallFactory.create(MethodDeclarations.putKeyValMethodLocal_id, tx, fqn, key, old_value, false);
}
// 1. put undo-op in TX' undo-operations list (needed to rollback TX)
transactionTable.addUndoOperation(tx, undo_op);
}
Map newData = Collections.singletonMap(key, value);
if (!isRollback)
notifier.notifyNodeModified(fqn, false, NodeModifiedEvent.ModificationType.PUT_DATA, newData, ctx);
return old_value;
}
/**
 * Internal remove method; parses the String-form Fqn and delegates.
 */
public void _remove(GlobalTransaction tx, String fqn, boolean create_undo_ops) throws CacheException
{
_remove(tx, Fqn.fromString(fqn), create_undo_ops);
}
/**
 * Internal remove method; delegates with the node-event flag set to true.
 */
public boolean _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops) throws CacheException
{
return _remove(tx, fqn, create_undo_ops, true);
}
/**
 * Internal remove method; delegates with eviction set to false.
 */
public boolean _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent)
throws CacheException
{
return _remove(tx, fqn, create_undo_ops, sendNodeEvent, false);
}
/**
 * Internal method to remove a node.
 *
 * @param tx
 * @param fqn
 * @param create_undo_ops
 * @param sendNodeEvent NOTE(review): despite its name, this flag is forwarded into the
 *                      6-arg overload's skipSendingNodeEvents parameter, where true
 *                      SUPPRESSES notifications (see _evictSubtree, which passes true
 *                      commented as "no nodeEvent") - confirm and consider renaming.
 */
public boolean _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction)
throws CacheException
{
return _remove(tx, fqn, create_undo_ops, sendNodeEvent, eviction, null);
}
/**
 * Internal method to remove a node.
 * Performs a remove on a node, passing in a {@link DataVersion} which is used with optimistically locked nodes. Pass
 * in a null if optimistic locking is not used.
 *
 * @param tx
 * @param fqn
 * @param create_undo_ops
 * @param skipSendingNodeEvents when true, removal/eviction notifications are suppressed
 * @param eviction
 * @param version
 * @return true if the node was removed, false if not found
 * @throws CacheException
 */
public boolean _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean skipSendingNodeEvents, boolean eviction, DataVersion version)
throws CacheException
{
NodeSPI n;
NodeSPI parent_node;
MethodCall undo_op;
if (trace)
{
log.trace("_remove(" + tx + ", \"" + fqn + "\", undo=" + create_undo_ops + ")");
}
InvocationContext ctx = spi.getInvocationContext();
// check if this is triggered by a rollback operation ...
boolean isRollback = checkIsRollingBack(ctx.getTransaction());
if (tx != null)
{
try
{
if (isRollback)
{
log.trace("This remove call is triggered by a transaction rollback, as a compensation operation. Do a realRemove() instead.");
return realRemove(fqn, true);
}
}
catch (Exception e)
{
// what do we do here?
log.warn("Unable to get a hold of the transaction for a supposedly transactional call! This *may* result in stale locks!", e);
}
}
// Find the node. This will add the temporarily created parent nodes to the TX's node list if tx != null)
n = findNode(fqn, version, true);
if (n == null)
{
if (trace)
{
log.trace("node " + fqn + " not found");
}
return false;
}
// pre-removal notification, carrying the node's current data
if (!isRollback && !skipSendingNodeEvents)
{
if (eviction)
{
notifier.notifyNodeEvicted(fqn, true, ctx);
}
else
{
notifier.notifyNodeRemoved(fqn, true, n.getDataDirect(), ctx);
}
}
parent_node = n.getParent();
boolean found;
// remove subtree from parent
if (eviction || configuration.isNodeLockingOptimistic())
{
// if there is no parent node and the fqn is root, found == true otherwise found == false.
// NOTE(review): the ternary condition is (n.isValid() && parent_node == null), so when
// the node is invalid but has a parent, removeChildDirect is still invoked - confirm intended.
found = n.isValid() && parent_node == null ? fqn.isRoot() : parent_node.removeChildDirect(n.getFqn().getLastElement());
}
else
{
// pessimistic (non-eviction) removal: only mark the subtree deleted here
found = n.isValid() && !n.isDeleted();
n.markAsDeleted(true, true);
}
if (eviction && parent_node != null)
{
// the parent no longer holds its full child set in memory
parent_node.setChildrenLoaded(false);
}
// release all locks for the entire subtree
// n.getNodeSPI().getLock().releaseAll(tx != null ? tx : (Object) Thread.currentThread());
// create a compensating method call (reverting the effect of
// this modification) and put it into the TX's undo list.
if (tx != null && create_undo_ops && !eviction && found)
{
undo_op = MethodCallFactory.create(MethodDeclarations.addChildMethodLocal_id, tx, parent_node.getFqn(), n.getFqn().getLastElement(), n, false);
// 1. put undo-op in TX' undo-operations list (needed to rollback TX)
transactionTable.addUndoOperation(tx, undo_op);
}
// post-removal notification
if (!isRollback && !skipSendingNodeEvents)
{
if (eviction)
{
notifier.notifyNodeEvicted(fqn, false, ctx);
}
else
{
notifier.notifyNodeRemoved(fqn, false, null, ctx);
}
}
return found;
}
/**
 * Internal method to remove a single key from a node, addressed by a String fqn.
 * Converts the String to an {@link Fqn} and delegates.
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             String form of the node's Fqn
 * @param key             key to remove from the node's data map
 * @param create_undo_ops if true, register a compensating undo operation with the tx
 * @return the value previously mapped to the key, or null
 * @throws CacheException on failure
 */
public Object _remove(GlobalTransaction tx, String fqn, Object key, boolean create_undo_ops)
      throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return _remove(tx, parsed, key, create_undo_ops);
}
/**
 * Internal method to remove a single key from a node's data map.
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             node whose data map is modified
 * @param key             key to remove
 * @param create_undo_ops if true and tx is non-null, registers a compensating put undo op
 * @return the value previously mapped to the key, or null if the node or key was absent
 * @throws CacheException
 */
public Object _remove(GlobalTransaction tx, Fqn fqn, Object key, boolean create_undo_ops)
throws CacheException
{
MethodCall undo_op;
Object old_value;
if (trace)
{
log.trace("_remove(" + tx + ", \"" + fqn + "\", key=" + key + ")");
}
// Find the node. This will lock it (if <tt>locking</tt> is true) and
// add the temporarily created parent nodes to the TX's node list if tx != null)
NodeSPI n = findNode(fqn);
if (n == null)
{
log.warn("node " + fqn + " not found");
return null;
}
InvocationContext ctx = spi.getInvocationContext();
boolean isRollback = checkIsRollingBack(ctx.getTransaction());
// pre-modification event carries the node's data as it was before the remove
if (!isRollback)
notifier.notifyNodeModified(fqn, true, NodeModifiedEvent.ModificationType.REMOVE_DATA, n.getDataDirect(), ctx);
old_value = n.removeDirect(key);
// create a compensating method call (reverting the effect of
// this modification) and put it into the TX's undo list.
// Only needed if the key actually mapped to a value.
if (tx != null && create_undo_ops && old_value != null)
{
undo_op = MethodCallFactory.create(MethodDeclarations.putKeyValMethodLocal_id, tx, fqn, key, old_value, false);
// 1. put undo-op in TX' undo-operations list (needed to rollback TX)
transactionTable.addUndoOperation(tx, undo_op);
}
// post-modification event carries just the removed key/value pair
Map removedData = Collections.singletonMap(key, old_value);
if (!isRollback)
notifier.notifyNodeModified(fqn, false, NodeModifiedEvent.ModificationType.REMOVE_DATA, removedData, ctx);
return old_value;
}
/**
 * Internal method to remove all data from a node, addressed by a String fqn.
 * Converts the String to an {@link Fqn} and delegates.
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             String form of the node's Fqn
 * @param create_undo_ops if true, register a compensating undo operation with the tx
 * @throws CacheException on failure
 */
public void _removeData(GlobalTransaction tx, String fqn, boolean create_undo_ops)
      throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   _removeData(tx, parsed, create_undo_ops);
}
/**
 * Internal method to remove all data from a node. Delegates with node
 * events enabled.
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             node whose data map is cleared
 * @param create_undo_ops if true, register a compensating undo operation with the tx
 * @throws CacheException on failure
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops)
      throws CacheException
{
   final boolean sendNodeEvent = true;
   _removeData(tx, fqn, create_undo_ops, sendNodeEvent);
}
/**
 * Internal method to remove all data from a node. Delegates with eviction
 * disabled.
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             node whose data map is cleared
 * @param create_undo_ops if true, register a compensating undo operation with the tx
 * @param sendNodeEvent   flag forwarded to the five-argument overload
 * @throws CacheException on failure
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent)
      throws CacheException
{
   final boolean eviction = false;
   _removeData(tx, fqn, create_undo_ops, sendNodeEvent, eviction);
}
/**
 * Internal method to remove all data from a node. Delegates with a null
 * {@link DataVersion} (no optimistic version check).
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             node whose data map is cleared
 * @param create_undo_ops if true, register a compensating undo operation with the tx
 * @param sendNodeEvent   flag forwarded to the six-argument overload
 * @param eviction        true if this call is driven by eviction
 * @throws CacheException on failure
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction)
      throws CacheException
{
   final DataVersion noVersion = null;
   _removeData(tx, fqn, create_undo_ops, sendNodeEvent, eviction, noVersion);
}
/**
 * Internal method to remove all data from a node (the node itself is retained).
 *
 * @param tx              transaction this call runs under, or null
 * @param fqn             node whose data map is cleared
 * @param create_undo_ops if true and tx is non-null, registers a compensating putData undo op
 * @param sendNodeEvent   if true, fires a "node visited" post-event instead of modified/evicted
 * @param eviction        true if driven by eviction (fires evicted notifications, resets dataLoaded)
 * @param version         expected data version for optimistic locking; null if not used
 * @throws CacheException
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction, DataVersion version)
      throws CacheException
{
   MethodCall undo_op = null;
   if (trace)
   {
      log.trace("_removeData(" + tx + ", \"" + fqn + "\")");
   }
   // Find the node. This will lock it (if <tt>locking</tt> is true) and
   // add the temporarily created parent nodes to the TX's node list if tx != null)
   NodeSPI n = findNode(fqn, version);
   if (n == null)
   {
      log.warn("node " + fqn + " not found");
      return;
   }
   Map data = n.getDataDirect();
   InvocationContext ctx = spi.getInvocationContext();
   boolean isRollback = checkIsRollingBack(ctx.getTransaction());
   // create a compensating method call (reverting the effect of this
   // modification) for the TX's undo list. Only built when this is not an
   // eviction and there is actually data to restore.
   if (tx != null && create_undo_ops && !eviction && !data.isEmpty())
   {
      undo_op = MethodCallFactory.create(MethodDeclarations.putDataMethodLocal_id,
            tx, fqn, new HashMap(data), false);
   }
   // pre-modification notification (skipped during rollback)
   if (!isRollback)
   {
      if (eviction)
      {
         notifier.notifyNodeEvicted(fqn, true, ctx);
      }
      else
      {
         notifier.notifyNodeModified(fqn, true, NodeModifiedEvent.ModificationType.REMOVE_DATA, data, ctx);
      }
   }
   n.clearDataDirect();
   if (eviction)
   {
      // force a reload from the cache loader on next access
      n.setDataLoaded(false);
   }
   // post-modification notification (skipped during rollback)
   if (!isRollback)
   {
      if (sendNodeEvent)
      {
         notifier.notifyNodeVisited(fqn, false, ctx);
      }
      else
      {// FIXME Bela did this so GUI view can refresh the view after node is evicted. But this breaks eviction policy, especially AOP!!!!
         if (eviction)
         {
            notifier.notifyNodeEvicted(fqn, false, ctx);
         }
         else
         {
            notifier.notifyNodeModified(fqn, false, NodeModifiedEvent.ModificationType.REMOVE_DATA, data, ctx);
         }
      }
   }
   // put undo-op in TX' undo-operations list (needed to rollback TX).
   // Guard on undo_op != null: previously a null op could be registered when the
   // data map was empty or this was an eviction, polluting the undo list.
   if (tx != null && undo_op != null)
   {
      transactionTable.addUndoOperation(tx, undo_op);
   }
}
/**
 * Internal evict method called by eviction policy provider.
 *
 * @param fqn removes everything associated with this FQN
 * @return <code>true</code> if the node has been completely removed,
 *         <code>false</code> if only the data map was removed, due
 *         to the presence of children
 * @throws CacheException on failure
 */
public boolean _evict(Fqn fqn) throws CacheException
{
   // node may already be gone (e.g. recursively removed); nothing left to evict.
   if (peek(fqn, false, true) == null) return true;
   if (trace)
   {
      log.trace("_evict(" + fqn + ")");
   }
   final boolean undoOps = false, nodeEvent = false, isEviction = true;
   if (!hasChild(fqn))
   {
      // leaf: remove the node entirely
      _remove(null, fqn, undoOps, nodeEvent, isEviction);
      return true;
   }
   // has children: only clear the data map, keep the node
   _removeData(null, fqn, undoOps, nodeEvent, isEviction);
   return false;
}
/**
 * Internal evict method called by eviction policy provider, with an optimistic
 * {@link DataVersion} to validate against.
 *
 * @param fqn     node to evict
 * @param version expected data version; passed through to the remove/removeData call
 * @return <code>true</code> if the node has been completely removed,
 *         <code>false</code> if only the data map was removed, due
 *         to the presence of children
 * @throws CacheException
 */
public boolean _evict(Fqn fqn, DataVersion version) throws CacheException
{
if (!exists(fqn))
return true;// node does not exist
if (trace)
{
log.trace("_evict(" + fqn + ", " + version + ")");
}
if (hasChild(fqn))
{
// has children: only clear the data map, keep the node in place
_removeData(null, fqn, false, false, true, version);
return false;
}
else
{
// leaf: remove the node entirely.
// NOTE(review): the 4th argument (sendNodeEvent) is true here but false in the
// versionless _evict(Fqn) — confirm whether this asymmetry is intentional.
_remove(null, fqn, false, true, true, version);
return true;
}
}
/**
 * Very much like an evict(), except that regardless of whether there is a child present, this call will never
 * remove the node from memory - just remove its contents.
 * <p/>
 * Also, potentially throws a cache exception if data versioning is used and the node in memory has a newer data
 * version than what is passed in.
 * <p/>
 * Finally, the data version of the in-memory node is updated to the version being evicted to prevent versions
 * going out of sync.
 *
 * @param fqn                 node to invalidate
 * @param versionToInvalidate data version to stamp on the (possibly tombstone) node; may be null
 */
public void invalidate(Fqn fqn, DataVersion versionToInvalidate)
{
   Node node = spi.getNode(fqn); // force interceptor chain, load if necessary from cache loader.
   if (node == null)
   {
      // if pessimistic locking, just return.
      if (!configuration.isNodeLockingOptimistic()) return;
      // check if a tombstone already exists
      NodeSPI nodeSPI = peek(fqn, false, true);
      if (nodeSPI == null)
      {
         if (versionToInvalidate == null)
         {
            if (trace)
               log.trace("Would have created a tombstone since the node doesn't exist, but the version to invalidate is null and hence cannot create a tombstone!");
            return;
         }
         if (trace) log.trace("Node doesn't exist; creating a tombstone with data version " + versionToInvalidate);
         // create the node we need: an empty, cache-mode-local put carrying the version.
         Map m = Collections.emptyMap();
         InvocationContext ic = spi.getInvocationContext();
         Option o = ic.getOptionOverrides();
         boolean origCacheModeLocal = o.isCacheModeLocal();
         o.setCacheModeLocal(true);
         o.setDataVersion(versionToInvalidate);
         // if we are in a tx this call should happen outside of any tx
         Transaction suspended = null;
         try
         {
            if (getTransactionManager() != null)
            {
               suspended = getTransactionManager().suspend();
            }
            spi.put(fqn, m);
         }
         catch (Exception e)
         {
            log.error("Unable to create tombstone!", e);
         }
         finally
         {
            // always resume the suspended tx and restore the option override,
            // even if the put failed - otherwise the caller's tx is lost and the
            // cacheModeLocal override leaks into subsequent calls.
            if (suspended != null)
            {
               try
               {
                  getTransactionManager().resume(suspended);
               }
               catch (Exception e)
               {
                  log.error("Unable to resume suspended transaction " + suspended, e);
               }
            }
            ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
         }
         nodeSPI = (NodeSPI) root.getChild(fqn);
         if (nodeSPI == null)
         {
            // tombstone creation failed; nothing to invalidate.
            log.warn("Unable to create tombstone for " + fqn + "; skipping invalidation.");
            return;
         }
      }
      node = nodeSPI;
   }
   if (configuration.isNodeLockingOptimistic())
      _removeData(null, fqn, false, false, true, versionToInvalidate);
   else
      _evict(fqn);
   // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
   // be aware that it is no longer valid.
   ((NodeSPI) node).setValid(false, true);
   if (versionToInvalidate != null)
   {
      // stamp the requested version on whatever remains (tombstone or cleared node);
      // with pessimistic locking _evict may have removed the node entirely, so guard.
      NodeSPI n = peek(fqn, false, true);
      if (n != null) n.setVersion(versionToInvalidate);
   }
}
/**
 * Compensating method to {@link #_remove(GlobalTransaction,Fqn,boolean)}: re-attaches
 * a previously removed child node under its parent (used during transaction rollback).
 *
 * @param gtx        transaction this compensation runs under, or null
 * @param parent_fqn Fqn of the parent to re-attach under
 * @param child_name name of the child being re-attached
 * @param cn         the detached child node to re-attach
 * @param undoOps    if true, register a compensating remove undo op with the tx
 * @throws CacheException
 */
public void _addChild(GlobalTransaction gtx, Fqn parent_fqn, Object child_name, Node cn, boolean undoOps)
throws CacheException
{
NodeSPI childNode = (NodeSPI) cn;
if (trace)
{
log.trace("_addChild(\"" + parent_fqn + "\", \"" + child_name + "\", node=" + childNode + ")");
}
if (parent_fqn == null || child_name == null || childNode == null)
{
log.error("parent_fqn or child_name or childNode was null");
return;
}
NodeSPI parentNode = findNode(parent_fqn);
if (parentNode == null)
{
log.warn("node " + parent_fqn + " not found");
return;
}
InvocationContext ctx = spi.getInvocationContext();
boolean isRollback = checkIsRollingBack(ctx.getTransaction());
Fqn fqn = new Fqn(parent_fqn, child_name);
if (!isRollback) notifier.notifyNodeCreated(fqn, true, ctx);
parentNode.addChild(child_name, childNode);
// the node was marked deleted by the remove being compensated; un-mark it recursively
childNode.markAsDeleted(false, true);
// tricky stuff here - this does look kludgy since we're recursively re-validating nodes
// potentially mistakenly revalidating tombstones, but this method would only be called
// when using pess locking and tombstones don't exist with PL, so this is OK.
childNode.setValid(true, true);
if (gtx != null && undoOps)
{
// 1. put undo-op in TX' undo-operations list (needed to rollback TX)
transactionTable.addUndoOperation(gtx, MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal_id, gtx, fqn, false, false));
}
if (!isRollback) notifier.notifyNodeCreated(fqn, false, ctx);
}
/**
 * Used with buddy replication's data gravitation interceptor. If marshalling is necessary, ensure that the cache is
 * configured to use {@link org.jboss.cache.config.Configuration#useRegionBasedMarshalling} and the {@link org.jboss.cache.Region}
 * pertaining to the Fqn passed in is activated, and has an appropriate ClassLoader.
 *
 * @param fqn            the fqn to gravitate
 * @param searchSubtrees if true, buddy backup subtrees are searched and if false, they are not.
 * @return a GravitateResult which contains the data for the gravitation
 * @throws CacheException
 */
public GravitateResult gravitateData(Fqn fqn, boolean searchSubtrees)
throws CacheException
{
// we need to get the state for this Fqn and its sub-nodes.
// for now, perform a very simple series of getData calls.
InvocationContext ctx = spi.getInvocationContext();
if (trace) log.trace("Caller is asking for " + fqn);
try
{
ctx.setOriginLocal(false);
// use a get() call into the cache to make sure cache loading takes place.
// no need to cache the original skipDataGravitation setting here - it will always be false of we got here!!
// NOTE(review): if spi.getNode() throws, skipDataGravitation is left set to true
// on the option overrides - confirm whether resetting it belongs in the finally block.
ctx.getOptionOverrides().setSkipDataGravitation(true);
Node actualNode = spi.getNode(fqn);
ctx.getOptionOverrides().setSkipDataGravitation(false);
if (trace) log.trace("In local tree, this is " + actualNode);
Fqn backupNodeFqn = null;
// not found locally: probe each buddy group's backup subtree for the node
if (actualNode == null && searchSubtrees)
{
log.trace("Looking at backup trees.");
NodeSPI backupSubtree = findNode(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN);
if (backupSubtree != null)
{
// need to loop through backupSubtree's children
Set childNames = backupSubtree.getChildrenNamesDirect();
if (childNames != null)
{
for (Object childName : childNames)
{
// childName is the name of a buddy group since all child names in this
// collection are direct children of BUDDY_BACKUP_SUBTREE_FQN
backupNodeFqn = BuddyManager.getBackupFqn(childName.toString(), fqn);
// use a get() call into the cache to make sure cache loading takes place.
ctx.getOptionOverrides().setSkipDataGravitation(true);
actualNode = spi.getNode(backupNodeFqn);
ctx.getOptionOverrides().setSkipDataGravitation(false);
if (trace)
log.trace("Looking for " + backupNodeFqn + ". Search result: " + actualNode);
if (actualNode != null) break;
}
}
}
}
if (actualNode == null)
{
return GravitateResult.noDataFound();
}
else
{
// make sure we LOAD data for this node!!
actualNode.getData();
}
// found in the local tree (not a backup): compute the backup Fqn for this node
// under our own buddy group so the caller knows where the backup copy lives.
if (backupNodeFqn == null && searchSubtrees)
{
backupNodeFqn = BuddyManager.getBackupFqn(BuddyManager.getGroupNameFromAddress(rpcManager.getLocalAddress()), fqn);
}
// flatten the node and its entire subtree into a list of NodeData for transfer
List<NodeData> list = getNodeData(new LinkedList<NodeData>(), (NodeSPI) actualNode);
return GravitateResult.subtreeResult(list, backupNodeFqn);
}
finally
{
ctx.setOriginLocal(true);
}
}
/**
 * Recursively flattens a node and its entire subtree into the given list of
 * {@link NodeData} entries (depth-first, parent before children).
 *
 * @param list accumulator the entries are appended to
 * @param node subtree root to serialize
 * @return the same list, for chaining
 */
private List<NodeData> getNodeData(List<NodeData> list, NodeSPI node)
{
   // record this node first, then descend into each child
   list.add(new NodeData(BuddyManager.getActualFqn(node.getFqn()), node.getDataDirect()));
   for (Object child : node.getChildrenDirect())
   {
      getNodeData(list, (NodeSPI) child);
   }
   return list;
}
/**
 * Releases all locks held on the given FQN and its entire subtree.
 * Errors are logged and swallowed (best-effort, as in the original contract).
 *
 * @param fqn root of the subtree whose locks are released
 */
public void _releaseAllLocks(Fqn fqn)
{
   try
   {
      NodeSPI node = findNode(fqn);
      if (node != null)
      {
         releaseAll(node);
      }
      else
      {
         log.error("releaseAllLocks(): node " + fqn + " not found");
      }
   }
   catch (Throwable t)
   {
      // deliberately best-effort: never propagate from a lock-release sweep
      log.error("releaseAllLocks(): failed", t);
   }
}
/**
 * Recursively releases locks bottom-up: children first, then this node's own lock.
 *
 * @param n subtree root whose locks are released
 */
private void releaseAll(NodeSPI n)
{
   for (Object c : n.getChildrenDirect())
   {
      NodeSPI childNode = (NodeSPI) c;
      releaseAll(childNode);
   }
   n.getLock().releaseAll();
}
/**
 * Finds and returns the string value for the Fqn.
 * Returns null if not found or upon error (errors are deliberately swallowed).
 *
 * @param fqn node to render
 * @return the node's toString(), or null
 */
public String _print(Fqn fqn)
{
   try
   {
      Node n = findNode(fqn);
      return n != null ? n.toString() : null;
   }
   catch (Throwable ignored)
   {
      // best-effort diagnostic helper: any failure maps to null
      return null;
   }
}
/**
 * Should not be called; locking is handled by the interceptor chain.
 *
 * @throws UnsupportedOperationException always
 */
public void _lock(Fqn fqn, NodeLock.LockType lock_type, boolean recursive)
      throws TimeoutException, LockingException
{
   throw new UnsupportedOperationException("method _lock() should not be invoked on CacheImpl");
}
/**
 * Throws UnsupportedOperationException; prepare is handled by the interceptor chain.
 *
 * @throws UnsupportedOperationException always
 */
public void optimisticPrepare(GlobalTransaction gtx, List modifications, Map data, Address address, boolean onePhaseCommit)
{
   throw new UnsupportedOperationException("optimisticPrepare() should not be called on CacheImpl directly");
}
/**
 * Throws UnsupportedOperationException; prepare is handled by the interceptor chain.
 *
 * @throws UnsupportedOperationException always
 */
public void prepare(GlobalTransaction global_tx, List modifications, Address coord, boolean onePhaseCommit)
{
   throw new UnsupportedOperationException("prepare() should not be called on CacheImpl directly");
}
/**
 * Throws UnsupportedOperationException; commit is handled by the interceptor chain.
 *
 * @throws UnsupportedOperationException always
 */
public void commit(GlobalTransaction tx)
{
   throw new UnsupportedOperationException("commit() should not be called on CacheImpl directly");
}
/**
 * Throws UnsupportedOperationException; rollback is handled by the interceptor chain.
 *
 * @throws UnsupportedOperationException always
 */
public void rollback(GlobalTransaction tx)//, Boolean hasMods)
{
   throw new UnsupportedOperationException("rollback() should not be called on CacheImpl directly");
}
/* ----------------- End of Callbacks ---------------------- */
/**
 * Adds an undo operation to the transaction table for the given global transaction.
 *
 * @param gtx     transaction the undo op belongs to
 * @param undo_op compensating operation to run on rollback
 */
public void addUndoOperation(GlobalTransaction gtx, MethodCall undo_op)
{
   transactionTable.addUndoOperation(gtx, undo_op);
}
/**
 * Called by reflection: moves a node (and its subtree) under a new parent.
 * Registers a compensating move (back to the old parent) if running in a transaction.
 *
 * @param nodeToMoveFqn node being moved
 * @param newParentFqn  Fqn of the new parent; must already exist
 * @throws NodeNotExistsException if either the node or the new parent is missing
 */
public void _move(Fqn nodeToMoveFqn, Fqn newParentFqn)
{
   // the actual move algorithm.
   NodeSPI newParent = findNode(newParentFqn);
   if (newParent == null)
   {
      throw new NodeNotExistsException("New parent node " + newParentFqn + " does not exist when attempting to move node!!");
   }
   NodeSPI node = findNode(nodeToMoveFqn);
   if (node == null)
   {
      throw new NodeNotExistsException("Node " + nodeToMoveFqn + " does not exist when attempting to move node!!");
   }
   NodeSPI oldParent = node.getParent();
   Object nodeName = nodeToMoveFqn.getLastElement();
   // the node's location after the move; computed once and shared by the
   // notifications and the undo op below (was rebuilt three times).
   Fqn movedFqn = new Fqn(newParentFqn, nodeName);
   // now that we have the parent and target nodes:
   // first correct the pointers at the pruning point
   oldParent.removeChildDirect(nodeName);
   newParent.addChild(nodeName, node);
   InvocationContext ctx = spi.getInvocationContext();
   // parent pointer is calculated on the fly using Fqns.
   boolean isRollback = checkIsRollingBack(ctx.getTransaction());
   // notify
   if (!isRollback)
      notifier.notifyNodeMoved(nodeToMoveFqn, movedFqn, true, ctx);
   // now adjust Fqns of node and all children.
   moveFqns(node, newParent.getFqn());
   if (!isRollback)
      notifier.notifyNodeMoved(nodeToMoveFqn, movedFqn, false, ctx);
   // now register an undo op: moving the node from its new home back to the old parent
   if (ctx.getTransaction() != null)
   {
      MethodCall undo = MethodCallFactory.create(MethodDeclarations.moveMethodLocal_id, movedFqn, oldParent.getFqn());
      transactionTable.addUndoOperation(ctx.getGlobalTransaction(), undo);
   }
}
/**
 * No-op channel-block hook.
 * Intentionally empty: exists only as a reflection target for
 * MethodDeclarations.blockChannelLocal.
 */
public void _block()
{
//intentionally empty, used only for reflection in MethodDeclarations.blockChannelLocal
}
/**
 * No-op channel-unblock hook.
 * Intentionally empty: exists only as a reflection target for
 * MethodDeclarations.unblockChannelLocal.
 */
public void _unblock()
{
//intentionally empty, used only for reflection in MethodDeclarations.unblockChannelLocal
}
/**
 * Rebases a node's Fqn under a new parent Fqn, keeping the node's own name.
 *
 * @param node    node to re-address
 * @param newBase Fqn of the new parent
 */
private void moveFqns(NodeSPI node, Fqn newBase)
{
   Object name = node.getFqn().getLastElement();
   node.setFqn(new Fqn(newBase, name));
}
/**
 * Set our log category to include our clusterName (and local address), if we have one.
 * In LOCAL mode a "_LOCAL" suffix is appended instead.
 */
private void configureLogCategory()
{
   StringBuilder category = new StringBuilder(getClass().getName());
   if (configuration != null)
   {
      if (rpcManager == null)
      {
         // no RPC manager: we're in LOCAL mode
         category.append("_LOCAL");
      }
      else
      {
         String clusterName = configuration.getClusterName();
         if (clusterName != null)
         {
            category.append('.').append(clusterName);
            Address localAddress = rpcManager.getLocalAddress();
            if (localAddress != null)
            {
               // replace '.'s with '_'s otherwise Log4J will strip them out
               category.append('.').append(localAddress.toString().replace('.', '_'));
            }
         }
      }
   }
   log = LogFactory.getLog(category.toString());
   trace = log.isTraceEnabled();
}
/* ------------------------------ Private methods --------------------------- */
/**
 * Returns the transaction associated with the current thread, as reported by
 * the configured TransactionManager. Used by {@link #getCurrentTransaction()}.
 *
 * @return the current Transaction, or null if there is no manager or lookup fails
 */
protected Transaction getLocalTransaction()
{
   if (transactionManager == null) return null;
   try
   {
      return transactionManager.getTransaction();
   }
   catch (Throwable ignored)
   {
      // best-effort lookup: treat any failure as "no transaction"
      return null;
   }
}
/**
 * Returns true if the transaction is ACTIVE or PREPARING, false otherwise
 * (including null transactions and status-lookup failures).
 *
 * @param tx transaction to inspect; may be null
 */
private boolean isValid(Transaction tx)
{
   if (tx == null) return false;
   try
   {
      switch (tx.getStatus())
      {
         case Status.STATUS_ACTIVE:
         case Status.STATUS_PREPARING:
            return true;
         default:
            return false;
      }
   }
   catch (SystemException e)
   {
      log.error("failed getting transaction status", e);
      return false;
   }
}
/**
 * Returns the transaction associated with the current thread.
 * If a local transaction exists, but doesn't yet have a mapping to a
 * GlobalTransaction, a new GlobalTransaction will be created and mapped to
 * the local transaction. Note that if a local transaction exists, but is
 * not ACTIVE or PREPARING, null is returned.
 *
 * @return A GlobalTransaction, or null if no (local) transaction was associated with the current thread
 */
public GlobalTransaction getCurrentTransaction()
{
   final boolean createIfNotExists = true;
   return getCurrentTransaction(createIfNotExists);
}
/**
 * Returns the GlobalTransaction associated with the thread's local transaction;
 * optionally creating the mapping if it does not exist.
 * Returns null if there is no local transaction, or if it is no longer
 * ACTIVE/PREPARING.
 *
 * @param createIfNotExists if true, create and map a GlobalTransaction when none exists
 */
public GlobalTransaction getCurrentTransaction(boolean createIfNotExists)
{
Transaction tx;
if ((tx = getLocalTransaction()) == null)
{// no transaction is associated with the current thread
return null;
}
if (!isValid(tx))
{// we got a non-null transaction, but it is not active anymore
int status = -1;
try
{
status = tx.getStatus();
}
catch (SystemException e)
{
// deliberately swallowed: status stays -1 and is reported in the warn below
}
// JBCACHE-982 -- don't complain if COMMITTED
if (status != Status.STATUS_COMMITTED)
{
log.warn("status is " + status + " (not ACTIVE or PREPARING); returning null)", new Throwable());
}
else
{
log.trace("status is COMMITTED; returning null");
}
return null;
}
return getCurrentTransaction(tx, createIfNotExists);
}
/**
 * Returns the global transaction for this local transaction, creating the
 * mapping if it does not exist yet.
 *
 * @param tx local transaction to map
 */
public GlobalTransaction getCurrentTransaction(Transaction tx)
{
   final boolean createIfNotExists = true;
   return getCurrentTransaction(tx, createIfNotExists);
}
/**
 * Returns the global transaction for this local transaction.
 *
 * @param tx                local JTA transaction to map
 * @param createIfNotExists if true, if a global transaction is not found; one is created
 *                          (along with its TransactionEntry, optimistic or plain depending on config)
 * @return the mapped GlobalTransaction, or null if none exists and creation was not requested
 */
public GlobalTransaction getCurrentTransaction(Transaction tx, boolean createIfNotExists)
{
// removed synchronization on tx_table because underlying implementation is thread safe
// and JTA spec (section 3.4.3 Thread of Control, par 2) says that only one thread may
// operate on the transaction at one time so no concern about 2 threads trying to call
// this method for the same Transaction instance at the same time
//
GlobalTransaction gtx = transactionTable.get(tx);
if (gtx == null && createIfNotExists)
{
Address addr = rpcManager.getLocalAddress();
gtx = GlobalTransaction.create(addr);
// map local tx -> gtx, then gtx -> its TransactionEntry
transactionTable.put(tx, gtx);
TransactionEntry ent = null;
try
{
// optimistic locking requires the richer OptimisticTransactionEntry (workspace etc.)
ent = configuration.isNodeLockingOptimistic() ? new OptimisticTransactionEntry(tx) : new TransactionEntry(tx);
}
catch (Exception e)
{
throw new CacheException("Unable to create a transaction entry!", e);
}
transactionTable.put(gtx, ent);
if (trace)
{
log.trace("created new GTX: " + gtx + ", local TX=" + tx);
}
}
return gtx;
}
/**
 * Returns an object suitable for use in node locking: the current global
 * transaction if one exists, otherwise the current thread.
 */
protected Object getOwnerForLock()
{
   Object gtx = getCurrentTransaction();
   return gtx != null ? gtx : Thread.currentThread();
}
/**
 * Finds a node given a fully qualified name, without a version check and
 * excluding invalid nodes.
 * Whenever nodes are created, and the global transaction is not null, the created
 * nodes have to be added to the transaction's {@link TransactionEntry} field.<br>
 * When a lock is acquired on a node, a reference to the lock has to be
 * {@link TransactionEntry#addLock(org.jboss.cache.lock.NodeLock) added to the list of locked nodes}
 * in the {@link TransactionEntry}.
 * <p>This operation will also apply different locking to the cache nodes, depending on
 * <tt>operation_type</tt>. If it is <tt>read</tt> type, all nodes will be acquired with
 * read lock. Otherwise, the operation is <tt>write</tt> type, all parent nodes will be acquired
 * with read lock while the destination node acquires write lock.</p>
 *
 * @param fqn Fully qualified name for the corresponding node.
 * @return the node, or null if not found or on version-check failure
 */
public NodeSPI findNode(Fqn fqn)
{
   try
   {
      final DataVersion noVersion = null;
      return findNode(fqn, noVersion);
   }
   catch (CacheException e)
   {
      // version checks cannot fail with a null version, but log defensively
      log.warn("Unexpected error", e);
      return null;
   }
}
/**
 * Like findNode but throws {@link NodeNotExistsException} instead of
 * returning null when the node cannot be found.
 *
 * @param tx             transaction included in the error message for diagnostics
 * @param fqn            node to locate
 * @param includeInvalid if true, invalid nodes are also considered
 */
private NodeSPI findNodeCheck(GlobalTransaction tx, Fqn fqn, boolean includeInvalid)
{
   NodeSPI found = findNode(fqn, null, includeInvalid);
   if (found != null) return found;
   String errStr = "node " + fqn + " not found (gtx=" + tx + ", caller=" + Thread.currentThread() + ")";
   if (trace)
   {
      log.trace(errStr);
   }
   throw new NodeNotExistsException(errStr);
}
/**
 * Internal method; not to be used externally.
 * Physically removes a node (previously marked for removal) from the tree.
 * Returns true if the node was found, false if not.
 *
 * @param f               node to physically remove
 * @param skipMarkerCheck if true, remove even when the node is not marked deleted
 */
public boolean realRemove(Fqn f, boolean skipMarkerCheck)
{
NodeSPI n = peek(f, true);
if (n == null)
{
return false;
}
if (trace) log.trace("Performing a real remove for node " + f + ", marked for removal.");
if (skipMarkerCheck || n.isDeleted())
{
if (n.getFqn().isRoot())
{
// do not actually delete; just remove deletion marker
n.markAsDeleted(true);
// but now remove all children, since the call has been to remove("/")
n.removeChildrenDirect();
// mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
// be aware that it is no longer valid.
n.setValid(false, true);
// ... but the root itself must stay valid (non-recursive re-validation)
n.setValid(true, false);
return true;
}
else
{
// mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
// be aware that it is no longer valid.
n.setValid(false, true);
return n.getParent().removeChildDirect(n.getFqn().getLastElement());
}
}
else
{
if (log.isDebugEnabled()) log.debug("Node " + f + " NOT marked for removal as expected, not removing!");
return false;
}
}
/**
 * Finds a node given a fully qualified name and DataVersion. Does not include invalid nodes.
 *
 * @param fqn     node to locate
 * @param version expected data version for optimistic locking; null to skip the check
 */
private NodeSPI findNode(Fqn fqn, DataVersion version)
{
   final boolean includeInvalidNodes = false;
   return findNode(fqn, version, includeInvalidNodes);
}
/**
 * Finds a node given a fully qualified name and DataVersion, optionally
 * including invalid nodes. When optimistic locking is enabled and a version is
 * supplied, fails if the in-memory node is newer than the expected version.
 *
 * @param fqn                 node to locate; null yields null
 * @param version             expected data version, or null to skip the check
 * @param includeInvalidNodes if true, nodes marked invalid are also returned
 * @throws CacheException if the in-memory node's version is newer than the one supplied
 */
private NodeSPI findNode(Fqn fqn, DataVersion version, boolean includeInvalidNodes)
{
if (fqn == null) return null;
NodeSPI toReturn = peek(fqn, false, includeInvalidNodes);
// version check only applies when the node exists, a version was supplied,
// and the cache is configured for optimistic locking
if (toReturn != null && version != null && configuration.isNodeLockingOptimistic())
{
// we need to check the version of the data node...
DataVersion nodeVersion = toReturn.getVersion();
if (trace)
{
log.trace("looking for optimistic node [" + fqn + "] with version [" + version + "]. My version is [" + nodeVersion + "]");
}
if (nodeVersion.newerThan(version))
{
// we have a versioning problem; throw an exception!
throw new CacheException("Unable to validate versions.");
}
}
return toReturn;
}
// ================== methods to implement Cache and CacheSPI interfaces ============================
/**
 * Returns an unmodifiable view of the current interceptor chain, or null if
 * there is no chain.
 */
public List<Interceptor> getInterceptorChain()
{
   List<Interceptor> chain = getInterceptors();
   if (chain == null) return null;
   return Collections.unmodifiableList(chain);
}
/**
 * Returns the current interceptor chain as a (modifiable) list.
 */
private List<Interceptor> getInterceptors()
{
   return InterceptorChainFactory.asList(this.interceptorChain);
}
/**
 * Installs a new interceptor chain by registering its head with the component registry.
 *
 * @param startOfNewChain head interceptor of the newly linked chain
 */
private void setInterceptorChain(Interceptor startOfNewChain)
{
   String componentName = Interceptor.class.getName();
   componentRegistry.registerComponent(componentName, startOfNewChain, Interceptor.class);
}
/**
 * Inserts an interceptor at the given position in the chain and re-links the chain.
 *
 * @param i        interceptor to insert
 * @param position zero-based index to insert at
 */
public synchronized void addInterceptor(Interceptor i, int position)
{
   InterceptorChainFactory factory = componentRegistry.getComponent(InterceptorChainFactory.class);
   List<Interceptor> chain = getInterceptors();
   chain.add(position, i);
   // now correct the chaining of interceptors and install the relinked chain
   setInterceptorChain(factory.correctInterceptorChaining(chain));
}
/**
 * Removes the interceptor at the given position and re-links the chain.
 *
 * @param position zero-based index of the interceptor to remove
 */
public synchronized void removeInterceptor(int position)
{
   List<Interceptor> chain = getInterceptors();
   chain.remove(position);
   InterceptorChainFactory factory = componentRegistry.getComponent(InterceptorChainFactory.class);
   setInterceptorChain(factory.correctInterceptorChaining(chain));
}
/**
 * Removes the first interceptor of exactly the given class from the chain and
 * re-links it. Does nothing if no such interceptor exists.
 *
 * @param interceptorType exact class of the interceptor to remove
 */
public synchronized void removeInterceptor(Class<? extends Interceptor> interceptorType)
{
   InterceptorChainFactory factory = componentRegistry.getComponent(InterceptorChainFactory.class);
   List<Interceptor> chain = getInterceptors();
   for (int idx = 0; idx < chain.size(); idx++)
   {
      if (chain.get(idx).getClass().equals(interceptorType))
      {
         chain.remove(idx);
         setInterceptorChain(factory.correctInterceptorChaining(chain));
         return;
      }
   }
}
/**
 * Inserts an interceptor directly after the first interceptor of exactly the
 * given class, registers and starts it. Does nothing if no such interceptor exists.
 *
 * @param i                interceptor to insert
 * @param afterInterceptor exact class of the interceptor to insert after
 */
public synchronized void addInterceptor(Interceptor i, Class<? extends Interceptor> afterInterceptor)
{
   InterceptorChainFactory factory = componentRegistry.getComponent(InterceptorChainFactory.class);
   List<Interceptor> chain = getInterceptors();
   for (int idx = 0; idx < chain.size(); idx++)
   {
      if (chain.get(idx).getClass().equals(afterInterceptor))
      {
         componentRegistry.registerComponent(i, Interceptor.class);
         chain.add(idx + 1, i);
         setInterceptorChain(factory.correctInterceptorChaining(chain));
         componentRegistry.start();
         // make sure I start the last 2 "manually startable" components
         startManualComponents();
         return;
      }
   }
}
/**
 * Returns the RPCManager from the runtime configuration.
 */
public RPCManager getRPCManager()
{
   Configuration.RuntimeConfig runtime = configuration.getRuntimeConfig();
   return runtime.getRPCManager();
}
/**
 * Returns the cluster name from the configuration.
 */
public String getClusterName()
{
   Configuration cfg = getConfiguration();
   return cfg.getClusterName();
}
/**
 * Returns the region for the given Fqn from the region manager.
 *
 * @param fqn            region root
 * @param createIfAbsent if true, create the region if it doesn't exist yet
 */
public Region getRegion(Fqn<?> fqn, boolean createIfAbsent)
{
   RegionManager rm = this.regionManager;
   return rm.getRegion(fqn, createIfAbsent);
}
/**
 * Removes the region rooted at the given Fqn.
 *
 * @param fqn region root
 * @return true if a region was removed
 */
public boolean removeRegion(Fqn<?> fqn)
{
   RegionManager rm = this.regionManager;
   return rm.removeRegion(fqn);
}
/**
 * Internal put used by putForExternalRead: delegates to _put with the
 * fail-silently flag set.
 *
 * @param gtx   transaction this put runs under, or null
 * @param fqn   node to put into
 * @param key   key to store
 * @param value value to store
 */
public void _putForExternalRead(GlobalTransaction gtx, Fqn fqn, Object key, Object value)
{
   final boolean failSilently = true;
   _put(gtx, fqn, key, value, failSilently);
}
}