Package com.sun.sgs.app

Examples of com.sun.sgs.app.DataManager
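The snippets below show the common DataManager idioms: resolving a named binding with getBinding() and creating the object on NameNotBoundException, holding ManagedReference fields between managed objects instead of direct Java references, marking objects with markForUpdate() before mutating them, and deleting state with removeObject(). As a baseline, here is a minimal sketch of a managed object and the get-or-create binding pattern; the HighScore class and the "example.highscore" binding name are illustrative, not taken from the snippets below.

    import java.io.Serializable;
    import com.sun.sgs.app.AppContext;
    import com.sun.sgs.app.DataManager;
    import com.sun.sgs.app.ManagedObject;
    import com.sun.sgs.app.NameNotBoundException;

    /** Hypothetical managed object illustrating the binding pattern. */
    public class HighScore implements ManagedObject, Serializable {
        private static final long serialVersionUID = 1L;
        private int best;

        /** Looks up the shared instance by name, creating it on first use. */
        public static HighScore getInstance() {
            DataManager dm = AppContext.getDataManager();
            try {
                return (HighScore) dm.getBinding("example.highscore");
            } catch (NameNotBoundException nnbe) {
                HighScore score = new HighScore();
                dm.setBinding("example.highscore", score);
                return score;
            }
        }

        /** Mutators must first mark the object for update. */
        public void record(int value) {
            if (value > best) {
                AppContext.getDataManager().markForUpdate(this);
                best = value;
            }
        }
    }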


    /**
     * Get the list of all root cells in the world, creating it if it doesn't
     * exist.
     * @return a set of root cells
     */
    public Set<CellID> getRootCells() {
        DataManager dm = AppContext.getDataManager();
        Set<CellID> out;
        try {
            out = (Set<CellID>) dm.getBinding(ROOTCELLS_BINDING_NAME);
        } catch (NameNotBoundException nnbe) {
            out = new ScalableHashSet<CellID>();
            dm.setBinding(ROOTCELLS_BINDING_NAME, out);
        }

        return out;
    }


    /**
     * Returns a unique cell ID and registers the cell with the system.
     * @return the newly assigned cell ID
     */
    CellID createCellID(CellMO cell) {
        DataManager dm = AppContext.getDataManager();
        CellID cellID;

        if (cell instanceof EnvironmentCellMO) {
            // special case: environment cell is a singleton that has a fixed id
            cellID = CellID.getEnvironmentCellID();
        } else {
            // default case: assign a new cell ID
            CellCounter counter;
            try {
                counter = (CellCounter) dm.getBindingForUpdate(COUNTER_BINDING_NAME);
            } catch (NameNotBoundException nnbe) {
                counter = new CellCounter();
                dm.setBinding(COUNTER_BINDING_NAME, counter);
            }

            cellID = new CellID(counter.nextCellID());
        }

        dm.setBinding(getCellBinding(cellID), cell);
        return cellID;
    }
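The CellCounter class referenced above is not shown on this page. A minimal sketch consistent with the call site might look like the following; the field name and starting value are assumptions. Note that getBindingForUpdate() resolves the binding and marks the object for update in a single call, which fits this counter since every access mutates it.

    /** Sketch of the counter bound under COUNTER_BINDING_NAME. */
    private static class CellCounter implements ManagedObject, Serializable {
        private static final long serialVersionUID = 1L;
        private long nextID = 1;  // assumption: IDs start at 1

        /** Returns the next ID; callers fetch this with getBindingForUpdate(). */
        long nextCellID() {
            return nextID++;
        }
    }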


    private static CellComponentMap getComponentMap() {
        DataManager dm = AppContext.getDataManager();
        CellComponentMap out;
        try {
            out = (CellComponentMap) dm.getBinding(COMPONENTS_BINDING_NAME);
        } catch (NameNotBoundException nnbe) {
            logger.log(Level.WARNING, COMPONENTS_BINDING_NAME + " not bound",
                       nnbe);
            out = new CellComponentMap();
            dm.setBinding(COMPONENTS_BINDING_NAME, out);
        }

        return out;
    }

  /**
   * {@inheritDoc}
   */
  public void clear() {
      TreeNode<E> parent = getParent();
      DataManager dm = AppContext.getDataManager();
      dm.removeObject(getSubList());
      dm.removeObject(this);
      parent.clear();
  }
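Note that removeObject() deletes a managed object from the data store but does not remove any name bindings that still point to it; a later getBinding() on such a name fails with ObjectNotFoundException. When an object is both bound and managed, a full cleanup takes both calls, sketched here with a hypothetical binding name:

    DataManager dm = AppContext.getDataManager();
    // Resolve the object, drop its name binding, then delete the object
    // itself; removeBinding() alone would orphan the object in the store.
    ManagedObject obj = (ManagedObject) dm.getBinding("example.binding");
    dm.removeBinding("example.binding");
    dm.removeObject(obj);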

        private ManagedReference<ClientSession> sessionRef;

        private TestProtocolSessionListener(ClientSession session) {
            logger.info("New session for " + session.getName());
           
            DataManager dm = AppContext.getDataManager();
            sessionRef = dm.createReference(session);
        }

            public SendTask(WonderlandClientSender sender,
                            ClientSession session)
            {
                this.sender = sender;
               
                DataManager dm = AppContext.getDataManager();
                sessionRef = dm.createReference(session);
            }
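Both constructors above store the ClientSession through dm.createReference() rather than in a plain field: a managed object must not keep a direct Java reference to another managed object across transactions, and ManagedReference is the persistent, lazily resolved handle. Dereferencing later is a get() call; a hypothetical run() method for SendTask might look like this, where the send call and the 'message' field are assumptions for illustration:

    public void run() throws Exception {
        // get() resolves the reference within the current transaction
        ClientSession session = sessionRef.get();
        // hypothetical: deliver whatever payload this task carries
        sender.send(session, message);
    }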

    // If we need more depth, we will use a recursive call to ensure the
    // depth of the leaves.
    int leafBits = Math.min(minDepth - depth, maxDirBits);
    int numLeaves = 1 << leafBits;

    DataManager dm = AppContext.getDataManager();
    dm.markForUpdate(this);
    ManagedReference<ScalableHashMap<K, V>> thisRef =
        dm.createReference(this);

    ScalableHashMap[] leaves = new ScalableHashMap[numLeaves];
    for (int i = 0; i < numLeaves; ++i) {
        ScalableHashMap<K, V> leaf = new ScalableHashMap<K, V>(
            depth + leafBits, minDepth, splitThreshold, 1 << maxDirBits);
        leaves[i] = leaf;
        leaf.parentRef = thisRef;
    }

    // link the interior leaves into the doubly-linked leaf list
    for (int i = 1; i < numLeaves - 1; ++i) {
        ScalableHashMap<K, V> leaf = uncheckedCast(leaves[i]);
        leaf.leftLeafRef = uncheckedCast(
            dm.createReference(leaves[i - 1]));
        leaf.rightLeafRef = uncheckedCast(
            dm.createReference(leaves[i + 1]));
    }

    // edge updating - note that since there are guaranteed to be at least
    // two leaves, these absolute offset calls are safe
    ScalableHashMap<K, V> firstLeaf = uncheckedCast(leaves[0]);
    firstLeaf.leftLeafRef = leftLeafRef;
    if (leftLeafRef != null) {
        ScalableHashMap<K, V> leftLeaf = leftLeafRef.get();
        leftLeaf.rightLeafRef = dm.createReference(firstLeaf);
    }
    firstLeaf.rightLeafRef = uncheckedCast(dm.createReference(leaves[1]));
    ScalableHashMap<K, V> lastLeaf = uncheckedCast(leaves[numLeaves - 1]);
    lastLeaf.leftLeafRef =
        uncheckedCast(dm.createReference(leaves[numLeaves - 2]));
    lastLeaf.rightLeafRef = rightLeafRef;
    if (rightLeafRef != null) {
        ScalableHashMap<K, V> rightLeaf = rightLeafRef.get();
        rightLeaf.leftLeafRef = dm.createReference(lastLeaf);
    }

    // since this node is now a directory, invalidate its leaf-list
    // references
    leftLeafRef = null;
    rightLeafRef = null;

    int entriesPerLeaf = nodeDirectory.length / numLeaves;

    // lastly, fill the directory with the references
    int pos = 0;
    for (ScalableHashMap leaf : leaves) {
        int nextPos = pos + entriesPerLeaf;
        Arrays.fill(nodeDirectory, pos, nextPos, dm.createReference(leaf));
        pos = nextPos;
    }

    /* Make sure the leaves have the minimum required depth. */
    for (ScalableHashMap leaf : leaves) {
        // ...
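The uncheckedCast() helper used throughout these ScalableHashMap excerpts is a small utility for containing the unchecked warnings that arise when typed references are pulled out of the raw nodeDirectory array. Its definition is not shown on this page; a plausible sketch:

    /** Sketch of the uncheckedCast utility assumed by these excerpts. */
    @SuppressWarnings("unchecked")
    private static <T> T uncheckedCast(Object object) {
        return (T) object;
    }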

    /**
     * Splits this leaf node in two and redistributes its entries among
     * the new children.
     */
    private void split() {
        assert isLeafNode() : "Can't split a directory node";
        assert depth < MAX_DEPTH : "Can't split at maximum depth";

        DataManager dataManager = AppContext.getDataManager();
        dataManager.markForUpdate(this);

        ScalableHashMap<K, V> leftChild =
            new ScalableHashMap<K, V>(
                depth + 1, minDepth, splitThreshold, 1 << maxDirBits);
        ScalableHashMap<K, V> rightChild =
            new ScalableHashMap<K, V>(
                depth + 1, minDepth, splitThreshold, 1 << maxDirBits);

        // to add this node to the parent directory, we need to determine
        // the prefix that will lead to this node.  Grabbing a hash code
        // from one of our entries will suffice.
        int prefix = 0x0; // this should never stay at its initial value

        // iterate over all the entries in this table and assign them to
        // either the right child or left child
        int firstRight = table.length / 2;
        for (int i = 0; i < table.length; i++) {
            ScalableHashMap<K, V> child =
                (i < firstRight) ? leftChild : rightChild;
            PrefixEntry<K, V> prev = null;
            int prevIndex = 0;
            PrefixEntry<K, V> e = getBucket(i);
            while (e != null) {
                prefix = e.hash;
                int index = child.indexFor(e.hash);
                PrefixEntry<K, V> next = e.next;
                /* Chain to the previous node if the index is the same */
                child.addEntry(e, index == prevIndex ? prev : null);
                prev = e;
                prevIndex = index;
                e = next;
            }
        }

        // null out the intermediate node's table
        setDirectoryNode();
        size = 0;

        // create the references to the new children
        ManagedReference<ScalableHashMap<K, V>> leftChildRef =
            dataManager.createReference(leftChild);
        ManagedReference<ScalableHashMap<K, V>> rightChildRef =
            dataManager.createReference(rightChild);

        if (leftLeafRef != null) {
            ScalableHashMap<K, V> leftLeaf = leftLeafRef.get();
            leftLeaf.rightLeafRef = leftChildRef;
            leftChild.leftLeafRef = leftLeafRef;
            leftLeafRef = null;
        }

        if (rightLeafRef != null) {
            ScalableHashMap<K, V> rightLeaf = rightLeafRef.get();
            rightLeaf.leftLeafRef = rightChildRef;
            rightChild.rightLeafRef = rightLeafRef;
            rightLeafRef = null;
        }

        // update the family links
        leftChild.rightLeafRef = rightChildRef;
        rightChild.leftLeafRef = leftChildRef;

        // Decide what to do with this node:
        //
        // This node should form a new directory node in the following
        // cases:
        //
        // 1. This node is the root node.
        //
        // 2. This node has reached the maximum permitted depth relative
        //    to its parent.  Each directory node uses at most maxDirBits
        //    of the hash code to determine the position of a child node
        //    in its directory.  If the depth of this node relative to
        //    its parent already uses that number of bits, then an
        //    additional level of directory nodes is needed to reach the
        //    desired depth.  Note that a node will reach its maximum
        //    depth only after all of its parents have already done so.
        //
        // 3. The minimum concurrency requested requires that the parent
        //    node not be modified.  When the trie is constructed, leaves
        //    are created at a minimum depth.  When one of these leaves
        //    needs to split, it should not be added to its parent
        //    directory, in order to provide the requested concurrency.
        if (isRootNode() ||
            depth % maxDirBits == 0 ||
            depth == minDepth) {

            // this leaf node will become a directory node
            ManagedReference<ScalableHashMap<K, V>> thisRef =
                dataManager.createReference(this);
            rightChild.parentRef = thisRef;
            leftChild.parentRef = thisRef;

            setDirectoryNode();
            nodeDirectory = new ManagedReference[1 << getNodeDirBits()];
            // ...

    int index = highBits(prefix, dirBits);

    // the leaf is under this node, so just look it up using the directory
    ScalableHashMap<K, V> leaf = getChildNode(index);

    DataManager dm = AppContext.getDataManager();

    // remove the old leaf node
    dm.removeObject(leaf);
    // mark this node for update since we will be changing its directory
    dm.markForUpdate(this);

    // update the new children nodes to point to this directory node as
    // their parent
    ManagedReference<ScalableHashMap<K, V>> thisRef =
        dm.createReference(this);
    rightChildRef.get().parentRef = thisRef;
    leftChildRef.get().parentRef = thisRef;

    // how many bits in the prefix are significant for looking up the
    // old leaf
    // ...

    private final Stack<Integer> offsets = new Stack<Integer>();

    /** Creates an instance for the specified directory node. */
    private RemoveNodesTask(ScalableHashMap<K, V> node) {
        assert !node.isLeafNode();
        DataManager dm = AppContext.getDataManager();
        ManagedReference<ScalableHashMap<K, V>> lastRef = null;
        for (int i = 0; i < node.nodeDirectory.length; i++) {
            ManagedReference<ScalableHashMap<K, V>> ref =
                uncheckedCast(node.nodeDirectory[i]);
            /* Skip clearing duplicate nodes in the directory */
            if (ref != lastRef) {
                ScalableHashMap<K, V> child = ref.get();
                /*
                 * Clear the parent reference so we don't walk up to the
                 * root node, which is being reused.
                 */
                dm.markForUpdate(child);
                child.parentRef = null;
                if (lastRef == null) {
                    currentNodeRef = ref;
                } else {
                    nodeRefs.add(ref);
                    // ...
