Package org.exist.storage.lock

Examples of org.exist.storage.lock.Lock
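
All of the snippets below follow the same idiom: obtain the Lock that guards a particular store (collectionsDb.getLock(), dbValues.getLock(), btree.getLock(), or the global update lock), acquire it for READ or WRITE inside a try block, perform the guarded operation, and release it in the matching finally block so the lock is freed even when the operation fails. The minimal sketch below shows that shared pattern in isolation; the enclosing method name, the collectionsDb field and the LOG logger are assumptions standing in for whichever lock-guarded store a real caller uses, not code taken verbatim from eXist.

    // Minimal sketch of the shared acquire/release idiom (assumed field and method names).
    private void flushCollectionsStore() {
        final Lock lock = collectionsDb.getLock();      // each store exposes its own Lock
        try {
            lock.acquire(Lock.WRITE_LOCK);              // blocks until granted; may throw LockException
            collectionsDb.flush();                      // the guarded operation
        } catch (final LockException e) {
            // the convention in these examples is to log the failure rather than rethrow it
            LOG.warn("Failed to acquire lock on " + collectionsDb.getFile().getName(), e);
        } finally {
            lock.release(Lock.WRITE_LOCK);              // always released, even on failure
        }
    }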


            newName = doc.getFileURI();
        }

        final CollectionCache collectionsCache = pool.getCollectionsCache();
        synchronized(collectionsCache) {
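            // lock ordering: the collection cache monitor is held first, then the write lock on the collections store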
            final Lock lock = collectionsDb.getLock();
            try {
                lock.acquire(Lock.WRITE_LOCK);
                final DocumentImpl oldDoc = destination.getDocument(this, newName);

                if(!destination.getPermissionsNoLock().validate(getSubject(), Permission.EXECUTE)) {
                    throw new PermissionDeniedException("Account '" + getSubject().getName() + "' does not have execute access on the destination collection '" + destination.getURI() + "'.");
                }

                if(destination.hasChildCollection(this, newName.lastSegment())) {
                    throw new EXistException(
                        "The collection '" + destination.getURI() + "' already has a sub-collection named '" + newName.lastSegment() + "', you cannot create a Document with the same name as an existing collection."
                    );
                }

                final XmldbURI newURI = destination.getURI().append(newName);
                final XmldbURI oldUri = doc.getURI();

                final DocumentTrigger trigger = new DocumentTriggers(this, collection);

                if(oldDoc == null) {
                    if(!destination.getPermissionsNoLock().validate(getSubject(), Permission.WRITE)) {
                        throw new PermissionDeniedException("Account '" + getSubject().getName() + "' does not have write access on the destination collection '" + destination.getURI() + "'.");
                    }
                } else {
                    //overwrite existing document

                    if(doc.getDocId() == oldDoc.getDocId()) {
                        throw new EXistException("Cannot copy resource to itself '" + doc.getURI() + "'.");
                    }

                    if(!oldDoc.getPermissions().validate(getSubject(), Permission.WRITE)) {
                        throw new PermissionDeniedException("A resource with the same name already exists in the target collection '" + oldDoc.getURI() + "', and you do not have write access on that resource.");
                    }

                    trigger.beforeDeleteDocument(this, transaction, oldDoc);
                    trigger.afterDeleteDocument(this, transaction, newURI);
                }

                trigger.beforeCopyDocument(this, transaction, doc, newURI);

                DocumentImpl newDocument = null;
                if(doc.getResourceType() == DocumentImpl.BINARY_FILE) {
                    InputStream is = null;
                    try {
                        is = getBinaryResource((BinaryDocument) doc);
                        newDocument = destination.addBinaryResource(transaction, this, newName, is, doc.getMetadata().getMimeType(), -1);
                    } finally {
                        if(is != null) {
                            is.close();
                        }
                    }
                } else {
                    final DocumentImpl newDoc = new DocumentImpl(pool, destination, newName);
                    newDoc.copyOf(doc, oldDoc != null);
                    newDoc.setDocId(getNextResourceId(transaction, destination));
                    newDoc.getUpdateLock().acquire(Lock.WRITE_LOCK);
                    try {
                        copyXMLResource(transaction, doc, newDoc);
                        destination.addDocument(transaction, this, newDoc);
                        storeXMLResource(transaction, newDoc);
                    } finally {
                        newDoc.getUpdateLock().release(Lock.WRITE_LOCK);
                    }
                    newDocument = newDoc;
                }

                trigger.afterCopyDocument(this, transaction, newDocument, oldUri);

            } catch(final IOException e) {
                LOG.warn("An error occurred while copying resource", e);
            } catch(final TriggerException e) {
                throw new PermissionDeniedException(e.getMessage(), e);
            } finally {
                lock.release(Lock.WRITE_LOCK);
            }
        }
    }


    /**
     * Remove the metadata entry for a document from the collections store.
     *
     * @param transaction the current transaction
     * @param document the document whose metadata entry should be removed
     */
    private void removeResourceMetadata(final Txn transaction, final DocumentImpl document) {
        // remove document metadata
        final Lock lock = collectionsDb.getLock();
        try {
            lock.acquire(Lock.READ_LOCK);
            if(LOG.isDebugEnabled()) {
                LOG.debug("Removing resource metadata for " + document.getDocId());
            }
            final Value key = new CollectionStore.DocumentKey(document.getCollection().getId(), document.getResourceType(), document.getDocId());
            collectionsDb.remove(transaction, key);
            //} catch (ReadOnlyException e) {
            //LOG.warn(DATABASE_IS_READ_ONLY);
        } catch(final LockException e) {
            LOG.warn("Failed to acquire lock on " + collectionsDb.getFile().getName());
        } finally {
            lock.release(Lock.READ_LOCK);
        }
    }

        int nextDocId = collectionsDb.getFreeResourceId();
        if(nextDocId != DocumentImpl.UNKNOWN_DOCUMENT_ID) {
            return nextDocId;
        }
        nextDocId = 1;
        final Lock lock = collectionsDb.getLock();
        try {
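            // the next-document-id counter lives in the collections store, so updates are serialized by its write lock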
            lock.acquire(Lock.WRITE_LOCK);
            final Value key = new CollectionStore.CollectionKey(CollectionStore.NEXT_DOC_ID_KEY);
            final Value data = collectionsDb.get(key);
            if(data != null) {
                nextDocId = ByteConversion.byteToInt(data.getData(), 0);
                ++nextDocId;
                if(nextDocId == 0x7FFFFFFF) {
                    pool.setReadOnly();
                    throw new EXistException("Max. number of document ids reached. Database is set to " +
                        "read-only state. Please do a complete backup/restore to compact the db and " +
                        "free document ids.");
                }
            }
            final byte[] d = new byte[4];
            ByteConversion.intToByte(nextDocId, d, 0);
            collectionsDb.put(transaction, key, d, true);
            //} catch (ReadOnlyException e) {
            //LOG.warn("Database is read-only");
            //return DocumentImpl.UNKNOWN_DOCUMENT_ID;
            //TODO : rethrow ? -pb
        } catch(final LockException e) {
            LOG.warn("Failed to acquire lock on " + collectionsDb.getFile().getName(), e);
            //TODO : rethrow ? -pb
        } finally {
            lock.release(Lock.WRITE_LOCK);
        }
        return nextDocId;
    }

        rebuildIndex(COLLECTIONS_DBX_ID);
    }

    protected void rebuildIndex(final byte indexId) {
        final BTree btree = getStorage(indexId);
        final Lock lock = btree.getLock();
        try {
            lock.acquire(Lock.WRITE_LOCK);

            LOG.info("Rebuilding index " + btree.getFile().getName());
            btree.rebuild();
            LOG.info("Index " + btree.getFile().getName() + " was rebuilt.");
        } catch(LockException | IOException | TerminatedException | DBException e) {
            LOG.warn("Caught error while rebuilding core index " + btree.getFile().getName() + ": " + e.getMessage(), e);
        } finally {
            lock.release(Lock.WRITE_LOCK);
        }
    }

                    }
                    return null;
                }
            }.run();
            if(syncEvent == Sync.MAJOR_SYNC) {
                final Lock lock = collectionsDb.getLock();
                try {
                    lock.acquire(Lock.WRITE_LOCK);
                    collectionsDb.flush();
                } catch(final LockException e) {
                    LOG.warn("Failed to acquire lock on " + collectionsDb.getFile().getName(), e);
                } finally {
                    lock.release(Lock.WRITE_LOCK);
                }
                notifySync();
                pool.getIndexManager().sync();
                final NumberFormat nf = NumberFormat.getNumberInstance();
                LOG_STATS.info("Memory: " + nf.format(run.totalMemory() / 1024) + "K total; " +

  /**
   * Select the nodes to be updated and acquire a lock on each of the documents
   * they belong to, so that node positions cannot change while the modification
   * is applied.
   *
   * @throws LockException
   * @throws TriggerException
   */
  protected StoredNode[] selectAndLock(Txn transaction, Sequence nodes) throws LockException, PermissionDeniedException,
      XPathException, TriggerException {
      final Lock globalLock = context.getBroker().getBrokerPool().getGlobalUpdateLock();
      try {
          globalLock.acquire(Lock.READ_LOCK);

          lockedDocuments = nodes.getDocumentSet();

          // acquire a lock on all documents:
          // node positions must not change during the modification
          lockedDocuments.lock(context.getBroker(), true, false);

          final StoredNode[] ql = new StoredNode[nodes.getItemCount()];
          for (int i = 0; i < ql.length; i++) {
              final Item item = nodes.itemAt(i);
              if (!Type.subTypeOf(item.getType(), Type.NODE)) {
                  throw new XPathException(this, "XQuery update expressions can only be applied to nodes. Got: " +
                      item.getStringValue());
              }
              final NodeValue nv = (NodeValue) item;
              if (nv.getImplementationType() == NodeValue.IN_MEMORY_NODE) {
                  throw new XPathException(this, "XQuery update expressions can not be applied to in-memory nodes.");
              }
              final Node n = nv.getNode();
              if (n.getNodeType() == Node.DOCUMENT_NODE) {
                  throw new XPathException(this, "Updating the document object is not allowed.");
              }
              ql[i] = (StoredNode) n;
              final DocumentImpl doc = (DocumentImpl) ql[i].getOwnerDocument();
              // prepare the update trigger for the owner document
              prepareTrigger(transaction, doc);
          }
          return ql;
      } finally {
          globalLock.release(Lock.READ_LOCK);
      }
  }

    /* (non-Javadoc)
     * @see org.exist.storage.IndexGenerator#sync()
     */
    public void sync()
    {
        final Lock lock = dbValues.getLock();

        try {
            lock.acquire( Lock.WRITE_LOCK );
            dbValues.flush();
        }
        catch( final LockException e ) {
            LOG.warn( "Failed to acquire lock for '" + dbValues.getFile().getName() + "'", e );
            //TODO : throw an exception ? -pb
        }
        catch( final DBException e ) {
            LOG.error( e.getMessage(), e );
            //TODO : throw an exception ? -pb
        }
        finally {
            lock.release( Lock.WRITE_LOCK );
        }
    }

        if( keyCount == 0 ) {
            return;
        }
        final int  collectionId = this.doc.getCollection().getId();
        final Lock lock         = dbValues.getLock();

        for( byte section = 0; section <= IDX_QNAME; section++ ) {

            for( final Map.Entry<Object, List<NodeId>> entry : pending[section].entrySet()) {
                final Object key = entry.getKey();

                //TODO : NativeElementIndex uses ArrayLists -pb
                final List<NodeId> gids = entry.getValue();
                final int gidsCount = gids.size();

                //Don't forget this one
                FastQSort.sort( gids, 0, gidsCount - 1 );
                os.clear();
                os.writeInt( this.doc.getDocId() );
                os.writeInt( gidsCount );

                //Mark position
                final int nodeIDsLength = os.position();

                //Dummy value : actual one will be written below
                os.writeFixedInt( 0 );

                //Compute the GID list
                NodeId previous = null;

                for( final NodeId nodeId : gids ) {
                    try {
                        previous = nodeId.write( previous, os );
//                        nodeId.write(os);
                    }
                    catch( final IOException e ) {
                        LOG.warn( "IO error while writing range index: " + e.getMessage(), e );
                        //TODO : throw exception?
                    }
                }

                //Write (variable) length of node IDs
                os.writeFixedInt( nodeIDsLength, os.position() - nodeIDsLength - LENGTH_NODE_IDS );

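                // the write lock is held only around the append below; the GID buffer above was built without it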
                try {
                    lock.acquire( Lock.WRITE_LOCK );
                    Value v;

                    if( section == IDX_GENERIC ) {
                        v = new SimpleValue( collectionId, ( Indexable )key );
                    } else {
                        final QNameKey qnk = ( QNameKey )key;
                        v = new QNameValue( collectionId, qnk.qname, qnk.value, broker.getBrokerPool().getSymbols() );
                    }

                    if( dbValues.append( v, os.data() ) == BFile.UNKNOWN_ADDRESS ) {
                        LOG.warn( "Could not append index data for key '" + key + "'" );
                        //TODO : throw exception ?
                    }
                }
                catch( final EXistException e ) {
                    LOG.error( e.getMessage(), e );
                }
                catch( final LockException e ) {
                    LOG.warn( "Failed to acquire lock for '" + dbValues.getFile().getName() + "'", e );
                    //TODO : return ?
                }
                catch( final IOException e ) {
                    LOG.error( e.getMessage(), e );
                    //TODO : return ?
                }
                catch( final ReadOnlyException e ) {
                    LOG.warn( e.getMessage(), e );

                    //Return without clearing the pending entries
                    return;
                }
                finally {
                    lock.release( Lock.WRITE_LOCK );
                    os.clear();
                }
            }
            pending[section].clear();
        }

        if( keyCount == 0 ) {
            return;
        }
        final int  collectionId = this.doc.getCollection().getId();
        final Lock lock         = dbValues.getLock();

        for( byte section = 0; section <= IDX_QNAME; section++ ) {

            for( final Map.Entry<Object, List<NodeId>> entry : pending[section].entrySet() ) {
                final Object    key           = entry.getKey();
                final List<NodeId> storedGIDList = entry.getValue();
                final List<NodeId> newGIDList    = new ArrayList<NodeId>();
                os.clear();

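                // here the lock is held across the whole read-modify-write of the existing index entry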
                try {
                    lock.acquire( Lock.WRITE_LOCK );

                    //Compute a key for the value
                    Value searchKey;

                    if( section == IDX_GENERIC ) {
                        searchKey = new SimpleValue( collectionId, ( Indexable )key );
                    } else {
                        final QNameKey qnk = ( QNameKey )key;
                        searchKey = new QNameValue( collectionId, qnk.qname, qnk.value, broker.getBrokerPool().getSymbols() );
                    }
                    final Value value = dbValues.get( searchKey );

                    //Does the value already have data in the index?
                    if( value != null ) {

                        //Add its data to the new list
                        final VariableByteArrayInput is = new VariableByteArrayInput( value.getData() );

                        while( is.available() > 0 ) {
                            final int storedDocId = is.readInt();
                            final int gidsCount   = is.readInt();
                            final int size        = is.readFixedInt();

                            if( storedDocId != this.doc.getDocId() ) {

                                // data are related to another document:
                                // append them to any existing data
                                os.writeInt( storedDocId );
                                os.writeInt( gidsCount );
                                os.writeFixedInt( size );
                                is.copyRaw( os, size );
                            } else {

                                // data are related to our document:
                                // feed the new list with the GIDs
                                NodeId previous = null;

                                for( int j = 0; j < gidsCount; j++ ) {
                                    NodeId nodeId = broker.getBrokerPool().getNodeFactory().createFromStream( previous, is );
                                    previous = nodeId;

                                    // add the node to the new list if it is not
                                    // in the list of removed nodes
                                    if( !containsNode( storedGIDList, nodeId ) ) {
                                        newGIDList.add( nodeId );
                                    }
                                }
                            }
                        }

                        //append the data from the new list
                        if( newGIDList.size() > 0 ) {
                            final int gidsCount = newGIDList.size();

                            //Don't forget this one
                            FastQSort.sort( newGIDList, 0, gidsCount - 1 );
                            os.writeInt( this.doc.getDocId() );
                            os.writeInt( gidsCount );

                            //Mark position
                            final int nodeIDsLength = os.position();

                            //Dummy value : actual one will be written below
                            os.writeFixedInt( 0 );
                            NodeId previous = null;

                            for( final NodeId nodeId : newGIDList ) {
                                try {
                                    previous = nodeId.write( previous, os );
                                }
                                catch( final IOException e ) {
                                    LOG.warn( "IO error while writing range index: " + e.getMessage(), e );
                                    //TODO : throw exception ?
                                }
                            }

                            //Write (variable) length of node IDs
                            os.writeFixedInt( nodeIDsLength, os.position() - nodeIDsLength - LENGTH_NODE_IDS );
                        }

//                        if(os.data().size() == 0)
//                            dbValues.remove(value);
                        if( dbValues.update( value.getAddress(), searchKey, os.data() ) == BFile.UNKNOWN_ADDRESS ) {
                            LOG.error( "Could not update index data for value '" + searchKey + "'" );
                            //TODO: throw exception ?
                        }
                    } else {

                        if( dbValues.put( searchKey, os.data() ) == BFile.UNKNOWN_ADDRESS ) {
                            LOG.error( "Could not put index data for value '" + searchKey + "'" );
                            //TODO : throw exception ?
                        }
                    }
                }
                catch( final EXistException e ) {
                    LOG.error( e.getMessage(), e );
                }
                catch( final LockException e ) {
                    LOG.warn( "Failed to acquire lock for '" + dbValues.getFile().getName() + "'", e );
                    //TODO : return ?
                }
                catch( final ReadOnlyException e ) {
                    LOG.warn( "Read-only error on '" + dbValues.getFile().getName() + "'", e );
                }
                catch( final IOException e ) {
                    LOG.error( e.getMessage(), e );
                }
                finally {
                    lock.release( Lock.WRITE_LOCK );
                    os.clear();
                }
            }
            pending[section].clear();
        }

    /**
     * Drop all index entries for the given collection.
     *
     * @see org.exist.storage.IndexGenerator#dropIndex(org.exist.collections.Collection)
     */
    public void dropIndex( Collection collection )
    {
        final Lock lock = dbValues.getLock();

        try {
            lock.acquire( Lock.WRITE_LOCK );

            //TODO : flush ? -pb
            // remove generic index
            Value ref = new SimpleValue( collection.getId() );
            dbValues.removeAll( null, new IndexQuery( IndexQuery.TRUNC_RIGHT, ref ) );

            // remove QName index
            ref = new QNameValue( collection.getId() );
            dbValues.removeAll( null, new IndexQuery( IndexQuery.TRUNC_RIGHT, ref ) );
        }
        catch( final LockException e ) {
            LOG.warn( "Failed to acquire lock for '" + dbValues.getFile().getName() + "'", e );
        }
        catch( final BTreeException e ) {
            LOG.error( e.getMessage(), e );
        }
        catch( final IOException e ) {
            LOG.error( e.getMessage(), e );
        }
        finally {
            lock.release( Lock.WRITE_LOCK );
        }
    }
