Package org.apache.stanbol.ontologymanager.ontonet.api.session

Examples of org.apache.stanbol.ontologymanager.ontonet.api.session.Session

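Before the full engine example, here is a minimal sketch of the Session lifecycle the snippets below rely on: create a session, populate it with a graph, export the merged ontology, and always destroy the session afterwards. The helper method name, its signature and the injected sessionManager field are placeholders rather than part of the OntoNet API.

    // A minimal lifecycle sketch: create a session, feed it a graph, export the merged
    // result, then always destroy the session. The method name and the injected
    // 'sessionManager' field are placeholders for whatever the host component provides.
    OWLOntology refactorInput(MGraph graph) throws SessionLimitException, EngineException {
        Session session = sessionManager.createSession();
        try {
            session.addOntology(new GraphSource(graph));
            return session.export(OWLOntology.class, true); // merge all managed ontologies
        } catch (UnmodifiableOntologyCollectorException e) {
            throw new EngineException("Could not populate the OntoNet session.", e);
        } finally {
            sessionManager.destroySession(session.getID()); // never leak the session
        }
    }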

    @Override
    public void computeEnhancements(ContentItem ci) throws EngineException {

        // Prepare the OntoNet environment. First create the OntoNet session in which the whole job will run.
        final Session session;
        try {
            session = sessionManager.createSession();
        } catch (SessionLimitException e1) {
            throw new EngineException(
                    "OntoNet session quota reached. The Refactor Engine requires its own new session to execute.");
        }
        if (session == null) throw new EngineException(
                "Failed to create OntoNet session. The Refactor Engine requires its own new session to execute.");

        log.debug("Refactor enhancement job will run in session '{}'.", session.getID());

        // Retrieve and filter the metadata graph for entities recognized by the engines.
        final MGraph metadataGraph = ci.getMetadata(), signaturesGraph = new IndexedMGraph();
        // FIXME the Stanbol Enhancer vocabulary should be retrieved from somewhere in the enhancer API.
        final UriRef ENHANCER_ENTITY_REFERENCE = new UriRef(
                "http://fise.iks-project.eu/ontology/entity-reference");
        Iterator<Triple> tripleIt = metadataGraph.filter(null, ENHANCER_ENTITY_REFERENCE, null);
        while (tripleIt.hasNext()) {
            // Get the entity URI
            Resource obj = tripleIt.next().getObject();
            if (!(obj instanceof UriRef)) {
                log.warn("Invalid UriRef for entity reference {}. Skipping.", obj);
                continue;
            }
            final String entityReference = ((UriRef) obj).getUnicodeString();
            log.debug("Trying to resolve entity {}", entityReference);

            // Populate the entity signatures graph, by querying either the Entity Hub or the dereferencer.
            if (engineConfiguration.isEntityHubUsed()) {
                MGraph result = populateWithEntity(entityReference, signaturesGraph);
                if (result != signaturesGraph && result != null) {
                    log.warn("Entity Hub query added triples to a new graph instead of populating the supplied one!"
                             + " New signatures will be discarded.");
                }
            } else try {
                OntologyInputSource<TripleCollection,?> source = new GraphContentSourceWithPhysicalIRI(
                        dereferencer.resolve(entityReference), IRI.create(entityReference));
                signaturesGraph.addAll(source.getRootOntology());
            } catch (FileNotFoundException e) {
                log.error("Failed to dereference entity " + entityReference + ". Skipping.", e);
                continue;
            }
        }

        try {
            /*
             * The dedicated session for this job will store the following: (1) all the (merged) signatures
             * for all detected entities; (2) the original content metadata graph returned earlier in the
             * chain.
             *
             * There is no chance that (2) could be null, as it was previously checked by the JobManager
             * through the canEnhance() method, and computeEnhancements() is only called if that check
             * returns true.
             */
            session.addOntology(new GraphSource(signaturesGraph));
            session.addOntology(new GraphSource(metadataGraph));
        } catch (UnmodifiableOntologyCollectorException e1) {
            throw new EngineException("Cannot add enhancement graph to OntoNet session for refactoring", e1);
        }

        try {
            /*
             * Export the entire session (incl. entities and enhancement graph) as a single merged ontology.
             *
             * TODO the refactorer should have methods to accommodate an OntologyCollector directly instead.
             */
            OWLOntology ontology = session.export(OWLOntology.class, true);
            log.debug("Refactoring recipe IRI is : " + engineConfiguration.getRecipeId());

            /*
             * We pass the ontology and the recipe IRI to the Refactor that returns the refactored graph
             * expressed by using the given vocabulary.
             *
             * To perform the refactoring of the ontology to a given vocabulary we use the Stanbol Refactor.
             */
            Recipe recipe = ruleStore.getRecipe(IRI.create(engineConfiguration.getRecipeId()));

            log.debug("Recipe {} contains {} rules.", recipe, recipe.getkReSRuleList().size());
            log.debug("The ontology to be refactor is {}", ontology);

            ontology = refactorer
                    .ontologyRefactoring(ontology, IRI.create(engineConfiguration.getRecipeId()));

            /*
             * The newly generated ontology is converted to Clerezza format and then either appended to
             * or substituted for the old metadata graph, depending on the engine configuration.
             */
            if (engineConfiguration.isInGraphAppendMode()) {
                log.debug("Refactored content metadata will be appended to the existing metadata.");
            } else {
                metadataGraph.clear();
                log.debug("Refactored content metadata will replace the existing metadata.");
            }
            metadataGraph.addAll(OWLAPIToClerezzaConverter.owlOntologyToClerezzaTriples(ontology));

        } catch (RefactoringException e) {
            String msg = "Refactor engine execution failed on content item " + ci + ".";
            log.error(msg, e);
            throw new EngineException(msg, e);
        } catch (NoSuchRecipeException e) {
            String msg = "Refactor engine could not find recipe " + engineConfiguration.getRecipeId()
                         + " to refactor content item " + ci + ".";
            log.error(msg, e);
            throw new EngineException(msg, e);
        } catch (Exception e) {
            throw new EngineException("Refactor Engine has failed.", e);
        } finally {
            /*
             * The session must be destroyed in any case.
             *
             * Clear its contents before destroying it (FIXME: only needed until this is implemented in
             * the destroySession() method).
             */
            for (IRI iri : session.listManagedOntologies()) {
                try {
                    String key = ontologyProvider.getKey(iri);
                    ontologyProvider.getStore().deleteTripleCollection(new UriRef(key));
                } catch (Exception ex) {
                    log.error("Failed to delete triple collection " + iri, ex);
                    continue;
                }
            }
            sessionManager.destroySession(session.getID());
        }

    }
View Full Code Here
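
The finally block above works around a FIXME by deleting the triple collection behind each managed ontology before destroying the session. The same cleanup pattern, condensed into a hypothetical helper that assumes the ontologyProvider and sessionManager fields used by the engine above:

    // Hypothetical helper condensing the cleanup pattern from the finally block above:
    // drop every stored triple collection backing the session, then destroy the session.
    void disposeSession(Session session) {
        try {
            for (IRI iri : session.listManagedOntologies()) {
                try {
                    String key = ontologyProvider.getKey(iri);
                    ontologyProvider.getStore().deleteTripleCollection(new UriRef(key));
                } catch (Exception ex) {
                    log.error("Failed to delete triple collection " + iri, ex);
                }
            }
        } finally {
            sessionManager.destroySession(session.getID());
        }
    }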


    @Override
    public Session createSession() throws SessionLimitException {
        checkSessionLimit();
        Set<String> exclude = getRegisteredSessionIDs();
        Session session = null;
        while (session == null)
            try {
                session = createSession(idgen.createSessionID(exclude));
            } catch (DuplicateSessionIDException e) {
                exclude.add(e.getDuplicateID());
            }
        return session;
    }
View Full Code Here

         * before creating a new one.
         */
        if (sessionsByID.containsKey(sessionID)) throw new DuplicateSessionIDException(sessionID.toString());
        checkSessionLimit();
        IRI ns = IRI.create(getNamespace() + getID() + "/");
        Session session = new SessionImpl(sessionID, ns, ontologyProvider);
        addSession(session);
        fireSessionCreated(session);
        return session;
    }
View Full Code Here
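
The factory methods above either generate a fresh ID (retrying on DuplicateSessionIDException) or accept a caller-supplied one. Below is a minimal hedged sketch of requesting a session under a preferred ID and falling back to an auto-generated one; it assumes the String-argument createSession overload shown above is exposed on the manager instance, and the helper method name is hypothetical.

    // Sketch: request a session under a caller-chosen ID, falling back to an
    // auto-generated one if that ID is already taken. Assumes the String-argument
    // createSession overload shown above is reachable on the manager instance.
    Session obtainSession(String preferredId) throws SessionLimitException {
        try {
            return sessionManager.createSession(preferredId);
        } catch (DuplicateSessionIDException e) {
            log.warn("Session ID {} already in use; generating a new one.", e.getDuplicateID());
            return sessionManager.createSession();
        }
    }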

    }

    @Override
    public synchronized void destroySession(String sessionID) {
        try {
            Session ses = sessionsByID.get(sessionID);
            if (ses == null) log.warn(
                "Tried to destroy nonexistent session {}. Could it have been previously destroyed?",
                sessionID);
            else {
                ses.close();
                if (ses instanceof SessionImpl) ((SessionImpl) ses).state = State.ZOMBIE;
                // Make session no longer referenceable
                removeSession(ses);
                fireSessionDestroyed(ses);
            }
View Full Code Here

        // return result;
    }

    protected synchronized void removeSession(Session session) {
        String id = session.getID();
        Session s2 = sessionsByID.get(id);
        if (session == s2) sessionsByID.remove(id);
    }
View Full Code Here

            }
            if (scope == null) {
                log.error("Scope {} cannot be retrieved", this.scopeId);
                throw new IOException("Scope " + this.scopeId + " cannot be retrieved");
            }
            Session session = null;
            if (sessionManager != null) synchronized (sessionManager) {
                session = sessionManager.getSession(sessionId);
            }
            if (session == null) {
                log.warn("Session {} cannot be retrieved. Ignoring.", this.sessionId);
            }
            final Set<OWLOntology> set = new HashSet<OWLOntology>();
            set.add(scope.export(OWLOntology.class, true));
            if (session != null) {
                set.add(session.export(OWLOntology.class, true));
            }

            if (set.size() == 1) return set.iterator().next();
            OWLOntologyMerger merger = new OWLOntologyMerger(new OWLOntologySetProvider() {
                @Override
View Full Code Here
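
The excerpt above is cut off inside the anonymous OWLOntologySetProvider. A hedged sketch of how such a merge is commonly completed with the OWL API follows; the ontology manager and the merged-ontology IRI are placeholders, not values taken from the original class.

    // Sketch of completing the merge shown above (OWL API 3.x style).
    // The ontology manager and the IRI of the merged ontology are placeholders.
    OWLOntologyMerger merger = new OWLOntologyMerger(new OWLOntologySetProvider() {
        @Override
        public Set<OWLOntology> getOntologies() {
            return set; // the scope (and, if present, session) exports collected above
        }
    });
    OWLOntology merged = merger.createMergedOntology(OWLManager.createOWLOntologyManager(),
        IRI.create("urn:example:merged")); // may throw OWLOntologyCreationException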

            scope.setUp();
        } catch (DuplicateIDException e) {
            fail("Unexpected DuplicateIDException was caught while testing scope " + e.getDuplicateID());
        }
        assertNotNull(scope);
        Session ses = sesmgr.createSession();
        assertTrue(scope.getSessionSpaces().isEmpty());
        // scope.addSessionSpace(spaceFactory.createSessionOntologySpace(scopeId1), ses.getID());
        // assertFalse(scope.getSessionSpaces().isEmpty());
    }
View Full Code Here

            scopeRegistry.setScopeActive(scopeId2, false);
            scopeRegistry.setScopeActive(scopeId3, true);
        } catch (DuplicateIDException e) {
            fail("Unexpected DuplicateIDException was caught while testing scope " + e.getDuplicateID());
        }
        Session ses = sesmgr.createSession();
        String sesid = ses.getID();
        // FIXME replace with proper tests
        // assertFalse(scope1.getSessionSpaces().isEmpty());
        // assertNotNull(scope1.getSessionSpace(sesid));
        // assertFalse(scope3.getSessionSpaces().isEmpty());
        // assertNull(scope2.getSessionSpace(sesid));
View Full Code Here

    }

    @Test
    public void testRegisterSession() throws Exception {
        int before = sesmgr.getRegisteredSessionIDs().size();
        Session ses = sesmgr.createSession();
        assertNotNull(ses);
        assertEquals(before + 1, sesmgr.getRegisteredSessionIDs().size());
    }
View Full Code Here

        int initialSize = sesmgr.getRegisteredSessionIDs().size();
        Set<Session> sessions = new HashSet<Session>();
        // Create and open many sessions.
        synchronized (sesmgr) {
            for (int i = 0; i < size; i++) {
                Session ses = sesmgr.createSession();
                try {
                    ses.open();
                } catch (NonReferenceableSessionException e) {
                    fail("Test method tried to open nonreferenceable session.");
                }
                sessions.add(ses);
            }
            // Check that 'size' new sessions have been created
            assertEquals(initialSize + size, sesmgr.getRegisteredSessionIDs().size());
        }
        boolean open = true;
        for (Session ses : sessions)
            open &= ses.getSessionState() == State.ACTIVE;
        // Check that all created sessions have been opened
        assertTrue(open);
        // Kill 'em all, to quote Metallica
        synchronized (sesmgr) {
            for (Session ses : sessions)
                sesmgr.destroySession(ses.getID());
            assertEquals(initialSize, sesmgr.getRegisteredSessionIDs().size());
        }
        // Check that they are all zombies
        boolean zombie = true;
        for (Session ses : sessions)
            zombie &= ses.getSessionState() == State.ZOMBIE;
        assertTrue(zombie);
        // Try to resurrect them (hopefully failing)
        boolean resurrect = false;
        for (Session ses : sessions)
            try {
                ses.open();
                resurrect |= true;
            } catch (NonReferenceableSessionException e) {
                resurrect |= false;
                continue;
            }
View Full Code Here
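
The stress test above walks the session state machine end to end: opened sessions report ACTIVE, destroyed ones report ZOMBIE and refuse to reopen. A compact sketch of the same checks for a single session, written as a hypothetical JUnit test against the sesmgr fixture used above:

    // Hypothetical test sketching the lifecycle checks above for a single session.
    @Test
    public void testSingleSessionLifecycle() throws Exception {
        Session ses = sesmgr.createSession();
        ses.open();
        assertEquals(State.ACTIVE, ses.getSessionState());
        sesmgr.destroySession(ses.getID());
        assertEquals(State.ZOMBIE, ses.getSessionState());
        try {
            ses.open(); // reopening a destroyed (zombie) session is expected to fail
            fail("A destroyed session could be reopened.");
        } catch (NonReferenceableSessionException e) {
            // expected: the session is no longer referenceable
        }
    }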
