Package: org.mediameter.cliff.extractor

Examples of org.mediameter.cliff.extractor.ExtractedEntities


        this.fuzzy = fuzzy;
    }

    public ExtractedEntities extractAndResolve(String inputText, boolean manuallyReplaceDemonyms) throws Exception {
        logger.trace("input: {}", inputText);
        ExtractedEntities extractedEntities = extractor.extractEntities(inputText,manuallyReplaceDemonyms);
        logger.trace("extracted: {}", extractedEntities.getLocations());
        return resolve(extractedEntities);
    }
View Full Code Here


        HashMap results = null;
        if(text.trim().length()==0){
            return getErrorText("No text");
        }
        try {
            ExtractedEntities entities = extractAndResolve(text,manuallyReplaceDemonyms);
            results = parseFromEntities(entities);
        } catch (Exception e) {
            results = getErrorText(e.toString());
        }
        long endTime = System.currentTimeMillis();
View Full Code Here

        HashMap results = null;
        if(nlpJsonString.trim().length()==0){
            return getErrorText("No text");
        }
        try {
            ExtractedEntities entities = MuckUtils.entitiesFromJsonString(nlpJsonString);
            entities = getParserInstance().resolve(entities);;
            results = parseFromEntities(entities);
        } catch (Exception e) {
            results = getErrorText(e.toString());
        }
View Full Code Here

       
    /**
     * I've overloaded "position" in each of the occurrences to be sentenceIndex
     */
    private static ExtractedEntities entitiesFromSentenceMap(Map mcSentences){
        ExtractedEntities entities = new ExtractedEntities();
        Iterator it = mcSentences.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry pairs = (Map.Entry)it.next();
            String storySentencesId = pairs.getKey().toString();
            Map corenlp = (Map) pairs.getValue();
            List<Map> nlpSentences = (List<Map>) ((Map) corenlp.get("corenlp")).get("sentences");
            for(Map sentence:nlpSentences){ // one mc sentence could be multiple corenlp sentences
                String queuedEntityText = null;
                String lastEntityType = null;
                List<Map> tokens = (List<Map>) sentence.get("tokens");
                for (Map token : tokens){
                    String entityType = (String) token.get("ne");
                    String tokenText = (String) token.get("word");
                    if(entityType.equals(lastEntityType)){
                        queuedEntityText+= " "+tokenText;
                    } else {
                        if(queuedEntityText!=null && lastEntityType!=null){
                            //TODO: figure out if we need the character index here or not
                            switch(lastEntityType){
                            case "PERSON":
                                entities.addPerson(new PersonOccurrence(queuedEntityText, 0));
                                break;
                            case "LOCATION":
                                entities.addLocation(new SentenceLocationOccurrence(queuedEntityText, storySentencesId));
                                break;
                            case "ORGANIZATION":
                                entities.addOrganization(new OrganizationOccurrence(queuedEntityText, 0));
                                break;
                            }
                        }
                        queuedEntityText = tokenText;
                    }
View Full Code Here

                if(text.length()<100){
                    logger.debug("  Skipping because it is too short");
                    continue; //assume we didn't fetch/extract it right
                }
                logger.debug(text);
                ExtractedEntities entities = ParseManager.extractAndResolve(text, true);
                List<CountryCode> countries = entities.getUniqueCountries();
                if( countries.contains(event.getActor1().getCountryCodeObj()) && countries.contains(event.getActor2().getCountryCodeObj())){
                    mentionedSuccesses = mentionedSuccesses + 1;
                } else {
                    logger.error("  We found "+countries+" - GDELT Says:"+event.getActor1().getCountryCodeObj()+" and "+event.getActor2().getCountryCodeObj());
                    mentionedFailures++;
View Full Code Here

                ReutersCorpusDocument doc;
                try {
                   
                    doc = ReutersCorpusDocument.fromFile(aFile.toString(),substitutions);
                    if(doc.hasCodedCountries()){
                        ExtractedEntities entities =  ParseManager.extractAndResolve(doc.getCompiledText());
                       
                        logger.info("Checking file "+aFile);
                        articlesWithLocations++;
                        List<GeoName> countriesTheyCoded = new ArrayList<GeoName>();
                        for(CountryCode countryCode:doc.getCountryCodeObjects()){
                            countriesTheyCoded.add( CountryGeoNameLookup.lookup(countryCode.name()) );
                        }
                        logger.info(doc.getId()+": "+countriesTheyCoded);
                        List<GeoName> ourMentionedCountries = entities.getUniqueCountryGeoNames();

                        // check to make sure we found all the countries they coded
                        if(ourMentionedCountries.size()>0){
                            boolean allMatched = true;
                            for(GeoName countryTheyCoded:countriesTheyCoded){
                                if(!ourMentionedCountries.contains(countryTheyCoded)){
                                    allMatched = false;
                                }
                            }
                            if(allMatched){
                                mentionsArticlesWeGotRight++;
                            } else {
                                logger.warn(doc.getId()+": mentions "+ourMentionedCountries+" they coded "+countriesTheyCoded);
                            }
                        }

                        //also have a measure for making sure the main "about" country is included in their list of countries
                        FocusStrategy focus = ParseManager.getFocusStrategy();
                        List<FocusLocation> ourAboutnessCountries = focus.selectCountries(entities.getResolvedLocations());
                        List<GeoName> ourAboutnessGeoNames = new ArrayList<GeoName>();
                        for(FocusLocation aboutLocation: ourAboutnessCountries){
                            ourAboutnessGeoNames.add(aboutLocation.getGeoName());
                        }
                        if(ourAboutnessGeoNames.size()>0){
View Full Code Here

public class SantoDomingoPlaceTest {
   
    @Test
    public void testSantoDomingo() throws Exception{
        ExtractedEntities entities = ParseManager.extractAndResolve("This is about the Santo Domingo.");
        assertEquals("Found "+entities.getResolvedLocations().size()+" places, should have been 1!",1,entities.getResolvedLocations().size());
        assertEquals("Found "+entities.getResolvedPeople().size()+" people, should have been 0!",0,entities.getResolvedPeople().size());
        assertEquals(TestPlaces.COUNTRY_DOMINICAN_REPUBLIC,entities.getResolvedLocations().get(0).getGeoname().getGeonameID());
    }
View Full Code Here

    public void testStory1() throws IOException {
        String fileName = "nlp-test-1.json";
        File file = new File("src/test/resources/sample-muck-json/"+fileName);
        String json = FileUtils.readFileToString(file);
       
        ExtractedEntities entities  = MuckUtils.entitiesFromJsonString(json);
        assertEquals("Wrong number of location occurrences", 19, entities.getLocations().size());
        assertEquals("Wrong number of people occurrences", 15, entities.getPeople().size());
        assertEquals("Wrong number of organization occurrences", 4, entities.getOrganizations().size());
    }
View Full Code Here

    }
   
    private void createCountryCodeMap() throws Exception{
        regionToCountryCode = new HashMap<String,CountryCode>();
        for(String region:map.keySet()){
            ExtractedEntities entities = ParseManager.extractAndResolve(map.get(region));
            List<CountryCode> countryCodes = entities.getUniqueCountries();
            if(countryCodes.size()>0){
                regionToCountryCode.put(region,countryCodes.get(0));    // does this make sense?
                logger.debug("  Added country "+countryCodes.get(0)+" found for region "+region);
            } else {
                logger.warn("  No country found for region "+region);
View Full Code Here

TOP

Related Classes of org.mediameter.cliff.extractor.ExtractedEntities

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware#gmail.com.