Examples of AnalyzedTokenReadings


Examples of org.languagetool.AnalyzedTokenReadings

public class GermanHelperTest {
 
  @Test
  public void testHasReadingOfType() throws Exception {
    AnalyzedTokenReadings readings = new AnalyzedTokenReadings(new AnalyzedToken("der", "ART:DEF:DAT:SIN:FEM", null), 0);
    assertTrue(GermanHelper.hasReadingOfType(readings, GermanToken.POSType.DETERMINER));
    assertFalse(GermanHelper.hasReadingOfType(readings, GermanToken.POSType.NOMEN));
  }
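
The test above wraps a single AnalyzedToken (surface form, POS tag, lemma) plus a start offset into an AnalyzedTokenReadings and hands it to GermanHelper. A minimal, self-contained sketch of that construction, using only constructors and accessors that appear on this page (the class name and the example token are placeholders):

import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;

public class SingleReadingSketch {
  public static void main(String[] args) {
    // One reading: surface form "Haus", a German noun tag, lemma "Haus"; the token starts at offset 0.
    AnalyzedToken reading = new AnalyzedToken("Haus", "SUB:NOM:SIN:NEU", "Haus");
    AnalyzedTokenReadings readings = new AnalyzedTokenReadings(reading, 0);
    System.out.println(readings.getReadings().size());             // 1
    System.out.println(readings.getReadings().get(0).getLemma());  // Haus
  }
}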

Examples of org.languagetool.AnalyzedTokenReadings

    AnalyzedToken comma = new AnalyzedToken(",", "comma", ",");

    uni.isUnified(sing1a, equiv, false);
    uni.isUnified(sing1b, equiv, true);
    uni.addNeutralElement(new AnalyzedTokenReadings(comma, 0));
    assertEquals(true, uni.isUnified(sing2, equiv, true));
    assertEquals("[osobiste[osobisty/adj:sg:nom.acc.voc:n:pos:aff*], ,[,/comma*], godło[godło/subst:sg:nom.acc.voc:n*]]",
        Arrays.toString(uni.getFinalUnified()));
    uni.reset();

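
Most of the objects in this fragment (uni, equiv, sing1a, sing1b, sing2) are set up earlier in the Unifier test and are not shown on this page. The part specific to AnalyzedTokenReadings is the neutral element: a comma carrying a single "comma" reading, registered via addNeutralElement so that it does not have to take part in the agreement check; the expected output above shows it carried through as ",[,/comma*]". Building that element on its own looks like this (a sketch; the class and method names are placeholders):

import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;

class NeutralElementSketch {
  // The comma element passed to uni.addNeutralElement(...) above:
  // surface form ",", POS tag "comma", lemma ",", starting at offset 0.
  static AnalyzedTokenReadings neutralComma() {
    return new AnalyzedTokenReadings(new AnalyzedToken(",", "comma", ","), 0);
  }
}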

Examples of org.languagetool.AnalyzedTokenReadings

public class GermanTaggerTest extends TestCase {

  public void testTagger() throws IOException {
    final GermanTagger tagger = new GermanTagger();
   
    AnalyzedTokenReadings aToken = tagger.lookup("Haus");
    assertEquals("Haus[Haus/SUB:AKK:SIN:NEU, Haus/SUB:DAT:SIN:NEU, Haus/SUB:NOM:SIN:NEU]", toSortedString(aToken));
    assertEquals("Haus", aToken.getReadings().get(0).getLemma());
    assertEquals("Haus", aToken.getReadings().get(1).getLemma());
    assertEquals("Haus", aToken.getReadings().get(2).getLemma());
   
    aToken = tagger.lookup("Hauses");
    assertEquals("Hauses[Haus/SUB:GEN:SIN:NEU]", toSortedString(aToken));
    assertEquals("Haus", aToken.getReadings().get(0).getLemma());
   
    assertNull(tagger.lookup("hauses"));
    assertNull(tagger.lookup("Groß"));

    assertEquals("Lieblingsbuchstabe[Lieblingsbuchstabe/SUB:NOM:SIN:MAS]", toSortedString(tagger.lookup("Lieblingsbuchstabe")));

    aToken = tagger.lookup("großer");
    assertEquals("großer[groß/ADJ:DAT:SIN:FEM:GRU:SOL, groß/ADJ:GEN:PLU:FEM:GRU:SOL, groß/ADJ:GEN:PLU:MAS:GRU:SOL, " +
            "groß/ADJ:GEN:PLU:NEU:GRU:SOL, groß/ADJ:GEN:SIN:FEM:GRU:SOL, groß/ADJ:NOM:SIN:MAS:GRU:IND, " +
            "groß/ADJ:NOM:SIN:MAS:GRU:SOL]", toSortedString(tagger.lookup("großer")));
    assertEquals("groß", aToken.getReadings().get(0).getLemma());
   
    // from both german.dict and added.txt:
    aToken = tagger.lookup("Interessen");
    assertEquals("Interessen[Interesse/SUB:AKK:PLU:NEU, Interesse/SUB:DAT:PLU:NEU, " +
            "Interesse/SUB:GEN:PLU:NEU, Interesse/SUB:NOM:PLU:NEU]",
        toSortedString(aToken));
    assertEquals("Interesse", aToken.getReadings().get(0).getLemma());
    assertEquals("Interesse", aToken.getReadings().get(1).getLemma());
    assertEquals("Interesse", aToken.getReadings().get(2).getLemma());
    assertEquals("Interesse", aToken.getReadings().get(3).getLemma());
   
    // words that are not in the dictionary but that are recognized thanks to noun splitting:
    aToken = tagger.lookup("Donaudampfschiff");
    assertEquals("Donaudampfschiff[Donaudampfschiff/SUB:AKK:SIN:NEU, Donaudampfschiff/SUB:DAT:SIN:NEU, " +
            "Donaudampfschiff/SUB:NOM:SIN:NEU]", toSortedString(aToken));
    assertEquals("Donaudampfschiff", aToken.getReadings().get(0).getLemma());
    assertEquals("Donaudampfschiff", aToken.getReadings().get(1).getLemma());
   
    aToken = tagger.lookup("Häuserkämpfe");
    assertEquals("Häuserkämpfe[Häuserkampf/SUB:AKK:PLU:MAS, Häuserkampf/SUB:GEN:PLU:MAS, Häuserkampf/SUB:NOM:PLU:MAS]",
        toSortedString(aToken));
    assertEquals("Häuserkampf", aToken.getReadings().get(0).getLemma());
    assertEquals("Häuserkampf", aToken.getReadings().get(1).getLemma());
    assertEquals("Häuserkampf", aToken.getReadings().get(2).getLemma());
   
    aToken = tagger.lookup("Häuserkampfes");
    assertEquals("Häuserkampfes[Häuserkampf/SUB:GEN:SIN:MAS]", toSortedString(aToken));
    assertEquals("Häuserkampf", aToken.getReadings().get(0).getLemma());
   
    aToken = tagger.lookup("Häuserkampfs");
    assertEquals("Häuserkampfs[Häuserkampf/SUB:GEN:SIN:MAS]", toSortedString(aToken));
    assertEquals("Häuserkampf", aToken.getReadings().get(0).getLemma());

    aToken = tagger.lookup("Lieblingsfarben");
    assertEquals("Lieblingsfarben[Lieblingsfarbe/SUB:AKK:PLU:FEM, Lieblingsfarbe/SUB:DAT:PLU:FEM, " +
            "Lieblingsfarbe/SUB:GEN:PLU:FEM, Lieblingsfarbe/SUB:NOM:PLU:FEM]", toSortedString(aToken));
    assertEquals("Lieblingsfarbe", aToken.getReadings().get(0).getLemma());

    aToken = tagger.lookup("Autolieblingsfarben");
    assertEquals("Autolieblingsfarben[Autolieblingsfarbe/SUB:AKK:PLU:FEM, Autolieblingsfarbe/SUB:DAT:PLU:FEM, " +
            "Autolieblingsfarbe/SUB:GEN:PLU:FEM, Autolieblingsfarbe/SUB:NOM:PLU:FEM]", toSortedString(aToken));
    assertEquals("Autolieblingsfarbe", aToken.getReadings().get(0).getLemma());

    aToken = tagger.lookup("übrigbleibst");
    assertEquals("übrigbleibst[übrigbleiben/VER:2:SIN:PRÄ:NON:NEB]", toSortedString(aToken));
    assertEquals("übrigbleiben", aToken.getReadings().get(0).getLemma());
  }
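
As the assertions above show, GermanTagger.lookup(...) returns null for forms it cannot analyze (lowercase "hauses", "Groß") and otherwise returns an AnalyzedTokenReadings whose readings each carry a POS tag and lemma. A sketch of the lookup-then-inspect pattern follows; the calls are the ones used in the test above, while the import path for GermanTagger and the helper's name are assumptions:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;
import org.languagetool.tagging.de.GermanTagger;

class LookupSketch {
  // Collect the lemmas of all readings of a word, or an empty list if the tagger knows nothing.
  static List<String> lemmasOf(String word) throws IOException {
    GermanTagger tagger = new GermanTagger();
    AnalyzedTokenReadings readings = tagger.lookup(word);
    List<String> lemmas = new ArrayList<>();
    if (readings != null) {  // lookup() returns null for unknown forms, as asserted above
      for (AnalyzedToken reading : readings.getReadings()) {
        lemmas.add(reading.getLemma());
      }
    }
    return lemmas;
  }
}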

Examples of org.languagetool.AnalyzedTokenReadings

    assertTrue(p.testAllReadings(tokenReadings("bar", "myChunk"), elemMatcher, null, 0, 0, 0));
    assertFalse(p.testAllReadings(tokenReadings("bar", "otherChunk"), elemMatcher, null, 0, 0, 0));
  }

  private AnalyzedTokenReadings[] tokenReadings(String token, String chunkTag) {
    AnalyzedTokenReadings tokenReadings1 = new AnalyzedTokenReadings(new AnalyzedToken(token, "pos", "lemma"), 0);
    if (chunkTag != null) {
      tokenReadings1.setChunkTags(Collections.singletonList(new ChunkTag(chunkTag)));
    }
    return new AnalyzedTokenReadings[] { tokenReadings1 };
  }
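
Chunk tags are attached to the AnalyzedTokenReadings wrapper rather than to the individual readings, which is why the helper above calls setChunkTags after construction. A small sketch of setting and reading back a chunk tag; the import paths and the getChunkTags() accessor are assumptions, as only setChunkTags appears in the fragment above:

import java.util.Collections;
import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;
import org.languagetool.chunking.ChunkTag;

class ChunkTagSketch {
  public static void main(String[] args) {
    AnalyzedTokenReadings readings =
        new AnalyzedTokenReadings(new AnalyzedToken("bar", "pos", "lemma"), 0);
    // Chunk tags are wrapper-level metadata, set after the readings are built.
    readings.setChunkTags(Collections.singletonList(new ChunkTag("myChunk")));
    // getChunkTags() is assumed to be the matching accessor; it is not shown on this page.
    System.out.println(readings.getChunkTags());
  }
}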

Examples of org.languagetool.AnalyzedTokenReadings

  private final RuleFilterEvaluator eval = new RuleFilterEvaluator(null);

  @Test
  public void testGetResolvedArguments() throws Exception {
    AnalyzedTokenReadings[] readingsList = {
            new AnalyzedTokenReadings(new AnalyzedToken("fake1", "pos", null), 0),
            new AnalyzedTokenReadings(new AnalyzedToken("fake2", "pos", null), 0)
    };
    Map<String,String> map = eval.getResolvedArguments("year:\\1 month:\\2", readingsList, Arrays.asList(1, 1));
    assertThat(map.get("year"), is("fake1"));
    assertThat(map.get("month"), is("fake2"));
    assertThat(map.size(), is(2));

Examples of org.languagetool.AnalyzedTokenReadings

  }

  @Test(expected = RuntimeException.class)
  public void testDuplicateKey() throws Exception {
    AnalyzedTokenReadings[] readingsList = {
            new AnalyzedTokenReadings(new AnalyzedToken("fake1", "SENT_START", null), 0),
            new AnalyzedTokenReadings(new AnalyzedToken("fake1", "pos", null), 0),
            new AnalyzedTokenReadings(new AnalyzedToken("fake2", "pos", null), 0)
    };
    eval.getResolvedArguments("year:\\1 year:\\2", readingsList, Arrays.asList(1, 2));
  }
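
Taken together, the two tests show the contract of getResolvedArguments: the back-references \1, \2, ... in the parameter string are resolved against the AnalyzedTokenReadings array via the given list of token positions, and resolving two values onto the same key ("year" twice) fails with a RuntimeException. The sketch below mirrors the shape of the passing test outside of JUnit; the token texts are placeholders and the import path for RuleFilterEvaluator is an assumption:

import java.util.Arrays;
import java.util.Map;
import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;
import org.languagetool.rules.patterns.RuleFilterEvaluator;

class ResolvedArgumentsSketch {
  public static void main(String[] args) {
    RuleFilterEvaluator eval = new RuleFilterEvaluator(null);
    AnalyzedTokenReadings[] readingsList = {
        new AnalyzedTokenReadings(new AnalyzedToken("2014", "pos", null), 0),
        new AnalyzedTokenReadings(new AnalyzedToken("06", "pos", null), 0)
    };
    // Same parameter string and position list as the passing test above:
    // "year" resolves to the first token's text, "month" to the second's.
    Map<String, String> map =
        eval.getResolvedArguments("year:\\1 month:\\2", readingsList, Arrays.asList(1, 1));
    System.out.println(map);
  }
}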

Examples of org.languagetool.AnalyzedTokenReadings

        }
      } else {
        // Single letter word (no tag).
        l.add(new AnalyzedToken(word, null, null));
      }
      tokenReadings.add(new AnalyzedTokenReadings(l, 0));
    }
    return tokenReadings;
  }
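
This fragment comes from a tagger loop: for each word it accumulates AnalyzedToken readings in the list l, falling back to a tag-less reading when nothing matches, and then wraps the whole list in a single AnalyzedTokenReadings. The list-based constructor in isolation (a sketch; the word and tags are placeholders):

import java.util.ArrayList;
import java.util.List;
import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;

class ListConstructorSketch {
  public static void main(String[] args) {
    // Several alternative analyses of the same surface form, collected into one wrapper.
    List<AnalyzedToken> l = new ArrayList<>();
    l.add(new AnalyzedToken("walks", "NNS", "walk"));  // noun reading (placeholder tag set)
    l.add(new AnalyzedToken("walks", "VBZ", "walk"));  // verb reading (placeholder tag set)
    AnalyzedTokenReadings readings = new AnalyzedTokenReadings(l, 0);
    System.out.println(readings.getReadings().size()); // 2
  }
}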

Examples of org.languagetool.AnalyzedTokenReadings

    return tokenReadings;
  }

  @Override
  public AnalyzedTokenReadings createNullToken(String token, int startPos) {
    return new AnalyzedTokenReadings(
      new AnalyzedToken(token, null, null), startPos);
  }

Examples of org.languagetool.AnalyzedTokenReadings

    String alternative = rule.getBetterAlternativeOrNull(tokens, 1, confusionSet);
    assertThat(alternative, is("a"));
  }

  private AnalyzedTokenReadings reading(String token) {
    return new AnalyzedTokenReadings(new AnalyzedToken(token, null, null), 0);
  }

Examples of org.languagetool.AnalyzedTokenReadings

      }

      if (l.isEmpty()) {
        l.add(new AnalyzedToken(word, null, null));
      }
      tokenReadings.add(new AnalyzedTokenReadings(l, pos));
      pos += word.length();
    }

    return tokenReadings;
  }
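
Unlike the earlier tagger fragment, this one keeps a running character offset: each word's AnalyzedTokenReadings is created at the current pos, and pos is then advanced by word.length(). A self-contained sketch of that pattern (the word list is a placeholder; whitespace handling is left out, just as it is not visible in the fragment):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;

class StartOffsetSketch {
  public static void main(String[] args) {
    List<AnalyzedTokenReadings> tokenReadings = new ArrayList<>();
    int pos = 0;
    for (String word : Arrays.asList("ett", "hus")) {  // placeholder words
      List<AnalyzedToken> l = new ArrayList<>();
      l.add(new AnalyzedToken(word, null, null));      // untagged fallback reading, as above
      tokenReadings.add(new AnalyzedTokenReadings(l, pos));
      pos += word.length();                            // advance the start offset for the next word
    }
    System.out.println(tokenReadings.size());          // 2
  }
}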