Examples of Lexer


Examples of com.thoughtworks.qdox.parser.Lexer

        return addSource(reader, "UNKNOWN SOURCE");
    }

    public JavaSource addSource(Reader reader, String sourceInfo) {
        ModelBuilder builder = new ModelBuilder(classLibrary, docletTagFactory);
        Lexer lexer = new JFlexLexer(reader);
        Parser parser = new Parser(lexer, builder);
        parser.setDebugLexer(debugLexer);
        parser.setDebugParser(debugParser);
        try {
            parser.parse();
View Full Code Here

Examples of csa.util.syntax.Syntax.Lexer.Lexer

    if(doc == null) return;
   
    // slurp everything up into local variables in case another
    // thread changes them during coloring process
    AttributeSet globalStyle = doc.getGlobalStyle();
    Lexer syntaxLexer = doc.getSyntaxLexer();
    DocumentReader documentReader = doc.getDocumentReader();
    Object docLock = doc.getDocumentLock();

    if(globalStyle != null) {
      int start = Math.min(position, position + adjustment);
      int stop = Math.max(position, position + adjustment);
      synchronized(docLock) {
        doc.setCharacterAttributes(start, stop - start,
          globalStyle, true);
      }
      return;
    }
   
    SortedSet workingSet;
    Iterator workingIt;
    DocPosition startRequest = new DocPosition(position);
    DocPosition endRequest = new DocPosition(position + Math.abs(adjustment));
    DocPosition dp;
    DocPosition dpStart = null;
    DocPosition dpEnd = null;

    // find the starting position. We must start at least one
    // token before the current position
    try {
      // all the good positions before
      workingSet = iniPositions.headSet(startRequest);
      // the last of the stuff before
      dpStart = (DocPosition) workingSet.last();
    } catch (NoSuchElementException x) {
      // if there were no good positions before the requested
      // start,
      // we can always start at the very beginning.
      dpStart = new DocPosition(0);
    }

    // if stuff was removed, take any removed positions off the
    // list.
    if (adjustment < 0) {
      workingSet = iniPositions.subSet(startRequest,
          endRequest);
      workingIt = workingSet.iterator();
      while (workingIt.hasNext()) {
        workingIt.next();
        workingIt.remove();
      }
    }

    // adjust the positions of everything after the
    // insertion/removal.
    workingSet = iniPositions.tailSet(startRequest);
    workingIt = workingSet.iterator();
    while (workingIt.hasNext()) {
      ((DocPosition) workingIt.next()).adjustPosition(adjustment);
    }

    // now go through and highlight as much as needed
    workingSet = iniPositions.tailSet(dpStart);
    workingIt = workingSet.iterator();
    dp = null;
    if (workingIt.hasNext()) {
      dp = (DocPosition) workingIt.next();
    }
    try {
      Token t;
      boolean done = false;
      dpEnd = dpStart;
      synchronized (docLock) {
        // we are playing some games with the lexer for
        // efficiency.
        // we could just create a new lexer each time here,
        // but instead,
        // we will just reset it so that it thinks it is
        // starting at the
        // beginning of the document but reporting a funny
        // start position.
        // Reseting the lexer causes the close() method on
        // the reader
        // to be called but because the close() method has
        // no effect on the
        // DocumentReader, we can do this.
        syntaxLexer.reset(documentReader, 0, dpStart
            .getPosition(), 0);
        // After the lexer has been set up, scroll the
        // reader so that it
        // is in the correct spot as well.
        documentReader.seek(dpStart.getPosition());
        // we will highlight tokens until we reach a good
        // stopping place.
        // the first obvious stopping place is the end of
        // the document.
        // the lexer will return null at the end of the
        // document and wee
        // need to stop there.
        t = syntaxLexer.getNextToken();
      }
      newPositions.add(dpStart);
      while (!done && t != null) {
        // this is the actual command that colors the stuff.
        // Color stuff with the description of the styles
        // stored in tokenStyles.
        if (t.getCharEnd() <= doc.getLength()) {
          doc.setCharacterAttributes(t.getCharBegin() + change,
              t.getCharEnd()  - t.getCharBegin(),
              TokenStyles.getStyle(t.getDescription()),
              true);
          // record the position of the last bit of
          // text that we colored
          dpEnd = new DocPosition(t.getCharEnd());
        }
        lastPosition = (t.getCharEnd() + change);
        // The other more complicated reason for doing no
        // more highlighting
        // is that all the colors are the same from here on
        // out anyway.
        // We can detect this by seeing if the place that
        // the lexer returned
        // to the initial state last time we highlighted is
        // the same as the
        // place that returned to the initial state this
        // time.
        // As long as that place is after the last changed
        // text, everything
        // from there on is fine already.
        if (t.getState() == Token.INITIAL_STATE) {
          // look at all the positions from last time that
          // are less than or
          // equal to the current position
          while (dp != null
              && dp.getPosition() <= t.getCharEnd()) {
            if (dp.getPosition() == t.getCharEnd()
                && dp.getPosition() >= endRequest
                    .getPosition()) {
              // we have found a state that is the
              // same
              done = true;
              dp = null;
            } else if (workingIt.hasNext()) {
              // didn't find it, try again.
              dp = (DocPosition) workingIt.next();
            } else {
              // didn't find it, and there is no more
              // info from last
              // time. This means that we will just
              // continue
              // until the end of the document.
              dp = null;
            }
          }
          // so that we can do this check next time,
          // record all the
          // initial states from this time.
          newPositions.add(dpEnd);
        }
        synchronized (docLock) {
          t = syntaxLexer.getNextToken();
        }
      }

      // remove all the old initial positions from the place
      // where
View Full Code Here

Examples of csep.parser.Lexer

    //Assert::assertEquals('errors ' + resource.errors, 0, resource.errors.size)
    EObject parseResult = null;
    EList<Diagnostic> errors = null;
    EList<Diagnostic> warnings = null;
    try {
      Lexer lexer = new Lexer(input);
      tokens = lexer.tokenizeToStrings();
      InputStream in = getAsStream("" + input);
      URI uri = URI.createURI("mytestmodel." + getCurrentFileExtension());
      XtextResource resource = doGetResource(in, uri);
      parseResult = getModel(resource);
      errors = resource.getErrors();
View Full Code Here

Examples of freegressi.parser.Lexer

  }

  private double evalueOk(Tableur tableur, String str, int index) {
    ArrayList<Noeud> liste = new ArrayList<>();
    try {
      Lexer lex = new Lexer( new java.io.StringReader(str), liste );
      lex.yylex();
    } catch (Exception e) {
      System.err.println("Erreur du lexer!");
    }
    Parser parser = new Parser(liste);
    Noeud racine = parser.parser();
View Full Code Here

Examples of fri.patterns.interpreter.parsergenerator.Lexer

   
  /** Returns the built Lexer, loaded with passed input (file, stream, string, ...). */
  public Lexer getLexer(Object input)
    throws IOException
  {
    Lexer lexer = getLexer();
    lexer.setInput(input);
    return lexer;
  }
View Full Code Here

Examples of fri.patterns.interpreter.parsergenerator.Lexer

    @return deserialized Lexer, or one built from scratch that gets written to filesystem.
  */
  public Lexer get(Object syntaxInput, String baseName, List tokenSymbols, List ignoredSymbols)
    throws Exception
  {
    Lexer lexer = readLexer(syntaxInput, baseName);
    if (lexer == null)
      lexer = buildAndStoreLexer(syntaxInput, baseName, tokenSymbols, ignoredSymbols);
    return lexer;
  }
View Full Code Here

Examples of fri.patterns.interpreter.parsergenerator.Lexer

    }
    // else: assume that syntaxInput is a prebuilt lexer Syntax and a list of ignored tokens
   
    TimeStopper ts = new TimeStopper();
    LexerBuilder builder = newLexerBuilder(syntax, ignoredSymbols);
    Lexer lexer = builder.getLexer();
    lexer.setTerminals(tokenSymbols);
    System.err.println("Lexer scratch construction took "+ts.getTimeMillis()+" millis");
   
    if (PRODUCTION)
      write(ensureFileName(syntaxInput, baseName), lexer);
View Full Code Here

Examples of fri.patterns.interpreter.parsergenerator.Lexer

  /** Test main. Building serialized Lexer takes 330, building from scratch takes 130 millis. */
  public static void main(String [] args)  {
    try  {
      TimeStopper ts = new TimeStopper();
      Lexer lexer = new SerializedLexer().get(StandardLexerRules.lexerSyntax, "SyntaxBuilder");
      System.err.println("Lexer was built in "+ts.getTimeMillis()+" millis");
    }
    catch (Exception e)  {
      e.printStackTrace();
    }
View Full Code Here

Examples of fri.patterns.interpreter.parsergenerator.Lexer

    try  {
      fri.util.TimeStopper ts = new fri.util.TimeStopper();

      SyntaxSeparation separation = new SyntaxSeparation(syntax);
      LexerBuilder builder = new LexerBuilder(separation.getLexerSyntax(), separation.getIgnoredSymbols());
      Lexer lexer = builder.getLexer();
      lexer.setDebug(true);
      lexer.setTerminals(separation.getTokenSymbols());

      System.err.println("time to build lexer was: "+ts.getInterval());

      InputStream in = ExampleLexer.class.getResourceAsStream("ExampleLexer.java")//Reader in = new FileReader(args[0]);
      lexer.setInput(in);

      System.err.println("time to build input was: "+ts.getInterval());
     
      Token t;
      do  {
        t = lexer.getNextToken(null);
        //System.err.println(ts.getInterval()+" was time to read token "+t.symbol+" -> "+t.text);
       
        if (t.symbol == null)  {
          lexer.dump(System.err);
          throw new LexerException("Uninterpretable input!");
        }
        System.out.println(t.symbol+" "+">"+t.text+"<");
      }
      while (Token.isEpsilon(t) == false);
View Full Code Here

Examples of fri.patterns.interpreter.parsergenerator.Lexer

  public static void main(String [] args)
    throws Exception
  {
    SyntaxSeparation separation = new SyntaxSeparation(new Syntax(syntax))// separate lexer and parser syntax
    LexerBuilder builder = new LexerBuilder(separation.getLexerSyntax(), separation.getIgnoredSymbols())// build a Lexer
    Lexer lexer = builder.getLexer();
    lexer.setInput("\tHello \r\n\tWorld\n")// give the lexer some very complex input :-)
    ParserTables parserTables = new SLRParserTables(separation.getParserSyntax());
    Parser parser = new Parser(parserTables);
    parser.parse(lexer, new PrintSemantic())// start parsing with a print-semantic
  }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware#gmail.com.