Examples of StreamTokenizer


Examples of java.io.StreamTokenizer

    m_UpperBoundIsOpen = false;

    if (rangeString == null) return;

    // set up a tokenizer to parse the string
    StreamTokenizer tokenizer =
      new StreamTokenizer(new StringReader(rangeString));
    tokenizer.resetSyntax();
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.wordChars(' ' + 1, '\u00FF');
    tokenizer.ordinaryChar('[');
    tokenizer.ordinaryChar('(');
    tokenizer.ordinaryChar(',');
    tokenizer.ordinaryChar(']');
    tokenizer.ordinaryChar(')');

    try {

      // get opening brace
      tokenizer.nextToken();

      if (tokenizer.ttype == '[') m_LowerBoundIsOpen = false;
      else if (tokenizer.ttype == '(') m_LowerBoundIsOpen = true;
      else throw new IllegalArgumentException("Expected opening brace on range,"
                                              + " found: " + tokenizer.toString());

      // get lower bound
      tokenizer.nextToken();
      if (tokenizer.ttype != StreamTokenizer.TT_WORD)
        throw new IllegalArgumentException("Expected lower bound in range,"
                                           + " found: " + tokenizer.toString());
      if (tokenizer.sval.compareToIgnoreCase("-inf") == 0)
        m_LowerBound = Double.NEGATIVE_INFINITY;
      else if (tokenizer.sval.compareToIgnoreCase("+inf") == 0)
        m_LowerBound = Double.POSITIVE_INFINITY;
      else if (tokenizer.sval.compareToIgnoreCase("inf") == 0)
        m_LowerBound = Double.NEGATIVE_INFINITY;
      else try {
        m_LowerBound = Double.valueOf(tokenizer.sval).doubleValue();
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected lower bound in range,"
                                           + " found: '" + tokenizer.sval + "'");
      }

      // get separating comma
      if (tokenizer.nextToken() != ',')
        throw new IllegalArgumentException("Expected comma in range,"
                                           + " found: " + tokenizer.toString());

      // get upper bound
      tokenizer.nextToken();
      if (tokenizer.ttype != StreamTokenizer.TT_WORD)
        throw new IllegalArgumentException("Expected upper bound in range,"
                                           + " found: " + tokenizer.toString());
      if (tokenizer.sval.compareToIgnoreCase("-inf") == 0)
        m_UpperBound = Double.NEGATIVE_INFINITY;
      else if (tokenizer.sval.compareToIgnoreCase("+inf") == 0)
        m_UpperBound = Double.POSITIVE_INFINITY;
      else if (tokenizer.sval.compareToIgnoreCase("inf") == 0)
        m_UpperBound = Double.POSITIVE_INFINITY;
      else try {
        m_UpperBound = Double.valueOf(tokenizer.sval).doubleValue();
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected upper bound in range,"
                                           + " found: '" + tokenizer.sval + "'");
      }

      // get closing brace
      tokenizer.nextToken();

      if (tokenizer.ttype == ']') m_UpperBoundIsOpen = false;
      else if (tokenizer.ttype == ')') m_UpperBoundIsOpen = true;
      else throw new IllegalArgumentException("Expected closing brace on range,"
                                              + " found: " + tokenizer.toString());

      // check for rubbish on end
      if (tokenizer.nextToken() != StreamTokenizer.TT_EOF)
        throw new IllegalArgumentException("Expected end of range string,"
                                           + " found: " + tokenizer.toString());

    } catch (IOException e) {
      throw new IllegalArgumentException("IOException reading attribute range"
                                         + " string: " + e.getMessage());
    }
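
The fragment above builds a fully customized syntax table before parsing: resetSyntax() discards the defaults, whitespaceChars() and wordChars() make every printable character part of a word, and ordinaryChar() singles out the range punctuation so the braces and the comma come back as their own one-character tokens. The standalone sketch below is only an approximation of that setup (the class name RangeTokenDemo is invented for illustration); it shows how the same configuration splits a range string such as "(-inf, 5.5]" into brace, bound, comma, bound, brace:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class RangeTokenDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer t = new StreamTokenizer(new StringReader("(-inf, 5.5]"));
        t.resetSyntax();                    // discard the default syntax table
        t.whitespaceChars(0, ' ');          // control characters and space are whitespace
        t.wordChars(' ' + 1, '\u00FF');     // every printable character belongs to a word...
        t.ordinaryChar('[');                // ...except the range punctuation
        t.ordinaryChar('(');
        t.ordinaryChar(',');
        t.ordinaryChar(']');
        t.ordinaryChar(')');

        while (t.nextToken() != StreamTokenizer.TT_EOF) {
            if (t.ttype == StreamTokenizer.TT_WORD) {
                System.out.println("word:  " + t.sval);           // -inf, 5.5
            } else {
                System.out.println("punct: " + (char) t.ttype);   // '(', ',', ']'
            }
        }
    }
}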

Examples of java.io.StreamTokenizer

      throw new IOException("No source has been specified");
    }

    if (m_structure == null) {
      try {
        m_st = new StreamTokenizer(m_sourceReader);
        initTokenizer(m_st);
        readStructure(m_st);
      } catch (FileNotFoundException ex) {
        // ignored
      }
    }

Examples of java.io.StreamTokenizer

    if (m_structure == null) {
      getStructure();
    }

    if (m_st == null) {
      m_st = new StreamTokenizer(m_sourceReader);
      initTokenizer(m_st);
    }

    m_st.ordinaryChar(m_FieldSeparator.charAt(0));
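
Demoting the field separator to an "ordinary" character makes nextToken() return it as a single-character token instead of folding it into the surrounding words or numbers. The sketch below illustrates the effect; the class name FieldSeparatorDemo and the hard-coded comma are assumptions (above, the separator comes from m_FieldSeparator), and with the default syntax table a comma is already ordinary, so the call matters mainly when initTokenizer() has classified the separator differently:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class FieldSeparatorDemo {
    public static void main(String[] args) throws IOException {
        char fieldSeparator = ',';   // assumed separator; the loader above reads it from m_FieldSeparator
        StreamTokenizer st = new StreamTokenizer(new StringReader("red,3.5,blue"));
        st.ordinaryChar(fieldSeparator);

        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            switch (st.ttype) {
                case StreamTokenizer.TT_WORD:   System.out.println("word:   " + st.sval); break;
                case StreamTokenizer.TT_NUMBER: System.out.println("number: " + st.nval); break;
                default:                        System.out.println("sep:    " + (char) st.ttype); break;
            }
        }
    }
}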

Examples of java.io.StreamTokenizer

     * @param reader    the reader to use
     * @throws IOException  if something goes wrong
     * @see      #getData()
     */
    public ArffReader(Reader reader) throws IOException {
      m_Tokenizer = new StreamTokenizer(reader);
      initTokenizer();

      readHeader(1000);
      initBuffers();
     

Examples of java.io.StreamTokenizer

     */
    public ArffReader(Reader reader, int capacity) throws IOException {
      if (capacity < 0)
        throw new IllegalArgumentException("Capacity has to be positive!");

      m_Tokenizer = new StreamTokenizer(reader);
      initTokenizer();

      readHeader(capacity);
      initBuffers();
    }

Examples of java.io.StreamTokenizer

     * @throws IOException  if something goes wrong
     * @see      #getData()
     */
    public ArffReader(Reader reader, Instances template, int lines, int capacity) throws IOException {
      m_Lines     = lines;
      m_Tokenizer = new StreamTokenizer(reader);
      initTokenizer();

      m_Data = new Instances(template, capacity);
      initBuffers();
    }

Examples of java.io.StreamTokenizer

     * @param setting the fulltext settings
     * @param set the hash set
     * @param reader the reader
     */
    protected static void addWords(FullTextSettings setting, HashSet<String> set, Reader reader) {
        StreamTokenizer tokenizer = new StreamTokenizer(reader);
        tokenizer.resetSyntax();
        tokenizer.wordChars(' ' + 1, 255);
        char[] whitespaceChars = setting.getWhitespaceChars().toCharArray();
        for (char ch : whitespaceChars) {
            tokenizer.whitespaceChars(ch, ch);
        }
        try {
            while (true) {
                int token = tokenizer.nextToken();
                if (token == StreamTokenizer.TT_EOF) {
                    break;
                } else if (token == StreamTokenizer.TT_WORD) {
                    String word = tokenizer.sval;
                    word = setting.convertWord(word);
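
This routine collects the distinct words of a document: after resetSyntax(), every printable character counts as part of a word except the configured whitespace characters, and only TT_WORD tokens are kept. The sketch below is a self-contained approximation; the hard-coded separator string " ,;" and the class name WordExtractorDemo stand in for the FullTextSettings object used above, and no convertWord() normalization is applied beyond lower-casing:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;

public class WordExtractorDemo {
    public static void main(String[] args) throws IOException {
        Set<String> words = new HashSet<>();
        StreamTokenizer tokenizer =
                new StreamTokenizer(new StringReader("Hello, tokenized world;again"));
        tokenizer.resetSyntax();
        tokenizer.wordChars(' ' + 1, 255);       // every printable character is a word char...
        for (char ch : " ,;".toCharArray()) {    // ...except these assumed separators
            tokenizer.whitespaceChars(ch, ch);
        }
        while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
            if (tokenizer.ttype == StreamTokenizer.TT_WORD) {
                words.add(tokenizer.sval.toLowerCase());
            }
        }
        System.out.println(words);   // e.g. [world, again, hello, tokenized] (set order is unspecified)
    }
}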

Examples of java.io.StreamTokenizer

      return null;
    }

    try
    {
      final StreamTokenizer strtok = new StreamTokenizer(new StringReader(value));
      strtok.parseNumbers();
      final int firstToken = strtok.nextToken();
      if (firstToken != StreamTokenizer.TT_NUMBER)
      {
        return null;
      }
      final double nval = strtok.nval;
      final int nextToken = strtok.nextToken();
      if (nextToken != StreamTokenizer.TT_WORD)
      {
        // yeah, this is against the standard, but we are dealing with deadly ugly non-standard documents here
        // maybe we will be able to integrate a real HTML processor at some point.
        return new Float(nval);
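
Here parseNumbers() makes the tokenizer return a leading numeric value as a TT_NUMBER token, so a CSS-style length like "12px" arrives as the number 12 followed by the word "px". The sketch below mirrors that number-then-unit pattern; because the snippet is cut off before the unit is actually used, the pt-to-px conversion, the method name parseLength and the class name LengthParserDemo are purely illustrative:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class LengthParserDemo {
    static Float parseLength(String value) {
        try {
            StreamTokenizer strtok = new StreamTokenizer(new StringReader(value));
            strtok.parseNumbers();                        // numbers come back as TT_NUMBER tokens
            if (strtok.nextToken() != StreamTokenizer.TT_NUMBER) {
                return null;                              // no leading number: not a length
            }
            double nval = strtok.nval;
            if (strtok.nextToken() != StreamTokenizer.TT_WORD) {
                return (float) nval;                      // bare number, no unit
            }
            String unit = strtok.sval;                    // e.g. "px" or "pt"
            return "pt".equals(unit) ? (float) (nval * 96.0 / 72.0) : (float) nval;
        } catch (IOException e) {
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(parseLength("12px"));   // 12.0
        System.out.println(parseLength("9pt"));    // 12.0 (assumed 96 dpi conversion)
        System.out.println(parseLength("abc"));    // null
    }
}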

Examples of java.io.StreamTokenizer

    /**
     * Sets up the stream tokenizer
     */
    private void setup() {
        st = new StreamTokenizer(this);
        st.resetSyntax();
        st.eolIsSignificant(false);
        st.lowerCaseMode(true);

        // Parse numbers as words
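
setup() clears the syntax table, makes line ends insignificant and lower-cases every word token; the truncated comment suggests the digits are then registered as word characters so that numbers come back as parts of words rather than as TT_NUMBER tokens. The sketch below (class name LowerCaseWordsDemo is invented) shows one plausible completion of that configuration:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class LowerCaseWordsDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("Route66 NORTH\nsouth"));
        st.resetSyntax();
        st.eolIsSignificant(false);    // line ends are plain whitespace
        st.lowerCaseMode(true);        // sval of TT_WORD tokens is lower-cased
        st.wordChars('a', 'z');
        st.wordChars('A', 'Z');
        st.wordChars('0', '9');        // "parse numbers as words"
        st.whitespaceChars(0, ' ');

        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            System.out.println(st.sval);   // route66, north, south
        }
    }
}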

Examples of java.io.StreamTokenizer

    // openFile does the real work of opening the MIX file.
    // It is a helper for use by both constructors.

    private void openFile(File file) {
        try {
            st = new StreamTokenizer(
                fr = new FileReader(file));
            st.eolIsSignificant(true);
            st.parseNumbers();
            st.whitespaceChars(' ', ' ');
            st.whitespaceChars('\t', '\t');
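
openFile() configures a tokenizer for line-oriented numeric input: eolIsSignificant(true) makes nextToken() report TT_EOL at every line end, parseNumbers() turns numeric fields into TT_NUMBER tokens, and space and tab act as field separators. The sketch below (class name LineNumbersDemo is hypothetical, and it reads from a string rather than a FileReader) shows how such a tokenizer can be driven record by record:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class LineNumbersDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("1 2 3\n4\t5\n"));
        st.eolIsSignificant(true);         // report end-of-line as its own token
        st.parseNumbers();                 // already the default, kept for symmetry with the snippet
        st.whitespaceChars(' ', ' ');
        st.whitespaceChars('\t', '\t');

        double lineSum = 0;
        int line = 1;
        int tok;
        while ((tok = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if (tok == StreamTokenizer.TT_EOL) {
                System.out.println("line " + line++ + " sum = " + lineSum);
                lineSum = 0;
            } else if (tok == StreamTokenizer.TT_NUMBER) {
                lineSum += st.nval;
            }
        }
    }
}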