Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.Text.clear()
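
The examples that follow show the typical reuse pattern for Text.clear(): one Text instance is filled, consumed, and then cleared inside a loop so that no new object is allocated per record. A minimal sketch of that pattern is below; the class name and sample input strings are illustrative only, not taken from the examples.

    import org.apache.hadoop.io.Text;

    public class TextClearSketch {
      public static void main(String[] args) {
        Text line = new Text();
        String[] records = {"alpha", "beta", "gamma"};  // illustrative input
        for (String record : records) {
          line.set(record);                 // fill the reused instance
          System.out.println(line + " (length=" + line.getLength() + ")");
          line.clear();                     // reset length to 0 for the next record
        }
      }
    }

Note that clear() only resets the logical length; the backing byte array is retained, so code that reads getBytes() directly (as in the streaming example below) must restrict itself to the first getLength() bytes.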


      if (startHash <= hashCode && hashCode <= endHash) {
        stores.add(new Store(Long.parseLong(readStr[2]),
            Long.parseLong(readStr[3]), startHash,
            endHash));
      }
      line.clear();
    }
    try {
      lin.close();
    } catch(IOException io){
      // do nothing, just a read.
View Full Code Here


        if (harPath.compareTo(new Path(parsed[0])) == 0) {
          // bingo!
          retStr = lineFeed;
          break;
        }
        line.clear();
      }
      if (retStr != null)
        break;
    }
    try {
View Full Code Here

      Map<Object, Object> r;
      if (reuse == null) {
        r = new HashMap<Object, Object>();
      } else {
        r = (HashMap<Object, Object>) reuse;
        r.clear();
      }

      while (true) {
        int more = buffer.read(invert);
        if (more == 0) {
View Full Code Here

        // 3/4 Tool to Hadoop
        while (lineReader.readLine(line) > 0) {
          answer = line.getBytes();
          splitKeyVal(answer, line.getLength(), key, val);
          output.collect(key, val);
          line.clear();
          numRecWritten_++;
          long now = System.currentTimeMillis();
          if (now-lastStdoutReport > reporterOutDelay_) {
            lastStdoutReport = now;
            String hline = "Records R/W=" + numRecRead_ + "/" + numRecWritten_;
View Full Code Here

          long now = System.currentTimeMillis();
          if (reporter != null && now-lastStderrReport > reporterErrDelay_) {
            lastStderrReport = now;
            reporter.progress();
          }
          line.clear();
        }
        if (lineReader != null) {
          lineReader.close();
        }
        if (clientErr_ != null) {
View Full Code Here

        // so the buffer will be null; in that case just return result,
        // as it will default to empty
        if (dictionaryBuffer != null) {
          dictionaryBuffer.setText(result, offset, length);
        } else {
          result.clear();
        }
      }
      return result;
    }
View Full Code Here

      for (int i = 0; i < 100; ++i) {
        text.readFields(inputBuffer);
        assertTrue("Testing the spillage of spilling buffer".equals(text
            .toString()));
        text.clear();
      }

      try {
        text.readFields(inputBuffer);
        assertTrue(false);
View Full Code Here

      queue.add(text);
    }
    queue.prepareRead();
    for (Text t : queue) {
      assertTrue(msg.equals(t.toString()));
      text.clear();
    }

    assertTrue(queue.poll() == null);

    assertTrue(file.exists());
View Full Code Here


          int startHash = Integer.parseInt(readStr[0]);
          int endHash  = Integer.parseInt(readStr[1]);
          stores.add(new Store(Long.parseLong(readStr[2]),
              Long.parseLong(readStr[3]), startHash,
              endHash));
          line.clear();
        }
      } catch (IOException ioe) {
        LOG.warn("Encountered exception ", ioe);
        throw ioe;
      } finally {
View Full Code Here
