
Examples of com.cloudera.sqoop.io.SplittableBufferedWriter
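
SplittableBufferedWriter is a buffered Writer layered over a SplittingOutputStream, an output stream that can close its current file and continue in a fresh one once a configured split size is reached. Callers write one record at a time and then call allowSplit() to mark a point where the underlying stream may safely roll over, so every output file ends on a record boundary. The snippets below, from Sqoop's direct (psql-based) PostgreSQL import path, show the writer being created, driven by an async copy loop, and constructed inside DirectImportUtils.createHdfsSink.

Before the real snippets, a minimal standalone sketch of the API. It assumes only the constructor signatures visible in the snippets below; the destination directory, split size, and null (uncompressed) codec are illustrative, not taken from the Sqoop source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import com.cloudera.sqoop.io.SplittableBufferedWriter;
import com.cloudera.sqoop.io.SplittingOutputStream;

public class SplittableWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path destDir = new Path("/tmp/splittable-demo"); // hypothetical output dir

    // Arguments mirror the createHdfsSink snippet below:
    // (conf, destination dir, filename prefix, split size in bytes, codec);
    // a null codec is assumed to mean "no compression".
    SplittableBufferedWriter w = new SplittableBufferedWriter(
        new SplittingOutputStream(conf, destDir, "part-m-", 1024L * 1024L, null));
    try {
      for (int i = 0; i < 100000; i++) {
        w.write("record-" + i);
        w.write('\n');
        w.allowSplit(); // the stream may start a new part file here
      }
    } finally {
      w.close();
    }
  }
}

The first real example creates the HDFS sink and then launches the psql dump whose output will be copied into it; as the comment notes, the writer is closed later by the AsyncSink, not by this method: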


      for (String arg : args) {
        LOG.debug("  " + arg);
      }

      // This writer will be closed by AsyncSink.
      SplittableBufferedWriter w = DirectImportUtils.createHdfsSink(
          options.getConf(), options, context);

      // Actually start the psql dump.
      p = Runtime.getRuntime().exec(args.toArray(new String[0]),
          envp.toArray(new String[0]));
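The consumer is an AsyncSink thread (the fragment opens with the tail of its constructor, which stores the counters object). Its run() method copies lines from the psql FIFO into the writer; after each record (line plus record delimiter) it calls allowSplit(), so the output can only switch files on a record boundary, and the byte counter is advanced by the line length plus one for the delimiter: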


        this.counters = ctrs;
      }

      public void run() {
        BufferedReader r = null;
        SplittableBufferedWriter w = this.writer;

        char recordDelim = this.options.getOutputRecordDelim();

        try {
          r = new BufferedReader(new InputStreamReader(this.stream));

          // read/write transfer loop here.
          while (true) {
            String inLine = r.readLine();
            if (null == inLine) {
              break; // EOF
            }

            w.write(inLine);
            w.write(recordDelim);
            w.allowSplit();
            counters.addBytes(1 + inLine.length());
          }
        } catch (IOException ioe) {
          LOG.error("IOException reading from psql: " + ioe.toString());
          // set the error bit so our caller can see that something went wrong.
          setError();
        } finally {
          if (null != r) {
            try {
              r.close();
            } catch (IOException ioe) {
              LOG.info("Error closing FIFO stream: " + ioe.toString());
            }
          }

          if (null != w) {
            try {
              w.close();
            } catch (IOException ioe) {
              LOG.info("Error closing HDFS stream: " + ioe.toString());
            }
          }
        }
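
The finally block closes both ends of the pipe: the BufferedReader over the psql FIFO and the SplittableBufferedWriter over HDFS. This is the close promised by the "closed by AsyncSink" comment in the previous snippet.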
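DirectImportUtils.createHdfsSink, called in the first example, is where the writer is actually built: after creating the destination directory it stacks a SplittableBufferedWriter on a SplittingOutputStream configured with a filename prefix, the direct-mode split size, and an optional compression codec. Here the writer is closed by the caller rather than by the factory: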

    LOG.debug("Writing to filesystem: " + fs.getUri());
    LOG.debug("Creating destination directory " + destDir);
    fs.mkdirs(destDir);

    // This Writer will be closed by the caller.
    return new SplittableBufferedWriter(
        new SplittingOutputStream(conf, destDir, "part-m-",
        options.getDirectSplitSize(), getCodec(conf, options)));
  }

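A second version of the same factory differs only in the filename prefix handed to SplittingOutputStream ("data-" instead of "part-m-"):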

    LOG.debug("Writing to filesystem: " + fs.getUri());
    LOG.debug("Creating destination directory " + destDir);
    fs.mkdirs(destDir);

    // This Writer will be closed by the caller.
    return new SplittableBufferedWriter(
        new SplittingOutputStream(conf, destDir, "data-",
        options.getDirectSplitSize(), getCodec(conf, options)));
  }

