Package org.apache.hadoop.mapreduce.lib.input

Examples of org.apache.hadoop.mapreduce.lib.input.CombineFileSplit
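
CombineFileSplit packages several files, or chunks of files, into a single InputSplit so that CombineFileInputFormat can feed many small inputs to one map task. Each chunk is described by a path, a start offset, and a length, with optional preferred hosts for locality. A minimal sketch of building a split and reading it back through the public accessors (the paths and host names below are illustrative only):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

    public class CombineFileSplitSketch {
      public static void main(String[] args) throws Exception {
        Path[] files = {new Path("part-0"), new Path("part-1")};  // illustrative paths
        long[] starts = {0L, 0L};       // byte offset of each chunk within its file
        long[] lengths = {100L, 200L};  // bytes to read from each chunk
        String[] hosts = {"host1", "host2"};  // preferred hosts for locality

        CombineFileSplit split = new CombineFileSplit(files, starts, lengths, hosts);

        System.out.println(split.getNumPaths());  // 2
        System.out.println(split.getLength());    // 300, the sum of chunk lengths
        for (int i = 0; i < split.getNumPaths(); i++) {
          System.out.printf("%s @%d +%d%n",
              split.getPath(i), split.getOffset(i), split.getLength(i));
        }
      }
    }

The excerpts below are collected from code that constructs or consumes CombineFileSplit instances.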


    Path[] files = {new Path("one"), new Path("two")};
    long[] start = {1, 2};
    long[] lengths = {100, 200};
    String[] locations = {"locOne", "loctwo"};

    CombineFileSplit cfSplit = new CombineFileSplit(files, start, lengths,
            locations);
    ResourceUsageMetrics metrics = new ResourceUsageMetrics();
    metrics.setCumulativeCpuUsage(200);

    double[] reduceBytes = {8.1d, 8.2d};
    // ...


    Path[] files = {new Path("one"), new Path("two")};
    long[] start = {1, 2};
    long[] lengths = {100, 200};
    String[] locations = {"locOne", "loctwo"};

    CombineFileSplit cfSplit = new CombineFileSplit(files, start, lengths,
            locations);
    ResourceUsageMetrics metrics = new ResourceUsageMetrics();
    metrics.setCumulativeCpuUsage(200);
    ResourceUsageMetrics[] rMetrics = {metrics};
    // ...

    Path[] paths = {p1, p2};

    long[] start = {0, 0};
    long[] lengths = {1000, 1000};
    String[] locations = {"temp1", "temp2"};
    CombineFileSplit cfsplit = new CombineFileSplit(paths, start, lengths,
            locations);
    double[] reduceBytes = {100, 100};
    double[] reduceRecords = {2, 2};
    long[] reduceOutputBytes = {500, 500};
    long[] reduceOutputRecords = {2, 2};
    // ...
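
These first three fragments appear to come from Hadoop's Gridmix tests: the split, the ResourceUsageMetrics object, and the reduceBytes/reduceRecords/reduceOutputBytes/reduceOutputRecords arrays are the ingredients of a GridmixSplit, a CombineFileSplit subclass that carries synthetic per-reducer statistics for load simulation.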

  @Override
  @SuppressWarnings("unchecked")
  public RecordReader createRecordReader(
      InputSplit split, TaskAttemptContext context) throws IOException {

    CombineFileSplit combineSplit = (CombineFileSplit) split;

    // Use CombineFileRecordReader since this can handle CombineFileSplits
    // and instantiate another RecordReader in a loop; do this with the
    // CombineShimRecordReader.
    RecordReader rr = new CombineFileRecordReader(combineSplit, context,
        CombineShimRecordReader.class);
    return rr;
  }
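
CombineFileRecordReader instantiates the class passed as its third argument once per chunk of the split; that delegate must extend RecordReader and expose a three-argument constructor taking (CombineFileSplit, TaskAttemptContext, Integer index), which is what the CombineShimRecordReader above provides. A minimal sketch of such a delegate, assuming LongWritable/Text line records (the class name here is illustrative, not the real shim):

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

    // Illustrative delegate: reads one chunk of the CombineFileSplit as lines.
    public class SingleFileShimReader extends RecordReader<LongWritable, Text> {
      private final LineRecordReader delegate = new LineRecordReader();
      private final CombineFileSplit split;
      private final int index;

      // CombineFileRecordReader requires exactly this constructor signature.
      public SingleFileShimReader(CombineFileSplit split, TaskAttemptContext context,
          Integer index) throws IOException {
        this.split = split;
        this.index = index;
      }

      @Override
      public void initialize(InputSplit genericSplit, TaskAttemptContext context)
          throws IOException {
        // Narrow the combined split down to the single chunk this reader owns.
        FileSplit fileSplit = new FileSplit(split.getPath(index),
            split.getOffset(index), split.getLength(index), split.getLocations());
        delegate.initialize(fileSplit, context);
      }

      @Override
      public boolean nextKeyValue() throws IOException { return delegate.nextKeyValue(); }
      @Override
      public LongWritable getCurrentKey() { return delegate.getCurrentKey(); }
      @Override
      public Text getCurrentValue() { return delegate.getCurrentValue(); }
      @Override
      public float getProgress() throws IOException { return delegate.getProgress(); }
      @Override
      public void close() throws IOException { delegate.close(); }
    }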

    writer.close();
   
    compressedFile = compressedFile.suffix(".gz");
    // now read back the data from the compressed stream using FileQueue
    long fileSize = lfs.listStatus(compressedFile)[0].getLen();
    CombineFileSplit split =
      new CombineFileSplit(new Path[] {compressedFile}, new long[] {fileSize});
    FileQueue queue = new FileQueue(split, conf);
    byte[] bytes = new byte[inputLine.getBytes().length];
    queue.read(bytes);
    queue.close();
    String readLine = new String(bytes);
    // ...
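
FileQueue here appears to be Gridmix's helper that presents the chunks of a CombineFileSplit as one continuous InputStream, choosing a decompression codec from the file extension, which is why the .gz file can be read back directly. Note also the two-argument CombineFileSplit(Path[], long[]) constructor: it takes only paths and lengths, with all start offsets defaulting to zero and no location hints.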


    Collections.sort(sort, hostRank);
    final String[] hosts = new String[Math.min(nLocs, sort.size())];
    for (int i = 0; i < nLocs && i < sort.size(); ++i) {
      hosts[i] = sort.get(i).getKey();
    }
    return new CombineFileSplit(paths.toArray(new Path[0]),
        toLongArray(start), toLongArray(length), hosts);
  }
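
toLongArray is outside this excerpt; presumably it is a small helper that unboxes a List<Long> into the long[] the CombineFileSplit constructor wants, along these lines (the name and signature are assumed from the call site):

    // Hypothetical helper inferred from the call site above: unbox a List<Long>.
    private static long[] toLongArray(final List<Long> sizes) {
      final long[] result = new long[sizes.size()];
      for (int i = 0; i < result.length; i++) {
        result[i] = sizes.get(i);
      }
      return result;
    }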
