Package: org.apache.crunch

Examples of org.apache.crunch.CrunchRuntimeException


  @Override
  public Double map(T input) {
    try {
      return call(input);
    } catch (Exception e) {
      throw new CrunchRuntimeException(e);
    }
  }
View Full Code Here


    try {
      for (Double d : call(input)) {
        emitter.emit(d);
      }
    } catch (Exception e) {
      throw new CrunchRuntimeException(e);
    }
  }
View Full Code Here

        byte[] newBytes = new byte[bytes.length];
        System.arraycopy(bytes, 0, newBytes, 0, bytes.length);
        blob = new BytesWritable(newBytes);
      }
    } catch (SerDeException e) {
      throw new CrunchRuntimeException("Unable to serialize object: "
          + orc);
    }
  }
View Full Code Here

    try {
      Object row = serde.deserialize(blob);
      StructObjectInspector rowOi = (StructObjectInspector) serde.getObjectInspector();
      orc = (OrcStruct) OrcUtils.convert(row, rowOi, oi);
    } catch (SerDeException e) {
      throw new CrunchRuntimeException("Unable to deserialize blob: " + blob);
    }
  }
View Full Code Here

  @Override
  public Iterator<T> read(FileSystem fs, final Path path) {
    try {
      if (!fs.isFile(path)) {
        throw new CrunchRuntimeException("Not a file: " + path);
      }
     
      inputFn.initialize();
     
      FileStatus status = fs.getFileStatus(path);
      FileSplit split = new FileSplit(path, 0, status.getLen(), new String[0]);
     
      JobConf conf = new JobConf();
      if (readColumns != null) {
        conf.setBoolean(OrcFileSource.HIVE_READ_ALL_COLUMNS, false);
        conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, OrcFileSource.getColumnIdsStr(readColumns));
      }
      final RecordReader<NullWritable, OrcStruct> reader = inputFormat.getRecordReader(split, conf, Reporter.NULL);
     
      return new UnmodifiableIterator<T>() {
       
        private boolean checked = false;
        private boolean hasNext;
        private OrcStruct value;
        private OrcWritable writable = new OrcWritable();

        @Override
        public boolean hasNext() {
          try {
            if (value == null) {
              value = reader.createValue();
            }
            if (!checked) {
              hasNext = reader.next(NullWritable.get(), value);
              checked = true;
            }
            return hasNext;
          } catch (Exception e) {
            throw new CrunchRuntimeException("Error while reading local file: " + path, e);
          }
        }

        @Override
        public T next() {
          try {
            if (value == null) {
              value = reader.createValue();
            }
            if (!checked) {
              reader.next(NullWritable.get(), value);
            }
            checked = false;
            writable.set(value);
            return inputFn.map(writable);
          } catch (Exception e) {
            throw new CrunchRuntimeException("Error while reading local file: " + path, e);
          }
        }
       
      };
    } catch (Exception e) {
      throw new CrunchRuntimeException("Error while reading local file: " + path, e);
    }
  }
View Full Code Here

    tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, types);
   
    try {
      serde.initialize(null, tbl);
    } catch (SerDeException e) {
      throw new CrunchRuntimeException("Unable to initialize binary serde");
    }
   
    return serde;
  }
View Full Code Here

        for (Pair<K, V> joinPair : readable.read(getContext())) {
          Pair<K, V> detachedPair = tableType.getDetachedValue(joinPair);
          joinMap.put(detachedPair.first(), detachedPair.second());
        }
      } catch (IOException e) {
        throw new CrunchRuntimeException("Error reading map-side join data", e);
      }
    }
View Full Code Here

    @Override
    public void process(Long input, Emitter<Long> emitter) {
      try {
        Thread.sleep(Long.MAX_VALUE);
      } catch (InterruptedException e) {
        throw new CrunchRuntimeException(e);
      }
    }
View Full Code Here

        // Hacking this for Hadoop1 and Hadoop2
        getConfiguration().set("mapreduce.job.cache.local.files", sparkFiles);
        getConfiguration().set("mapred.cache.localFiles", sparkFiles);
      }
    } catch (IOException e) {
      throw new CrunchRuntimeException(e);
    }
  }
View Full Code Here

   
    try {
      TableMapReduceUtil.addDependencyJars(job);
      FileOutputFormat.setOutputPath(job, outputPath);
    } catch (IOException e) {
      throw new CrunchRuntimeException(e);
    }

    if (null == name) {
      job.setOutputFormatClass(TableOutputFormat.class);
      job.setOutputKeyClass(ImmutableBytesWritable.class);
View Full Code Here

TOP

Related Classes of org.apache.crunch.CrunchRuntimeException

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.