Package org.apache.hcatalog.mapreduce

Examples of org.apache.hcatalog.mapreduce.OutputJobInfo
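The excerpts below show how OutputJobInfo is created, serialized into a job configuration, and recovered on the task side. For orientation, here is a minimal sketch of the usual write-side setup, following the documented HCatOutputFormat pattern; the database and table names are illustrative, and actually running it assumes a reachable Hive metastore:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hcatalog.data.schema.HCatSchema;
  import org.apache.hcatalog.mapreduce.HCatOutputFormat;
  import org.apache.hcatalog.mapreduce.OutputJobInfo;

  public class OutputJobInfoSetupSketch {
    public static void main(String[] args) throws Exception {
      Job job = new Job(new Configuration(), "hcat-write-sketch");

      // Describe the target: database "default", table "mytable"
      // (illustrative names), with no static partition values.
      OutputJobInfo outputJobInfo = OutputJobInfo.create("default", "mytable", null);

      // setOutput() serializes the OutputJobInfo into the job configuration
      // under HCatConstants.HCAT_KEY_OUTPUT_INFO, which is what the excerpts
      // below deserialize.
      HCatOutputFormat.setOutput(job, outputJobInfo);

      // Read the table schema back and declare it as the output record schema.
      HCatSchema schema = HCatOutputFormat.getTableSchema(job);
      HCatOutputFormat.setSchema(job, schema);
    }
  }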


  // Counterpart of setWriteTransaction below: pulls the serialized
  // OutputJobInfo out of the configuration and extracts the transaction.
  static Transaction getWriteTransaction(Configuration conf) throws IOException {
    OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(
      conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
    return (Transaction) HCatUtil.deserialize(outputJobInfo.getProperties()
      .getProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY));
  }

  static void setWriteTransaction(Configuration conf, Transaction txn) throws IOException {
    OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
    outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY, HCatUtil.serialize(txn));
    conf.set(HCatConstants.HCAT_KEY_OUTPUT_INFO, HCatUtil.serialize(outputJobInfo));
  }
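Together, these helpers form a round trip through the job configuration: the setter embeds the serialized transaction inside the serialized OutputJobInfo, and the getter unwraps both layers on the task side. A self-contained sketch of the same HCatUtil round trip, using a plain string in place of the Transaction and an illustrative property key:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hcatalog.common.HCatUtil;

  public class SerializeRoundTripSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Any Serializable value survives the serialize/deserialize round trip
      // that the helpers above apply to the Transaction object.
      conf.set("example.key", HCatUtil.serialize("REVISION_7"));
      String recovered = (String) HCatUtil.deserialize(conf.get("example.key"));
      System.out.println(recovered);  // prints REVISION_7
    }
  }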


    // Populate RM transaction in OutputJobInfo
    // In case of bulk mode, populate intermediate output location
    Map<String, String> tableJobProperties = tableDesc.getJobProperties();
    String jobString = tableJobProperties.get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
    try {
      OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(jobString);
      HCatTableInfo tableInfo = outputJobInfo.getTableInfo();
      String qualifiedTableName = HBaseHCatStorageHandler.getFullyQualifiedHBaseTableName(tableInfo);
      jobProperties.put(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY, qualifiedTableName);
      jobProperties.put(TableOutputFormat.OUTPUT_TABLE, qualifiedTableName);

      Configuration jobConf = getJobConf();
      addResources(jobConf, jobProperties);

      Configuration copyOfConf = new Configuration(jobConf);
      HBaseConfiguration.addHbaseResources(copyOfConf);

      String txnString = outputJobInfo.getProperties().getProperty(
        HBaseConstants.PROPERTY_WRITE_TXN_KEY);
      Transaction txn = null;
      if (txnString == null) {
        txn = HBaseRevisionManagerUtil.beginWriteTransaction(qualifiedTableName, tableInfo,
          RevisionManagerConfiguration.create(copyOfConf));
        String serializedTxn = HCatUtil.serialize(txn);
        outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY,
          serializedTxn);
      } else {
        txn = (Transaction) HCatUtil.deserialize(txnString);
      }
      if (isBulkMode(outputJobInfo)) {
        String tableLocation = tableInfo.getTableLocation();
        String location = new Path(tableLocation, "REVISION_" + txn.getRevisionNumber())
          .toString();
        outputJobInfo.getProperties().setProperty(PROPERTY_INT_OUTPUT_LOCATION, location);
        // We are writing out an intermediate sequenceFile hence
        // location is not passed in OutputJobInfo.getLocation()
        // TODO replace this with a mapreduce constant when available
        jobProperties.put("mapred.output.dir", location);
        jobProperties.put("mapred.output.committer.class", HBaseBulkOutputCommitter.class.getName());
      }
      // Write the updated OutputJobInfo (now carrying the transaction and, in
      // bulk mode, the intermediate location) back into the job properties.
      jobProperties.put(HCatConstants.HCAT_KEY_OUTPUT_INFO, HCatUtil.serialize(outputJobInfo));
    } catch (IOException e) {
      throw new IllegalStateException("Failed to set output job properties", e);
    }
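To make the bulk-mode branch concrete: with a table location of /warehouse/mydb.db/mytable and revision 7 (both values made up for illustration), the intermediate output location works out as follows:

  import org.apache.hadoop.fs.Path;

  public class RevisionLocationSketch {
    public static void main(String[] args) {
      String tableLocation = "/warehouse/mydb.db/mytable";  // illustrative
      long revisionNumber = 7;                              // illustrative
      Path location = new Path(tableLocation, "REVISION_" + revisionNumber);
      // Prints /warehouse/mydb.db/mytable/REVISION_7 -- the directory that
      // receives the intermediate sequence file before the bulk import.
      System.out.println(location);
    }
  }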


  private OutputFormat<WritableComparable<?>, Object> getOutputFormat(JobConf job)
    throws IOException {
    String outputInfo = job.get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
    OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(outputInfo);
    OutputFormat<WritableComparable<?>, Object> outputFormat = null;
    if (HBaseHCatStorageHandler.isBulkMode(outputJobInfo)) {
      outputFormat = new HBaseBulkOutputFormat();
    } else {
      outputFormat = new HBaseDirectOutputFormat();
    }
    return outputFormat;
  }

      if (crd != null) {
        job.getCredentials().addAll(crd);
      }
    } else {
      Job clone = new Job(job.getConfiguration());
      OutputJobInfo outputJobInfo;
      if (userStr.length == 2) {
        outputJobInfo = OutputJobInfo.create(userStr[0], userStr[1], partitions);
      } else if (userStr.length == 1) {
        outputJobInfo = OutputJobInfo.create(null, userStr[0], partitions);
      } else {
        throw new FrontendException("location " + location
          + " is invalid. It must be of the form [db.]table",
          PigHCatUtil.PIG_EXCEPTION_CODE);
      }
      Schema schema = (Schema) ObjectSerializer.deserialize(udfProps.getProperty(PIG_SCHEMA));
      if (schema != null) {
        pigSchema = schema;
      }
      if (pigSchema == null) {
        throw new FrontendException(
          "Schema for data cannot be determined.",
          PigHCatUtil.PIG_EXCEPTION_CODE);
      }
      String externalLocation = (String) udfProps.getProperty(HCatConstants.HCAT_PIG_STORER_EXTERNAL_LOCATION);
      if (externalLocation != null) {
        outputJobInfo.setLocation(externalLocation);
      }
      try {
        HCatOutputFormat.setOutput(job, outputJobInfo);
      } catch (HCatException he) {
        // pass the message to the user - essentially something about the
        // table information passed to HCatOutputFormat was not right
        throw new PigException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he);
      }
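The [db.]table convention enforced above is easy to trace with concrete values; a small self-contained sketch of the same parsing (the location strings are illustrative):

  import org.apache.hcatalog.mapreduce.OutputJobInfo;

  public class LocationParsingSketch {
    public static void main(String[] args) {
      for (String location : new String[] {"default.mytable", "mytable"}) {
        String[] userStr = location.split("\\.");
        // Two components name an explicit database; one falls back to the
        // default database, exactly as in the branches above.
        OutputJobInfo info = (userStr.length == 2)
            ? OutputJobInfo.create(userStr[0], userStr[1], null)
            : OutputJobInfo.create(null, userStr[0], null);
        System.out.println(info.getDatabaseName() + " -> " + info.getTableName());
      }
    }
  }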

    job.setOutputCommitter(HBaseBulkOutputCommitter.class);

    //manually create transaction
    RevisionManager rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(conf);
    try {
      OutputJobInfo outputJobInfo = OutputJobInfo.create("default", tableName, null);
      Transaction txn = rm.beginWriteTransaction(tableName, Arrays.asList(familyName));
      outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY,
        HCatUtil.serialize(txn));
      job.set(HCatConstants.HCAT_KEY_OUTPUT_INFO,
        HCatUtil.serialize(outputJobInfo));
    } finally {
      rm.close();
    }
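Because this excerpt opens the write transaction by hand, the transaction must also be finished by hand once the job completes. A hedged sketch of the commit counterpart, reusing conf and txn from above; commitWriteTransaction is assumed from the RevisionManager API rather than shown in the excerpt:

  // Sketch only: commit the manually opened transaction once the job succeeds.
  static void commitManualTransaction(Configuration conf, Transaction txn)
    throws IOException {
    RevisionManager rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(conf);
    try {
      rm.commitWriteTransaction(txn);  // assumed RevisionManager method
    } finally {
      rm.close();
    }
  }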

    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.setInputPaths(job, inputPath);


    job.setOutputFormatClass(HCatOutputFormat.class);
    OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName, tableName, null);
    HCatOutputFormat.setOutput(job, outputJobInfo);

    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(HCatRecord.class);


      os.write(Bytes.toBytes(data[i] + "\n"));
      os.close();
    }

    Path workingDir = new Path(methodTestDir, "mr_abort");
    OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName,
      tableName, null);
    Job job = configureJob(testName,
      conf, workingDir, MapWriteAbortTransaction.class,
      outputJobInfo, inputPath);
    assertFalse(job.waitForCompletion(true));
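The assertFalse is deliberate: MapWriteAbortTransaction is written to make the job fail, so the test exercises the abort path of the write transaction rather than a commit.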


  public static class MapHCatWrite extends Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
      HCatRecord record = new DefaultHCatRecord(3);
      HCatSchema schema = jobInfo.getOutputSchema();
      String vals[] = value.toString().split(",");
      record.setInteger("key", schema, Integer.parseInt(vals[0]));
      for (int i = 1; i < vals.length; i++) {
        String pair[] = vals[i].split(":");
        record.set(pair[0], schema, pair[1]);
      }
      context.write(null, record);
    }
  }
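The split logic implies a simple comma/colon line format. A sketch with an assumed sample line (the real test input is not part of the excerpt):

  String line = "1,english:one,spanish:uno";  // assumed sample input
  String[] vals = line.split(",");            // ["1", "english:one", "spanish:uno"]
  String[] pair = vals[1].split(":");         // ["english", "one"]
  // The mapper stores vals[0] as the integer "key" field and each
  // name:value pair as a column of the HCatRecord.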

    job.set(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY, tableName);

    //manually create transaction
    RevisionManager rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(conf);
    try {
      OutputJobInfo outputJobInfo = OutputJobInfo.create("default", tableName, null);
      Transaction txn = rm.beginWriteTransaction(tableName, Arrays.asList(familyName));
      outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY,
        HCatUtil.serialize(txn));
      job.set(HCatConstants.HCAT_KEY_OUTPUT_INFO,
        HCatUtil.serialize(outputJobInfo));
    } finally {
      rm.close();
    }
