Package org.apache.hive.hcatalog.data.schema

Examples of org.apache.hive.hcatalog.data.schema.HCatSchema
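HCatSchema is HCatalog's description of a table or record layout: an ordered list of HCatFieldSchema entries that can be looked up by name or by position. As a warm-up before the real-world snippets, here is a minimal, self-contained sketch; the class name HCatSchemaDemo and the field names are illustrative, not taken from the snippets below.

import java.util.ArrayList;

import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchema;

public class HCatSchemaDemo {
  public static void main(String[] args) throws HCatException {
    // An HCatSchema wraps an ordered list of HCatFieldSchema entries.
    HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
    schema.append(new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "row id"));
    schema.append(new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, null));

    // Fields are addressed by name; getPosition returns null for unknown names.
    System.out.println(schema.getFieldNames());      // [id, name]
    System.out.println(schema.getPosition("name"));  // 1
    System.out.println(schema.getPosition("ghost")); // null
  }
}

The first snippet, apparently from HCatalog's input-format internals, iterates over the partitions of an input job and builds a single schema holding the table's data columns followed by its partition columns: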


    for (PartInfo partitionInfo : partitionInfoList) {
      jobConf = HCatUtil.getJobConfFromContext(jobContext);
      setInputPath(jobConf, partitionInfo.getLocation());
      Map<String, String> jobProperties = partitionInfo.getJobProperties();

      // Build the full schema: data columns followed by partition columns.
      HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
      for (HCatFieldSchema field :
          inputJobInfo.getTableInfo().getDataColumns().getFields()) {
        allCols.append(field);
      }
      for (HCatFieldSchema field :
          inputJobInfo.getTableInfo().getPartitionColumns().getFields()) {
        allCols.append(field);
      }

      HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);

      storageHandler = HCatUtil.getStorageHandler(
        jobConf, partitionInfo);
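Next, a helper that collects values for requested output fields that are not physically present in the data, by consulting the partition key/value map. The original snippet is truncated; the closing braces and return statement below are a minimal completion: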


  /**
   * Gets values for fields requested by the output schema that will not be present in the data.
   */
  private static Map<String, String> getColValsNotInDataColumns(HCatSchema outputSchema,
                                  PartInfo partInfo) {
    HCatSchema dataSchema = partInfo.getPartitionSchema();
    Map<String, String> vals = new HashMap<String, String>();
    for (String fieldName : outputSchema.getFieldNames()) {
      if (dataSchema.getPosition(fieldName) == null) {
        // this field of the output is not present in the data schema,
        // so check the partition values to see if it is a partition column
        if (partInfo.getPartitionValues().containsKey(fieldName)) {
          vals.put(fieldName, partInfo.getPartitionValues().get(fieldName));
        }
      }
    }
    return vals;
  }
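A static utility that exposes the same data-columns-plus-partition-columns schema to MapReduce jobs; the javadoc fragment has been reconstructed from context: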

  /**
   * Gets the HCatalog schema of the table: data columns followed by partition columns.
   * @param conf the job Configuration
   * @return the table schema
   * @throws IOException if the table schema is not available, for example when
   *                     HCatInputFormat.setInput has not been called
   *                     for the current context
   */
  public static HCatSchema getTableSchema(Configuration conf)
    throws IOException {
    InputJobInfo inputJobInfo = getJobInfo(conf);
    HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
    for (HCatFieldSchema field :
        inputJobInfo.getTableInfo().getDataColumns().getFields()) {
      allCols.append(field);
    }
    for (HCatFieldSchema field :
        inputJobInfo.getTableInfo().getPartitionColumns().getFields()) {
      allCols.append(field);
    }
    return allCols;
  }
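Building a small fixed schema by appending primitive fields one at a time (the stray closing lines at the top belong to a mapper that precedes this method in the original file):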

      context.write(NullWritable.get(), record);
    }
  }

  private HCatSchema getSchema() throws HCatException {
    // a simple three-column schema: one int column and two string columns;
    // the empty string is the field comment
    HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
    schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT, ""));
    schema.append(new HCatFieldSchema("a1", HCatFieldSchema.Type.STRING, ""));
    schema.append(new HCatFieldSchema("a2", HCatFieldSchema.Type.STRING, ""));
    return schema;
  }
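Wiring a MapReduce job that copies one HCatalog table to another, reusing the input table's schema as the output schema. The opening HCatInputFormat.setInput call is truncated in the original and has been reconstructed here: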

    HCatInputFormat.setInput(job, InputJobInfo.create(
      dbName, tableName, null));
    // initialize HCatOutputFormat
    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
      dbName, outputTableName, outputPartitionKvps));
    // this test may run with or without an explicitly specified output schema;
    // here it is set explicitly
    HCatSchema s = HCatInputFormat.getTableSchema(job);
    System.err.println("INFO: output schema explicitly set for writing: " + s);
    HCatOutputFormat.setSchema(job, s);

    job.setInputFormatClass(HCatInputFormat.class);
    job.setOutputFormatClass(HCatOutputFormat.class);
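Configuring a map-only job whose output schema is built directly from a list of partition columns: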

    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(DefaultHCatRecord.class);

    job.setNumReduceTasks(0);

    HCatOutputFormat.setSchema(job, new HCatSchema(partitionColumns));

    boolean success = job.waitForCompletion(true);

    // Ensure counters are set when data has actually been read.
    if (partitionValues != null) {
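Job setup from a StoreComplex example, again propagating the input table's schema to the output format: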

    // initialize HCatOutputFormat
    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
      dbName, outputTableName, outputPartitionKvps));

    HCatSchema s = HCatInputFormat.getTableSchema(job);
    HCatOutputFormat.setSchema(job, s);
    job.setInputFormatClass(HCatInputFormat.class);
    job.setOutputFormatClass(HCatOutputFormat.class);
    job.setJarByClass(StoreComplex.class);
    job.setMapperClass(ComplexMapper.class);
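Rewriting a schema before writing to a non-partitioned Pig table: smallint and tinyint columns are widened to int. The opening setInput call is reconstructed as in the earlier snippet: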

    HCatInputFormat.setInput(job, InputJobInfo.create(
      dbName, tableName, null));
    // initialize HCatOutputFormat
    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
      dbName, outputTableName, outputPartitionKvps));
    // this test may run with or without an explicitly specified output schema
    HCatSchema s = HCatInputFormat.getTableSchema(job);
    if (writeToNonPartPigTable) {
      List<HCatFieldSchema> newHfsList = new ArrayList<HCatFieldSchema>();
      // widen smallint and tinyint to int, since Pig has no such types
      for (HCatFieldSchema hfs : s.getFields()) {
        String type = hfs.getTypeString();
        if (type.equals("smallint") || type.equals("tinyint")) {
          newHfsList.add(new HCatFieldSchema(hfs.getName(),
            HCatFieldSchema.Type.INT, hfs.getComment()));
        } else {
          newHfsList.add(hfs);
        }
      }
      s = new HCatSchema(newHfsList);
    }
    HCatOutputFormat.setSchema(job, s);

    job.setInputFormatClass(HCatInputFormat.class);
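A negative test: the output schema is set from a column list and the job is expected to fail, which the assertion checks: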

    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(DefaultHCatRecord.class);

    job.setNumReduceTasks(0);

    HCatOutputFormat.setSchema(job, new HCatSchema(columns));

    // this job is expected to fail
    boolean success = job.waitForCompletion(true);
    Assert.assertFalse(success);
  }
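Finally, the tail of a run()-style driver method that sets output classes, reuses the input table schema for writing, and returns the job's exit status: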

    job.setOutputKeyClass(WritableComparable.class);
    job.setOutputValueClass(DefaultHCatRecord.class);
    job.setNumReduceTasks(0);
    HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
      outputTableName, null));
    HCatSchema s = HCatInputFormat.getTableSchema(job);
    System.err.println("INFO: output schema explicitly set for writing: "
      + s);
    HCatOutputFormat.setSchema(job, s);
    job.setOutputFormatClass(HCatOutputFormat.class);
    return (job.waitForCompletion(true) ? 0 : 1);
