Package com.facebook.hiveio.common

Examples of com.facebook.hiveio.common.HiveTableDesc
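HiveTableDesc is a small descriptor holding a Hive database name and a table name; the snippets below show it being filled in from job configuration, handed to metastore calls, and printed via dotString(). As a minimal sketch of those accessors in one place (the database and table names are placeholders; the one-argument constructor appears in the HiveTableSchemaImpl example at the bottom):

import com.facebook.hiveio.common.HiveTableDesc;

public class HiveTableDescSketch {
  public static void main(String[] args) {
    // The one-argument constructor taking a table name appears in the
    // HiveTableSchemaImpl example at the bottom of this page.
    HiveTableDesc tableDesc = new HiveTableDesc("my_table");
    tableDesc.setDatabaseName("my_database");  // placeholder
    tableDesc.setTableName("my_table");        // placeholder
    // dotString() renders the "database.table" form used in the
    // parser log messages below.
    System.out.println(tableDesc.dotString());
  }
}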


  /**
   * Build the output description for the vertex output table from the
   * configuration.
   *
   * @return HiveOutputDescription
   */
  private HiveOutputDescription makeOutputDesc() {
    HiveOutputDescription outputDesc = new HiveOutputDescription();
    HiveTableDesc tableDesc = outputDesc.getTableDesc();
    tableDesc.setDatabaseName(HIVE_VERTEX_OUTPUT_DATABASE.get(getConf()));
    tableDesc.setTableName(HIVE_VERTEX_OUTPUT_TABLE.get(getConf()));
    outputDesc.setPartitionValues(
        parsePartitionValues(HIVE_VERTEX_OUTPUT_PARTITION.get(getConf())));
    return outputDesc;
  }
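
The helper above pulls the database name, table name, and partition values out of the job configuration. As a hedged sketch of the same calls with literal values, assuming setPartitionValues accepts a map from partition column to value (the form parsePartitionValues produces from the configured string) and that HiveOutputDescription lives under com.facebook.hiveio.output:

import java.util.Map;

import com.facebook.hiveio.common.HiveTableDesc;
import com.facebook.hiveio.output.HiveOutputDescription;
import com.google.common.collect.ImmutableMap;

public class OutputDescSketch {
  public static void main(String[] args) {
    HiveOutputDescription outputDesc = new HiveOutputDescription();
    HiveTableDesc tableDesc = outputDesc.getTableDesc();
    tableDesc.setDatabaseName("my_database");  // placeholder
    tableDesc.setTableName("my_table");        // placeholder
    // Assumed: partition values map partition column to value;
    // "ds" and the date are placeholders.
    Map<String, String> partitionValues = ImmutableMap.of("ds", "2018-01-01");
    outputDesc.setPartitionValues(partitionValues);
  }
}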


          public SchemaAndPartitions idempotentTask() throws TException {
            ThriftHiveMetastore.Iface client = inputDesc.metastoreClient(conf);

            LOG.info("getSplits of " + inputDesc);

            HiveTableDesc tableDesc = inputDesc.getTableDesc();
            Table table =
                client.get_table(tableDesc.getDatabaseName(), tableDesc.getTableName());

            SchemaAndPartitions result = new SchemaAndPartitions();
            result.tableSchema = HiveTableSchemaImpl.fromTable(conf, table);
            HiveTableSchemas.put(conf, myProfileId, result.tableSchema);
            result.partitions = computePartitions(inputDesc, client, table);
            return result;

      // table without partitions
      partitions.add(InputPartition.newFromHiveTable(table));
    } else {
      // table with partitions, find matches to user filter.
      List<Partition> hivePartitions;
      HiveTableDesc tableDesc = inputDesc.getTableDesc();
      hivePartitions = client.get_partitions_by_filter(tableDesc.getDatabaseName(),
          tableDesc.getTableName(), inputDesc.getPartitionFilter(), (short) -1);
      for (Partition hivePartition : hivePartitions) {
        partitions.add(InputPartition.newFromHivePartition(hivePartition));
      }
    }
    return partitions;
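
The filter string handed to get_partitions_by_filter is written in the Hive metastore's partition-filter syntax, and the (short) -1 argument asks for all matching partitions rather than capping the result. A hedged sketch of supplying such a filter, assuming HiveInputDescription (under com.facebook.hiveio.input) exposes a setter mirroring the getPartitionFilter accessor used above:

import com.facebook.hiveio.input.HiveInputDescription;

public class PartitionFilterSketch {
  public static void main(String[] args) {
    HiveInputDescription inputDesc = new HiveInputDescription();
    inputDesc.getTableDesc().setDatabaseName("my_database");  // placeholder
    inputDesc.getTableDesc().setTableName("my_table");        // placeholder
    // Assumed setter mirroring the getPartitionFilter accessor above;
    // the filter value is a placeholder in Hive's filter syntax.
    inputDesc.setPartitionFilter("ds=\"2018-01-01\"");
  }
}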

    HiveInputDescription inputDesc, ThriftHiveMetastore.Iface client)
    throws IOException
  {
    LOG.info("getSplits of " + inputDesc);

    HiveTableDesc tableDesc = inputDesc.getTableDesc();
    Table table;
    try {
      table = client.get_table(tableDesc.getDatabaseName(), tableDesc.getTableName());
      // CHECKSTYLE: stop IllegalCatch
    } catch (Exception e) {
      // CHECKSTYLE: resume IllegalCatch
      throw new IOException(e);
    }

      // table without partitions
      partitions.add(InputPartition.newFromHiveTable(table));
    } else {
      // table with partitions, find matches to user filter.
      List<Partition> hivePartitions;
      HiveTableDesc tableDesc = inputDesc.getTableDesc();
      try {
        hivePartitions = client.get_partitions_by_filter(tableDesc.getDatabaseName(),
            tableDesc.getTableName(), inputDesc.getPartitionFilter(), (short) -1);
        // CHECKSTYLE: stop IllegalCatch
      } catch (Exception e) {
        // CHECKSTYLE: resume IllegalCatch
        throw new IOException(e);
      }

  {
    ArrayParserData data = new ArrayParserData(deserializer, columnIndexes, schema,
        partitionValues);

    int numColumns = schema.numColumns();
    HiveTableDesc tableDesc = schema.getTableDesc();

    for (int i = 0; i < numColumns; ++i) {
      data.structFields[i] = data.inspector.getAllStructFieldRefs().get(i);
      ObjectInspector fieldInspector = data.structFields[i].getFieldObjectInspector();
      data.hiveTypes[i] = HiveType.fromHiveObjectInspector(fieldInspector);
      if (data.hiveTypes[i].isPrimitive()) {
        data.primitiveInspectors[i] = (PrimitiveObjectInspector) fieldInspector;
      }
    }

    boolean hasCollections = false;

    for (int i = 0; i < columnIndexes.length; ++i) {
      int columnId = columnIndexes[i];
      if (data.hiveTypes[columnId].isCollection()) {
        hasCollections = true;
        break;
      }
    }

    RecordParser<Writable> parser = null;

    if (!hasCollections && exampleValue instanceof BytesRefArrayWritable) {
      parser = new BytesParser(partitionValues, data);
    } else {
      parser = new ArrayParser(partitionValues, data);
    }

    Class<? extends RecordParser> forcedParserClass = FORCE_PARSER.get(conf);
    if (forcedParserClass == null) {
      LOG.info("Using {} to parse hive records from table {}",
          parser.getClass().getSimpleName(), tableDesc.dotString());
    } else {
      LOG.info("Using {} chosen by user instead of {} to parse hive records from table {}",
          forcedParserClass.getSimpleName(), parser.getClass().getSimpleName(),
          tableDesc.dotString());
      parser = createForcedParser(deserializer, schema, partitionValues,
          data, forcedParserClass);
    }

    return parser;
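
The choice above is a fast-path selection: judging by the instanceof check, BytesParser works on the raw bytes of RCFile's columnar BytesRefArrayWritable, so it is only used when no requested column is a collection type and the example row actually is a BytesRefArrayWritable; every other case falls back to the inspector-driven ArrayParser. Condensed into a standalone predicate (a sketch of the rule, not hiveio API):

import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.io.Writable;

public class ParserChoiceSketch {
  /** The selection rule from the snippet above, as a predicate. */
  static boolean canUseBytesParser(boolean hasCollections, Writable exampleValue) {
    return !hasCollections && exampleValue instanceof BytesRefArrayWritable;
  }
}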

          public SchemaAndPartitions idempotentTask() throws TException {
            ThriftHiveMetastore.Iface client = inputDesc.metastoreClient(conf);

            LOG.info("getSplits of " + inputDesc);

            HiveTableDesc tableDesc = inputDesc.getTableDesc();
            Table table =
                client.get_table(tableDesc.getDatabaseName(), tableDesc.getTableName());

            SchemaAndPartitions result = new SchemaAndPartitions();
            result.tableSchema = HiveTableSchemaImpl.fromTable(conf, table);
            HiveTableSchemas.putToConf(conf, myProfileId, result.tableSchema);
            result.partitions = computePartitions(inputDesc, client, table);
            return result;


  {
    ArrayParserData data = new ArrayParserData(deserializer, columnIndexes, schema,
        partitionValues);

    int numColumns = schema.numColumns();
    HiveTableDesc tableDesc = schema.getTableDesc();

    for (int i = 0; i < numColumns; ++i) {
      data.structFields[i] = i < data.inspector.getAllStructFieldRefs().size() ?
          data.inspector.getAllStructFieldRefs().get(i) : NULL_STRUCT_FIELD;
      ObjectInspector fieldInspector = data.structFields[i].getFieldObjectInspector();
      data.hiveTypes[i] = HiveType.fromHiveObjectInspector(fieldInspector);
      if (data.hiveTypes[i].isPrimitive()) {
        data.primitiveInspectors[i] = (PrimitiveObjectInspector) fieldInspector;
      }
    }

    boolean hasCollections = false;

    for (int i = 0; i < columnIndexes.length; ++i) {
      int columnId = columnIndexes[i];
      if (data.hiveTypes[columnId].isCollection()) {
        hasCollections = true;
        break;
      }
    }

    RecordParser<Writable> parser = null;

    if (!hasCollections && exampleValue instanceof BytesRefArrayWritable) {
      parser = new BytesParser(partitionValues, data);
    } else {
      parser = new ArrayParser(partitionValues, data);
    }

    Class<? extends RecordParser> forcedParserClass = FORCE_PARSER.get(conf);
    if (forcedParserClass == null) {
      LOG.info("Using {} to parse hive records from table {}",
          parser.getClass().getSimpleName(), tableDesc.dotString());
    } else {
      LOG.info("Using {} chosen by user instead of {} to parse hive records from table {}",
          forcedParserClass.getSimpleName(), parser.getClass().getSimpleName(),
          tableDesc.dotString());
      parser = createForcedParser(deserializer, schema, partitionValues,
          data, forcedParserClass);
    }

    return parser;

  /**
   * Constructor
   */
  public HiveTableSchemaImpl() {
    tableName = new HiveTableDesc("_unknown_");
    partitionPositions = Maps.newHashMap();
    columnPositions = Maps.newHashMap();
    hiveTypes = new HiveType[0];
  }
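
The "_unknown_" descriptor above is only the default for an empty schema; in the split-computation snippets earlier, a schema carrying the real table descriptor is built from a metastore Table via fromTable. A hedged sketch of that path (import locations and the configuration type are assumed; client and conf are the metastore client and configuration seen above, and the names are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.TException;

import com.facebook.hiveio.common.HiveTableDesc;
import com.facebook.hiveio.schema.HiveTableSchema;
import com.facebook.hiveio.schema.HiveTableSchemaImpl;

public class SchemaSketch {
  static HiveTableDesc realTableDesc(ThriftHiveMetastore.Iface client,
      Configuration conf) throws TException {
    // Placeholder database and table names.
    Table table = client.get_table("my_database", "my_table");
    HiveTableSchema schema = HiveTableSchemaImpl.fromTable(conf, table);
    return schema.getTableDesc();  // now describes the real table
  }
}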
