Package org.apache.hcatalog.common

Examples of org.apache.hcatalog.common.HCatException
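
HCatException is HCatalog's typed exception class. Most call sites either pass an ErrorType constant together with the underlying cause, or a plain message string. A recurring idiom in the examples below is to rethrow an existing HCatException unchanged and wrap anything else, so that callers always receive a typed error carrying the root cause. Here is a minimal sketch of that idiom; ErrorType and HCatException are the real HCatalog classes, while doMetastoreCall() is a hypothetical placeholder for any operation that can fail:

    import org.apache.hcatalog.common.ErrorType;
    import org.apache.hcatalog.common.HCatException;

    public class WrapExample {
        public void publish() throws HCatException {
            try {
                doMetastoreCall();
            } catch (Exception e) {
                // Rethrow typed errors untouched; wrap everything else so
                // the caller gets an ErrorType plus the original cause.
                if (e instanceof HCatException) {
                    throw (HCatException) e;
                }
                throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
            }
        }

        // Hypothetical stand-in for a metastore or I/O operation.
        private void doMetastoreCall() throws Exception {
        }
    }

The first example, from partition publishing, rolls back any partitions that were already added and then rethrows; e is the exception caught by the enclosing catch block (not shown in this fragment):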


                        client.dropPartition(tableInfo.getDatabaseName(),
                                tableInfo.getTableName(), p.getValues());
                    }
                } catch (Exception te) {
                    // te is the failure from the rollback itself; keep the
                    // original exception e (caught by the enclosing catch
                    // block) as the cause so the root failure is not masked.
                    throw new HCatException(
                            ErrorType.ERROR_PUBLISHING_PARTITION, e);
                }
            }
            if (e instanceof HCatException) {
                throw (HCatException) e;
            } else {
                throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
            }
        } finally {
            HCatUtil.closeHiveClientQuietly(client);
        }
    }
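
The next example guards against queries that would return an excessive number of partitions; the cap comes from the hcat.metastore.maxpartitions property: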


        if (parts != null) {
          // Cap the number of partitions returned; defaults to 100,000 when
          // hcat.metastore.maxpartitions is not set
          int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
          if (parts.size() > maxPart) {
            throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART,
                "total number of partitions is " + parts.size());
          }

          // Populate partition info
          for (Partition ptn : parts) {
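
In the data-transfer reader, any failure while computing input splits is wrapped as ERROR_NOT_INITIALIZED: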

      ReaderContext cntxt = new ReaderContext();
      cntxt.setInputSplits(hcif.getSplits(new JobContext(job.getConfiguration(), null)));
      cntxt.setConf(job.getConfiguration());
      return cntxt;
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }
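
The same pattern covers creating and initializing the underlying record reader: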

    try {
      TaskAttemptContext cntxt = new TaskAttemptContext(conf, new TaskAttemptID());
      rr = inpFmt.createRecordReader(split, cntxt);
      rr.initialize(split, cntxt);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
    return new HCatRecordItr(rr);
  }
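
On the write side, preparing a WriterContext wraps output-format setup failures the same way: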

      HCatOutputFormat.setSchema(job, HCatOutputFormat.getTableSchema(job));
      HCatOutputFormat outFormat = new HCatOutputFormat();
      outFormat.checkOutputSpecs(job);
      outFormat.getOutputCommitter(new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID())).setupJob(job);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
    WriterContext cntxt = new WriterContext();
    cntxt.setConf(job.getConfiguration());
    return cntxt;
  }
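
When the write itself fails, the task is aborted first; a failure during the abort is reported as ERROR_INTERNAL_EXCEPTION, otherwise the original write failure is rethrown with a plain message: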

    } catch (IOException e) {
      if(null != committer) {
        try {
          committer.abortTask(cntxt);
        } catch (IOException e1) {
          throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
        }
      }
      throw new HCatException("Failed while writing",e);
    } catch (InterruptedException e) {
      if(null != committer) {
        try {
          committer.abortTask(cntxt);
        } catch (IOException e1) {
          throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
        }
      }
      throw new HCatException("Failed while writing", e);
    }
  }
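
Committing the finished write job follows the same wrap-on-failure pattern: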

  public void commit(WriterContext context) throws HCatException {
    try {
      new HCatOutputFormat()
          .getOutputCommitter(new TaskAttemptContext(context.getConf(), new TaskAttemptID()))
          .commitJob(new JobContext(context.getConf(), null));
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }
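
As does aborting it: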

  public void abort(WriterContext context) throws HCatException {
    try {
      new HCatOutputFormat()
          .getOutputCommitter(new TaskAttemptContext(context.getConf(), new TaskAttemptID()))
          .abortJob(new JobContext(context.getConf(), null), State.FAILED);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }
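
HCatException also signals misconfiguration, here when HCatOutputFormat.setSchema() was never called before writing: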

        dynamicPartitioningUsed = jobInfo.isDynamicPartitioningUsed();
        dynamicPartCols = jobInfo.getPosOfDynPartCols();
        maxDynamicPartitions = jobInfo.getMaxDynamicPartitions();

        if ((partColsToDel == null) || (dynamicPartitioningUsed && (dynamicPartCols == null))) {
            throw new HCatException("It seems that setSchema() is not called on " +
                    "HCatOutputFormat. Please make sure that method is called.");
        }


        if (!dynamicPartitioningUsed) {
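
Finally, the dynamic-partitioning write path enforces a cap on how many partitions a single task may create: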

            }

            String dynKey = dynamicPartValues.toString();
            if (!baseDynamicWriters.containsKey(dynKey)){
                if ((maxDynamicPartitions != -1) && (baseDynamicWriters.size() > maxDynamicPartitions)){
                    throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
                            "Number of dynamic partitions being created "
                                    + "exceeds configured max allowable partitions["
                                    + maxDynamicPartitions
                                    + "], increase parameter ["
                                    + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
                                    + "] if needed.");
