Package org.apache.hcatalog.common

Examples of org.apache.hcatalog.common.HCatException
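
HCatException is HCatalog's checked exception type: it extends java.io.IOException and carries an ErrorType code alongside the usual message and cause. The snippets below, drawn from HCatalog's input and output format classes, show the common ways it is constructed, re-thrown, and wrapped. As a quick orientation, here is a minimal sketch of catching one and inspecting it, assuming getErrorType() is the accessor for the ErrorType supplied at construction:

    import org.apache.hcatalog.common.ErrorType;
    import org.apache.hcatalog.common.HCatException;

    try {
        runHCatJob(); // hypothetical method standing in for the snippets below
    } catch (HCatException e) {
        if (e.getErrorType() == ErrorType.ERROR_EXCEED_MAXPART) {
            // too many partitions matched; narrow the partition filter and retry
        }
        throw e;
    }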


                                                              (short) -1);

        // Default to 100,000 partitions if hcat.metastore.maxpartitions is not set
        int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
        if (parts != null && parts.size() > maxPart) {
          throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
        }

        // populate partition info
        for (Partition ptn : parts){
          PartInfo partInfo = extractPartInfo(ptn.getSd(),ptn.getParameters(),
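
The fragment above begins mid-call; the (short) -1 left dangling at the top is the metastore convention for "no limit on the number of partitions returned". A self-contained sketch of the same guard, assuming a connected HiveMetaStoreClient named client:

        List<Partition> parts = client.listPartitions(dbName, tableName, (short) -1); // -1 = fetch all
        int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
        if (parts != null && parts.size() > maxPart) {
            throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART,
                    "total number of partitions is " + parts.size());
        }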


   * @throws IOException if the job information has not been set or cannot be deserialized
   */
  public static OutputJobInfo getJobInfo(JobContext jobContext) throws IOException {
      String jobString = jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
      if( jobString == null ) {
          throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED);
      }

      return (OutputJobInfo) HCatUtil.deserialize(jobString);
  }
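
getJobInfo() fails with ERROR_NOT_INITIALIZED when nothing has yet been stored under HCAT_KEY_OUTPUT_INFO. The write side it pairs with is a one-liner; a sketch, assuming HCatUtil.serialize is the counterpart of the HCatUtil.deserialize call above:

      jobContext.getConfiguration().set(HCatConstants.HCAT_KEY_OUTPUT_INFO,
              HCatUtil.serialize(outputJobInfo));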

          if (dynamicPartVals != null){
            // dynamic part vals specified
            List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
            if (dynamicPartVals.size() != dynamicPartKeys.size()){
              throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
                  "Unable to configure dynamic partitioning for storage handler, mismatch between"
                  + " number of partition values obtained["+dynamicPartVals.size()
                  + "] and number of partition values required["+dynamicPartKeys.size()+"]");
            }
            for (int i = 0; i < dynamicPartKeys.size(); i++){
              partitionValues.put(dynamicPartKeys.get(i), dynamicPartVals.get(i));
            }

            jobInfo.setPartitionValues(partitionValues);
          }

          HCatUtil.configureOutputStorageHandler(storageHandler,jobContext,jobInfo);
      } catch(Exception e) {
        if (e instanceof HCatException){
          throw (HCatException)e;
        }else{
          throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
        }
      }
  }
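
The catch block above shows an idiom that recurs throughout this page: re-throw an HCatException untouched, wrap anything else with an appropriate ErrorType. Extracted as a helper, the pattern is simply (a sketch, not part of the HCatalog API):

      private static HCatException asHCatException(ErrorType type, Exception e) {
          return (e instanceof HCatException) ? (HCatException) e
                                              : new HCatException(type, e);
      }

      // usage: catch (Exception e) { throw asHCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e); }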

        } catch(Exception e) {
            if( e instanceof HCatException ) {
                throw (HCatException) e;
            } else {
                throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
            }
        }

        Path src;
        if (dynamicPartitioningUsed){

                        client.dropPartition(tableInfo.getDatabaseName(),
                                tableInfo.getTableName(), p.getValues());
                    }
                } catch(Exception te) {
                    //Keep cause as the original exception
                    throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
                }
            }

            if( e instanceof HCatException ) {
                throw (HCatException) e;
            } else {
                throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
            }
        } finally {
            if( client != null ) {
                client.close();
            }

            Path finalOutputPath = getFinalPath(file, src, dest);

            if (dryRun) {
                // dry run: only check whether the move would collide with data already at the destination
                if (fs.exists(finalOutputPath)) {
                    throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in " + finalOutputPath + ", duplicate publish possible.");
                }
            } else {
                if (!fs.rename(file, finalOutputPath)) {
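                    // the rename most likely failed because finalOutputPath already exists:
                    // remove the stale copy and retry the rename once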
                    if (!fs.delete(finalOutputPath, true)) {
                        throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to delete existing path " + finalOutputPath);
                    }
                    if (!fs.rename(file, finalOutputPath)) {
                        throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to move output to " + dest);
                    }
                }
            }
        } else if(fs.getFileStatus(file).isDir()) {
            FileStatus[] paths = fs.listStatus(file);

    private Path getFinalPath(Path file, Path src,
                              Path dest) throws IOException {
        URI taskOutputUri = file.toUri();
        URI relativePath = src.toUri().relativize(taskOutputUri);
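        // URI.relativize() returns its argument unchanged when no relative path
        // exists, so the reference-equality test below detects the failure case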
        if (taskOutputUri == relativePath) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Can not get the relative path: base = " +
                    src + " child = " + file);
        }
        if (relativePath.getPath().length() > 0) {
            return new Path(dest, relativePath.getPath());
        } else {
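
The == check works because of how java.net.URI.relativize behaves; a standalone illustration:

        URI base  = URI.create("hdfs://nn:8020/warehouse/tbl/");
        URI child = URI.create("hdfs://nn:8020/warehouse/tbl/part-00000");
        URI other = URI.create("file:///tmp/out");

        base.relativize(child); // relative URI "part-00000" -> becomes new Path(dest, "part-00000")
        base.relativize(other); // cannot relativize: returns `other` itself, which the == test catches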

            } else {
                if ((maxDynamicPartitions != -1) && (status.length > maxDynamicPartitions)){
                    this.partitionsDiscovered = true;
                    throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
                            "Number of dynamic partitions being created "
                                    + "exceeds configured max allowable partitions["
                                    + maxDynamicPartitions
                                    + "], increase parameter ["
                                    + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
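
maxDynamicPartitions is read from the Hive configuration elsewhere in this class (the setOutput snippet further down calls getMaxDynamicPartitions(hiveConf)); a sketch of the likely lookup, using the same ConfVars constant the error message names:

        int maxDynamicPartitions = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);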

      // remove the source directory once the har archive has been written from it
      fs.delete(new Path(dir), true);
    } catch (Exception e){
      throw new HCatException("Error creating Har ["+harFile+"] from ["+dir+"]", e);
    }
  }
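
Note that this snippet uses the plain message-plus-cause constructor rather than an ErrorType-based one. Between them, the snippets on this page exercise four overloads (cause and harFile stand in for locals):

      new HCatException(ErrorType.ERROR_NOT_INITIALIZED);                      // type only
      new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "extra detail");       // type + detail message
      new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, cause);          // type + cause
      new HCatException("Error creating Har [" + harFile + "]", cause);        // message + cause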

        List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);
       
        for (String indexName : indexList) {
            Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
            if (!index.isDeferredRebuild()) {
                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
            }
        }
        StorageDescriptor sd = table.getSd();
       
        if (sd.isCompressed()) {
            throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a compressed partition from Pig/Mapreduce is not supported");
        }
       
        if (sd.getBucketCols()!=null && !sd.getBucketCols().isEmpty()) {
            throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with bucket definition from Pig/Mapreduce is not supported");
        }
       
        if (sd.getSortCols()!=null && !sd.getSortCols().isEmpty()) {
            throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
        }
       
        if (table.getPartitionKeysSize() == 0 ){
          if ((outputJobInfo.getPartitionValues() != null) && (!outputJobInfo.getPartitionValues().isEmpty())){
            // attempt made to save partition values in non-partitioned table - throw error.
            throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
                "Partition values specified for non-partitioned table");
          }
          // non-partitioned table
          outputJobInfo.setPartitionValues(new HashMap<String, String>());

        } else {
          // partitioned table, we expect partition values
          // convert user specified map to have lower case key names
          Map<String, String> valueMap = new HashMap<String, String>();
          if (outputJobInfo.getPartitionValues() != null){
            for(Map.Entry<String, String> entry : outputJobInfo.getPartitionValues().entrySet()) {
              valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
            }
          }

          if ((outputJobInfo.getPartitionValues() == null)
              || (outputJobInfo.getPartitionValues().size() < table.getPartitionKeysSize())){
            // dynamic partitioning use case - partition values were null or only
            // partially specified; determine which keys were left unspecified.
            List<String> dynamicPartitioningKeys = new ArrayList<String>();
            for (FieldSchema fs : table.getPartitionKeys()){
              if (!valueMap.containsKey(fs.getName().toLowerCase())){
                dynamicPartitioningKeys.add(fs.getName().toLowerCase());
              }
            }

            if (valueMap.size() + dynamicPartitioningKeys.size() != table.getPartitionKeysSize()){
              // If this isn't equal, then bogus key values have been inserted, error out.
              throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,"Invalid partition keys specified");
            }

            outputJobInfo.setDynamicPartitioningKeys(dynamicPartitioningKeys);
            String dynHash;
            if ((dynHash = conf.get(HCatConstants.HCAT_DYNAMIC_PTN_JOBID)) == null){
              dynHash = String.valueOf(Math.random());
            }
            conf.set(HCatConstants.HCAT_DYNAMIC_PTN_JOBID, dynHash);

          }

          outputJobInfo.setPartitionValues(valueMap);
        }

        StorageDescriptor tblSD = table.getSd();
        HCatSchema tableSchema = HCatUtil.extractSchemaFromStorageDescriptor(tblSD);
        StorerInfo storerInfo = InternalUtil.extractStorerInfo(tblSD,table.getParameters());

        List<String> partitionCols = new ArrayList<String>();
        for(FieldSchema schema : table.getPartitionKeys()) {
          partitionCols.add(schema.getName());
        }

        HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(job.getConfiguration(), storerInfo);

        //Serialize the output info into the configuration
        outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table));
        outputJobInfo.setOutputSchema(tableSchema);
        harRequested = getHarRequested(hiveConf);
        outputJobInfo.setHarRequested(harRequested);
        maxDynamicPartitions = getMaxDynamicPartitions(hiveConf);
        outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);

        HCatUtil.configureOutputStorageHandler(storageHandler,job,outputJobInfo);

        Path tblPath = new Path(table.getSd().getLocation());

        /*  Set the umask in conf so that files/dirs get created with table-dir
         * permissions. The following three assumptions are made:
         * 1. Actual files/dirs creation is done by RecordWriter of underlying
         * output format. It is assumed that they use default permissions while creation.
         * 2. Default Permissions = FsPermission.getDefault() = 777.
         * 3. UMask is honored by underlying filesystem.
         */

        FsPermission.setUMask(conf, FsPermission.getDefault().applyUMask(
            tblPath.getFileSystem(conf).getFileStatus(tblPath).getPermission()));

        try {
          UserGroupInformation.class.getMethod("isSecurityEnabled");
          Security.getInstance().handleSecurity(job, outputJobInfo, client, conf, harRequested);
        } catch (NoSuchMethodException e) {
          LOG.info("Security is not supported by this version of hadoop.");
        }
      } catch(Exception e) {
        if( e instanceof HCatException ) {
          throw (HCatException) e;
        } else {
          throw new HCatException(ErrorType.ERROR_SET_OUTPUT, e);
        }
      } finally {
        if( client != null ) {
          client.close();
        }
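
The validation above appears to run inside HCatOutputFormat.setOutput(), so callers hit these checks as HCatExceptions at job-setup time. A hedged sketch of the calling side, assuming the setOutput and OutputJobInfo.create signatures from the same codebase ("default" and "my_table" are hypothetical):

      Job job = new Job(new Configuration(), "hcat-write-example");
      Map<String, String> partitionValues = new HashMap<String, String>();
      partitionValues.put("ds", "20120614");

      try {
          HCatOutputFormat.setOutput(job,
                  OutputJobInfo.create("default", "my_table", partitionValues));
      } catch (HCatException e) {
          // e.g. ERROR_NOT_SUPPORTED, ERROR_INVALID_PARTITION_VALUES, ERROR_SET_OUTPUT
          System.err.println(e.getErrorType() + ": " + e.getMessage());
      }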
