Package org.apache.hive.hcatalog.common

Examples of org.apache.hive.hcatalog.common.HCatException
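
All of the excerpts on this page wrap lower-level failures in HCatException together with an ErrorType that categorizes the failure. Below is a minimal, self-contained sketch of that pattern; it assumes only the constructors visible in the excerpts plus a getErrorType() accessor, and the class and method names in the sketch itself are illustrative.

import java.io.IOException;

import org.apache.hive.hcatalog.common.ErrorType;
import org.apache.hive.hcatalog.common.HCatException;

public class HCatExceptionDemo {

  // Illustrative helper: wrap a low-level failure with a categorized ErrorType,
  // mirroring the catch blocks in the excerpts below.
  static void initWriter() throws HCatException {
    try {
      throw new IOException("simulated failure");
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }

  public static void main(String[] args) {
    try {
      initWriter();
    } catch (HCatException e) {
      // HCatException extends IOException; the ErrorType drives its error code and message.
      System.err.println(e.getErrorType() + ": " + e.getMessage());
    }
  }
}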


    try {
      HCatOutputFormat outFormat = new HCatOutputFormat();
      outFormat.checkOutputSpecs(job);
      outFormat.getOutputCommitter(ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
          job.getConfiguration(), ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID())).setupJob(job);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
    WriterContext cntxt = new WriterContext();
    cntxt.setConf(job.getConfiguration());
    return cntxt;
  }


    } catch (IOException e) {
      if (null != committer) {
        try {
          committer.abortTask(cntxt);
        } catch (IOException e1) {
          throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
        }
      }
      throw new HCatException("Failed while writing", e);
    } catch (InterruptedException e) {
      if (null != committer) {
        try {
          committer.abortTask(cntxt);
        } catch (IOException e1) {
          throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
        }
      }
      throw new HCatException("Failed while writing", e);
    }
  }

    try {
      new HCatOutputFormat().getOutputCommitter(ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
          context.getConf(), ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID()))
        .commitJob(ShimLoader.getHadoopShims().getHCatShim().createJobContext(context.getConf(), null));
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }

    try {
      new HCatOutputFormat().getOutputCommitter(ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
        context.getConf(), ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID()))
        .abortJob(ShimLoader.getHadoopShims().getHCatShim().createJobContext(
            context.getConf(), null), State.FAILED);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }

        if (LOG.isDebugEnabled()) {
          LOG.debug("Testing if moving file: [" + file + "] to ["
              + finalOutputPath + "] would cause a problem");
        }
        if (fs.exists(finalOutputPath)) {
          throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in " + finalOutputPath
              + ", duplicate publish not possible.");
        }
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Moving file: [" + file + "] to [" + finalOutputPath + "]");
        }
        // Make sure the parent directory exists.  It is not an error
        // to recreate an existing directory
        fs.mkdirs(finalOutputPath.getParent());
        if (!fs.rename(file, finalOutputPath)) {
          if (!fs.delete(finalOutputPath, true)) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to delete existing path " + finalOutputPath);
          }
          if (!fs.rename(file, finalOutputPath)) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to move output to " + finalOutputPath);
          }
        }
      }
    } else if (fs.getFileStatus(file).isDir()) {
      FileStatus[] children = fs.listStatus(file);
      FileStatus firstChild = null;
      if (children != null) {
        int index = 0;
        while (index < children.length) {
          if (!children[index].getPath().getName().equals(TEMP_DIR_NAME)
              && !children[index].getPath().getName().equals(LOGS_DIR_NAME)
              && !children[index].getPath().getName().equals(SUCCEEDED_FILE_NAME)) {
            firstChild = children[index];
            break;
          }
          index++;
        }
      }
      if (firstChild != null && firstChild.isDir()) {
        // If the first child is a directory, the rest will be directories too
        // (per the HCatalog directory layout), so recurse.
        for (FileStatus child : children) {
          moveTaskOutputs(fs, child.getPath(), srcDir, destDir, dryRun);
        }
      } else {

        if (!dryRun) {
          if (dynamicPartitioningUsed) {
            // Optimization: if the first child is a file, we have reached the leaf directory;
            // move the parent directory itself instead of each file under it. See HCATALOG-538.

            final Path parentDir = finalOutputPath.getParent();
            // Create the directory
            Path placeholder = new Path(parentDir, "_placeholder");
            if (fs.mkdirs(parentDir)) {
              // It looks odd, but we need a placeholder file here;
              // otherwise rename cannot move the file to the right place.
              fs.create(placeholder).close();
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug("Moving directory: " + file + " to " + parentDir);
            }
            if (!fs.rename(file, parentDir)) {
              final String msg = "Failed to move file: " + file + " to " + parentDir;
              LOG.error(msg);
              throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg);
            }
            fs.delete(placeholder, false);
          } else {
            // Without dynamic partitioning, each file has to be moved individually
            for (FileStatus child : children) {
              moveTaskOutputs(fs, child.getPath(), srcDir, destDir, dryRun);
            }
          }
        } else {
          if (fs.exists(finalOutputPath)) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in " + finalOutputPath
                + ", duplicate publish not possible.");
          }
        }
      }
    } else {
      // Should never happen
      final String msg = "Unknown file type being asked to be moved, erroring out";
      throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg);
    }
  }
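
The rename logic above reduces to: ensure the destination's parent exists, refuse to overwrite already-published data, and surface any rename failure as ERROR_MOVE_FAILED. Below is a condensed sketch of that pattern; the helper class and method names are illustrative, and the real committer additionally performs the delete-and-retry step shown above.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hive.hcatalog.common.ErrorType;
import org.apache.hive.hcatalog.common.HCatException;

final class MoveHelper {

  // Move one task output into its final location, failing the same way the committer above does.
  static void moveOrFail(FileSystem fs, Path src, Path dest) throws IOException {
    // Recreating an existing parent directory is not an error.
    fs.mkdirs(dest.getParent());
    if (fs.exists(dest)) {
      throw new HCatException(ErrorType.ERROR_MOVE_FAILED,
          "Data already exists in " + dest + ", duplicate publish not possible.");
    }
    if (!fs.rename(src, dest)) {
      throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to move output to " + dest);
    }
  }
}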

  private Path getFinalPath(Path file, Path src,
                Path dest) throws IOException {
    URI taskOutputUri = file.toUri();
    URI relativePath = src.toUri().relativize(taskOutputUri);
    if (taskOutputUri == relativePath) {
      throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Can not get the relative path: base = " +
        src + " child = " + file);
    }
    if (relativePath.getPath().length() > 0) {
      return new Path(dest, relativePath.getPath());
    } else {
      // An empty relative path means the file is the source directory itself; use the destination as-is.
      return dest;
    }
  }

        //            +loadPath+"] with depth["+jobInfo.getTable().getPartitionKeysSize()
        //            +"], dynSpec["+dynPathSpec+"]");
      } else {
        if ((maxDynamicPartitions != -1) && (status.length > maxDynamicPartitions)) {
          this.partitionsDiscovered = true;
          throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
            "Number of dynamic partitions being created "
              + "exceeds configured max allowable partitions["
              + maxDynamicPartitions
              + "], increase parameter ["
              + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
              + "] if needed.");

            client.dropPartition(tableInfo.getDatabaseName(),
                tableInfo.getTableName(), p.getValues());
          }
        } catch (Exception te) {
          // Keep the original exception (e), not the drop-partition failure (te), as the cause
          throw new HCatException(
              ErrorType.ERROR_PUBLISHING_PARTITION, e);
        }
      }
      if (e instanceof HCatException) {
        throw (HCatException) e;
      } else {
        throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
      }
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }
  }

      List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);

      for (String indexName : indexList) {
        Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
        if (!index.isDeferredRebuild()) {
          throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
        }
      }
      StorageDescriptor sd = table.getTTable().getSd();

      if (sd.isCompressed()) {
        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a compressed partition from Pig/Mapreduce is not supported");
      }

      if (sd.getBucketCols() != null && !sd.getBucketCols().isEmpty()) {
        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with bucket definition from Pig/Mapreduce is not supported");
      }

      if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) {
        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
      }

      if (table.getTTable().getPartitionKeysSize() == 0) {
        if ((outputJobInfo.getPartitionValues() != null) && (!outputJobInfo.getPartitionValues().isEmpty())) {
          // attempt made to save partition values in non-partitioned table - throw error.
          throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
            "Partition values specified for non-partitioned table");
        }
        // non-partitioned table
        outputJobInfo.setPartitionValues(new HashMap<String, String>());

      } else {
        // partitioned table, we expect partition values
        // convert user specified map to have lower case key names
        Map<String, String> valueMap = new HashMap<String, String>();
        if (outputJobInfo.getPartitionValues() != null) {
          for (Map.Entry<String, String> entry : outputJobInfo.getPartitionValues().entrySet()) {
            valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
          }
        }

        if ((outputJobInfo.getPartitionValues() == null)
          || (outputJobInfo.getPartitionValues().size() < table.getTTable().getPartitionKeysSize())) {
          // dynamic partition usecase - partition values were null, or not all were specified
          // need to figure out which keys are not specified.
          List<String> dynamicPartitioningKeys = new ArrayList<String>();
          boolean firstItem = true;
          for (FieldSchema fs : table.getPartitionKeys()) {
            if (!valueMap.containsKey(fs.getName().toLowerCase())) {
              dynamicPartitioningKeys.add(fs.getName().toLowerCase());
            }
          }

          if (valueMap.size() + dynamicPartitioningKeys.size() != table.getTTable().getPartitionKeysSize()) {
            // If this isn't equal, then bogus key values have been inserted, error out.
            throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES, "Invalid partition keys specified");
          }

          outputJobInfo.setDynamicPartitioningKeys(dynamicPartitioningKeys);
          String dynHash;
          if ((dynHash = conf.get(HCatConstants.HCAT_DYNAMIC_PTN_JOBID)) == null) {
            dynHash = String.valueOf(Math.random());
//              LOG.info("New dynHash : ["+dynHash+"]");
//            }else{
//              LOG.info("Old dynHash : ["+dynHash+"]");
          }
          conf.set(HCatConstants.HCAT_DYNAMIC_PTN_JOBID, dynHash);

        }

        outputJobInfo.setPartitionValues(valueMap);
      }

      // To get around hbase failure on single node, see BUG-4383
      conf.set("dfs.client.read.shortcircuit", "false");
      HCatSchema tableSchema = HCatUtil.extractSchema(table);
      StorerInfo storerInfo =
        InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());

      List<String> partitionCols = new ArrayList<String>();
      for (FieldSchema schema : table.getPartitionKeys()) {
        partitionCols.add(schema.getName());
      }

      HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);

      //Serialize the output info into the configuration
      outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
      outputJobInfo.setOutputSchema(tableSchema);
      harRequested = getHarRequested(hiveConf);
      outputJobInfo.setHarRequested(harRequested);
      maxDynamicPartitions = getMaxDynamicPartitions(hiveConf);
      outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);

      HCatUtil.configureOutputStorageHandler(storageHandler, conf, outputJobInfo);

      Path tblPath = new Path(table.getTTable().getSd().getLocation());

      /*  Set the umask in conf such that files/dirs get created with table-dir
      * permissions. Following three assumptions are made:
      * 1. Actual files/dirs creation is done by RecordWriter of underlying
      * output format. It is assumed that they use default permissions during creation.
      * 2. Default Permissions = FsPermission.getDefault() = 777.
      * 3. UMask is honored by underlying filesystem.
      */

      FsPermission.setUMask(conf, FsPermission.getDefault().applyUMask(
        tblPath.getFileSystem(conf).getFileStatus(tblPath).getPermission()));

      if (Security.getInstance().isSecurityEnabled()) {
        Security.getInstance().handleSecurity(credentials, outputJobInfo, client, conf, harRequested);
      }
    } catch (Exception e) {
      if (e instanceof HCatException) {
        throw (HCatException) e;
      } else {
        throw new HCatException(ErrorType.ERROR_SET_OUTPUT, e);
      }
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }
  }
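
Note the recurring idiom in the outer catch blocks above: an HCatException is re-thrown unchanged so its original ErrorType survives, while any other exception is wrapped with a context-specific ErrorType. A minimal sketch of that idiom follows (the helper class and method names are illustrative).

import org.apache.hive.hcatalog.common.ErrorType;
import org.apache.hive.hcatalog.common.HCatException;

final class ErrorWrapping {

  // Re-throw HCatExceptions as-is; wrap anything else so callers always see an HCatException.
  static HCatException asHCatException(Exception e, ErrorType fallback) {
    if (e instanceof HCatException) {
      return (HCatException) e;
    }
    return new HCatException(fallback, e);
  }
}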

  /**
   * Gets the OutputJobInfo that was serialized into the job configuration,
   * failing with ERROR_NOT_INITIALIZED if output has not been set up.
   *
   * @param conf the job Configuration
   * @return the deserialized OutputJobInfo
   * @throws IOException the IO exception
   */
  public static OutputJobInfo getJobInfo(Configuration conf) throws IOException {
    String jobString = conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
    if (jobString == null) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED);
    }

    return (OutputJobInfo) HCatUtil.deserialize(jobString);
  }
