Package org.apache.hive.hcatalog.common

Examples of org.apache.hive.hcatalog.common.HCatException
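HCatException is HCatalog's checked exception type: it extends java.io.IOException and carries an ErrorType enum constant that callers can recover with getErrorType(). A minimal throw-and-inspect sketch (the wrapping method is assumed):

    import org.apache.hive.hcatalog.common.ErrorType;
    import org.apache.hive.hcatalog.common.HCatException;

    void demo() throws HCatException {
      try {
        throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, "unexpected state");
      } catch (HCatException e) {
        // The ErrorType survives the throw and can drive recovery decisions.
        assert e.getErrorType() == ErrorType.ERROR_INTERNAL_EXCEPTION;
      }
    }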


      if (dynamicPartVals != null) {
        // dynamic part vals specified
        List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
        if (dynamicPartVals.size() != dynamicPartKeys.size()) {
          throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
            "Unable to configure dynamic partitioning for storage handler, mismatch between"
              + " number of partition values obtained[" + dynamicPartVals.size()
              + "] and number of partition values required[" + dynamicPartKeys.size() + "]");
        }
        for (int i = 0; i < dynamicPartKeys.size(); i++) {
          partitionValues.put(dynamicPartKeys.get(i), dynamicPartVals.get(i));
        }

        jobInfo.setPartitionValues(partitionValues);
      }

      HCatUtil.configureOutputStorageHandler(storageHandler, conf, jobInfo);
    } catch (Exception e) {
      if (e instanceof HCatException) {
        throw (HCatException) e;
      } else {
        throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
      }
    }
  }
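The catch block above shows the idiom that recurs throughout these examples: catch a broad Exception, rethrow it unchanged if it is already an HCatException, and otherwise wrap it with an appropriate ErrorType so the original cause is preserved. A minimal sketch of the idiom (the helper name and Callable signature are assumptions, not HCatalog API):

    import java.util.concurrent.Callable;
    import org.apache.hive.hcatalog.common.ErrorType;
    import org.apache.hive.hcatalog.common.HCatException;

    // Hypothetical helper: run a task, rethrowing HCatExceptions as-is and
    // wrapping anything else so callers always see an HCatException.
    static void runWrapped(Callable<Void> task) throws HCatException {
      try {
        task.call();
      } catch (Exception e) {
        if (e instanceof HCatException) {
          throw (HCatException) e;  // preserve the original ErrorType
        }
        throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
      }
    }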


     * @return An instance of HCatCreateDBDesc
     * @throws HCatException if the database name has not been set.
     */
    public HCatCreateDBDesc build() throws HCatException {
      if (this.dbName == null) {
        throw new HCatException("Database name cannot be null.");
      }
      HCatCreateDBDesc desc = new HCatCreateDBDesc(this.dbName);
      desc.comment = this.innerComment;
      desc.locationUri = this.innerLoc;
      desc.dbProperties = this.innerDBProps;
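For reference, typical use of this builder looks like the following sketch (the database name and options are illustrative):

    import org.apache.hive.hcatalog.api.HCatCreateDBDesc;
    import org.apache.hive.hcatalog.common.HCatException;

    HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create("reports_db")
        .ifNotExists(true)
        .comment("staging area for nightly reports")
        .build();  // build() throws HCatException if no database name was supplied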

          sh.getSerDeClass().getName());
        newTable.putToParameters(
          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
          storageHandler);
      } catch (HiveException e) {
        throw new HCatException(
          "Exception while creating instance of storage handler",
          e);
      }
    }
    newTable.setSd(sd);

      Class<? extends HCatClient> clientClass = Class.forName(className,
        true, JavaUtils.getClassLoader()).asSubclass(
          HCatClient.class);
      client = clientClass.newInstance();
    } catch (ClassNotFoundException e) {
      throw new HCatException(
        "ClassNotFoundException while creating client class.", e);
    } catch (InstantiationException e) {
      throw new HCatException(
        "InstantiationException while creating client class.", e);
    } catch (IllegalAccessException e) {
      throw new HCatException(
        "IllegalAccessException while creating client class.", e);
    }
    if (client != null) {
      client.initialize(conf);
    }
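Callers reach this reflection through the HCatClient.create() factory; a minimal usage sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hive.hcatalog.api.HCatClient;

    // create() performs the reflection shown above; any reflection failure
    // surfaces as an HCatException whose cause is the original error.
    HCatClient client = HCatClient.create(new Configuration());
    try {
      // ... issue metadata calls ...
    } finally {
      client.close();  // close() may itself throw HCatException
    }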

      case HiveParser.TOK_SHOWPARTITIONS:
      case HiveParser.TOK_SHOWTABLES:
        break;

      default:
        throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, "Unexpected token: " + ast.getToken());
      }

      authorizeDDL(context, rootTasks);

    } catch (HCatException e) {

          if (LOG.isDebugEnabled()) {
            LOG.debug("Testing if moving file: [" + file + "] to ["
                + finalOutputPath + "] would cause a problem");
          }
          if (fs.exists(finalOutputPath)) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in "
                + finalOutputPath + ", duplicate publish not possible.");
          }
        }
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Moving file: [" + file + "] to [" + finalOutputPath + "]");
        }
        // Make sure the parent directory exists.  It is not an error
        // to recreate an existing directory
        fs.mkdirs(finalOutputPath.getParent());
        if (!fs.rename(file, finalOutputPath)) {
          if (!fs.delete(finalOutputPath, true)) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to delete existing path " + finalOutputPath);
          }
          if (!fs.rename(file, finalOutputPath)) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to move output to " + finalOutputPath);
          }
        }
      }
    } else if (fs.getFileStatus(file).isDir()) {

      FileStatus[] children = fs.listStatus(file);
      FileStatus firstChild = null;
      if (children != null) {
        int index = 0;
        while (index < children.length) {
          if ( !children[index].getPath().getName().equals(TEMP_DIR_NAME)
              && !children[index].getPath().getName().equals(LOGS_DIR_NAME)
              && !children[index].getPath().getName().equals(SUCCEEDED_FILE_NAME)) {
            firstChild = children[index];
            break;
          }
          index++;
        }
      }
      if (firstChild != null && firstChild.isDir()) {
        // If the first child is a directory, the rest are directories too, per the
        // HCatalog directory layout, so recurse.
        for (FileStatus child : children) {
          moveTaskOutputs(fs, child.getPath(), srcDir, destDir, dryRun, immutable);
        }
      } else {

        if (!dryRun) {
          if (dynamicPartitioningUsed) {

            // Optimization: if the first child is a file, we have reached the leaf
            // directory, so move the parent directory itself instead of moving each
            // file under it. See HCATALOG-538.
            // Note for a future Append implementation: this optimization is another
            // reason dynamic partitioning is currently incompatible with append on
            // mutable tables.

            final Path parentDir = finalOutputPath.getParent();
            // Create the directory
            Path placeholder = new Path(parentDir, "_placeholder");
            if (fs.mkdirs(parentDir)) {
              // It is weird, but we need a placeholder here; otherwise
              // rename cannot move the file to the right place.
              fs.create(placeholder).close();
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug("Moving directory: " + file + " to " + parentDir);
            }

            // If custom dynamic location provided, need to rename to final output path
            Path dstPath = !customDynamicLocationUsed ? parentDir : finalOutputPath;
            if (!fs.rename(file, dstPath)) {
              final String msg = "Failed to move file: " + file + " to " + dstPath;
              LOG.error(msg);
              throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg);
            }
            fs.delete(placeholder, false);
          } else {

            // In case of no partition we have to move each file
            for (FileStatus child : children) {
              moveTaskOutputs(fs, child.getPath(), srcDir, destDir, dryRun, immutable);
            }

          }

        } else {
          if (immutable && fs.exists(finalOutputPath) && !MetaStoreUtils.isDirEmpty(fs, finalOutputPath)) {

            throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION, "Data already exists in " + finalOutputPath
                + ", duplicate publish not possible.");
          }

        }
      }
    } else {
      // Should never happen
      final String msg = "Unknown file type being asked to be moved, erroring out";
      throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg);
    }
  }
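The two failure modes above carry distinct error types, so a caller can tell a failed filesystem move from an unretryable duplicate publish. A hedged sketch (the retry policy is an assumption, not HCatalog's):

    import org.apache.hive.hcatalog.common.HCatException;

    // Classify a commit failure by the ErrorType carried on the exception.
    static boolean isRetryable(HCatException e) {
      switch (e.getErrorType()) {
        case ERROR_MOVE_FAILED:
          return true;   // a rename or delete failed; a retry may succeed
        case ERROR_DUPLICATE_PARTITION:
          return false;  // data already published to an immutable location
        default:
          return false;
      }
    }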

  private Path getFinalPath(FileSystem fs, Path file, Path src,
                Path dest, final boolean immutable) throws IOException {
    URI taskOutputUri = file.toUri();
    URI relativePath = src.toUri().relativize(taskOutputUri);
    // URI.relativize() returns its argument unchanged when it cannot compute a
    // relative path, so an identity comparison detects the failure.
    if (taskOutputUri == relativePath) {
      throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Cannot get the relative path: base = " +
        src + " child = " + file);
    }
    if (relativePath.getPath().length() > 0) {

      Path itemDest = new Path(dest, relativePath.getPath());
      if (!immutable) {
        String name = relativePath.getPath();
        String filetype;
        int index = name.lastIndexOf('.');
        if (index >= 0) {
          filetype = name.substring(index);
          name = name.substring(0, index);
        } else {
          filetype = "";
        }

        // Attempt to find COUNTER_MAX possible alternatives to a filename by
        // appending _a_N and seeing if that destination also clashes. If we're
        // still clashing after that, give up.
        final int COUNTER_MAX = 1000;
        int counter = 1;
        for (; fs.exists(itemDest) && counter < COUNTER_MAX; counter++) {
          itemDest = new Path(dest, name + (APPEND_SUFFIX + counter) + filetype);
        }

        if (counter == COUNTER_MAX) {
          throw new HCatException(ErrorType.ERROR_MOVE_FAILED,
              "Could not find a unique destination path for move: file = "
                  + file + " , src = " + src + ", dest = " + dest);
        }

      }
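The collision-avoidance loop above can be read as a standalone routine; a self-contained sketch (the method name is hypothetical; the "_a_" suffix and the limit of 1000 mirror APPEND_SUFFIX and COUNTER_MAX in the excerpt):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hive.hcatalog.common.ErrorType;
    import org.apache.hive.hcatalog.common.HCatException;

    // Probe dest/name+ext, then dest/name_a_1+ext, dest/name_a_2+ext, ...
    // until a free path is found or the attempt limit is exhausted.
    static Path uniqueDestination(FileSystem fs, Path dest, String name, String ext)
        throws IOException {
      final int COUNTER_MAX = 1000;
      Path candidate = new Path(dest, name + ext);
      for (int counter = 1; fs.exists(candidate); counter++) {
        if (counter >= COUNTER_MAX) {
          throw new HCatException(ErrorType.ERROR_MOVE_FAILED,
              "Could not find a unique destination path under " + dest);
        }
        candidate = new Path(dest, name + "_a_" + counter + ext);
      }
      return candidate;
    }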

      } else {
        if ((maxDynamicPartitions != -1) && (status.length > maxDynamicPartitions)) {
          this.partitionsDiscovered = true;
          throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
            "Number of dynamic partitions being created "
              + "exceeds configured max allowable partitions["
              + maxDynamicPartitions
              + "], increase parameter ["
              + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname

          if (!dynamicPartitioningUsed) {

            // Regular single-partition write into a partitioned table:
            // move data from the temp directory to the actual table directory.
            if (partitionsToAdd.size() > 1){
              throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION,
                  "More than one partition to publish in non-dynamic partitioning job");
            }
            Partition p = partitionsToAdd.get(0);
            Path src = new Path(jobInfo.getLocation());
            Path dest = new Path(p.getSd().getLocation());
            moveTaskOutputs(fs, src, src, dest, true, table.isImmutable());  // dryRun pass
            moveTaskOutputs(fs, src, src, dest, false, table.isImmutable()); // actual move
            if (!src.equals(dest)) {
              fs.delete(src, true);
            }

            // Now, we check whether the partition already exists. If not, we go ahead.
            // If it does, we error out for immutable tables; for mutable tables we check
            // that the existing partition's InputFormat matches the current job's (i.e.
            // the table's) InputFormat. If compatible, we skip the add; if incompatible,
            // we error out.

            boolean publishRequired = false;
            try {
              Partition existingP = client.getPartition(p.getDbName(), p.getTableName(), p.getValues());
              if (existingP != null) {
                if (table.isImmutable()) {
                  throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION,
                      "Attempted duplicate partition publish on to immutable table");
                } else {
                  if (!existingP.getSd().getInputFormat().equals(table.getInputFormatClass().getName())) {
                    throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION,
                        "Attempted partition append, where old partition format was "
                            + existingP.getSd().getInputFormat()
                            + " and table format was "
                            + table.getInputFormatClass().getName());
                  }
                }
              } else {
                publishRequired = true;
              }
            } catch (NoSuchObjectException e) {
              // All good, no such partition exists, move on.
              publishRequired = true;
            }
            if (publishRequired) {
              client.add_partitions(partitionsToAdd);
              partitionsAdded = partitionsToAdd;
            }

          } else {
            // Dynamic partitioning usecase
            if (!customDynamicLocationUsed) {
              Path src = new Path(ptnRootLocation);
              moveTaskOutputs(fs, src, src, tblPath, true, true); // dryRun = true, immutable = true
              moveTaskOutputs(fs, src, src, tblPath, false, true);
              if (!src.equals(tblPath)) {
                fs.delete(src, true);
              }
            } else {
              moveCustomLocationTaskOutputs(fs, table, hiveConf);
            }
            client.add_partitions(partitionsToAdd);
            partitionsAdded = partitionsToAdd;
          }
        }

        // Set permissions on each of the partitions we just created so that they
        // mimic the table's permissions.
        for (Partition p : partitionsAdded) {
          applyGroupAndPerms(fs, new Path(p.getSd().getLocation()), tblStat.getPermission(), tblStat.getGroup(), true);
        }

      }
    } catch (Exception e) {
      if (partitionsAdded.size() > 0) {
        try {
          // The job commit failed; try to roll back the partitions that were
          // already added to the metastore.
          for (Partition p : partitionsAdded) {
            client.dropPartition(tableInfo.getDatabaseName(),
                tableInfo.getTableName(), p.getValues());
          }
        } catch (Exception te) {
          // Keep cause as the original exception
          throw new HCatException(
              ErrorType.ERROR_PUBLISHING_PARTITION, e);
        }
      }
      if (e instanceof HCatException) {
        throw (HCatException) e;
      } else {
        throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
      }
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }
  }
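A client that knowingly re-publishes can filter out just the duplicate case by error type. A hedged sketch (the helper is hypothetical, and whether ERROR_DUPLICATE_PARTITION reaches this call site depends on the write path):

    import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
    import org.apache.hive.hcatalog.api.HCatClient;
    import org.apache.hive.hcatalog.common.ErrorType;
    import org.apache.hive.hcatalog.common.HCatException;

    // Add a partition, swallowing only the duplicate-publish failure.
    static void publishIgnoringDuplicates(HCatClient client, HCatAddPartitionDesc desc)
        throws HCatException {
      try {
        client.addPartition(desc);
      } catch (HCatException e) {
        if (e.getErrorType() != ErrorType.ERROR_DUPLICATE_PARTITION) {
          throw e;
        }
        // The partition already holds published data; treat as a no-op here.
      }
    }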

      cntxt.setInputSplits(hcif.getSplits(
          ShimLoader.getHadoopShims().getHCatShim().createJobContext(job.getConfiguration(), null)));
      cntxt.setConf(job.getConfiguration());
      return cntxt;
    } catch (IOException | InterruptedException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
  }
