Package org.apache.hcatalog.common

Examples of org.apache.hcatalog.common.HCatException
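
HCatException carries a typed ErrorType together with an optional message and cause. The snippets below use three constructor shapes: (ErrorType, String), (ErrorType, Throwable), and (String, Throwable). Here is a minimal sketch of the two most common patterns; the touchMetastore helper is hypothetical, a stand-in for any Hadoop or metastore call that can fail:

import java.io.IOException;

import org.apache.hcatalog.common.ErrorType;
import org.apache.hcatalog.common.HCatException;

public class HCatExceptionSketch {

    // Hypothetical stand-in for an HCatalog or Hadoop call that can fail.
    private void touchMetastore() throws IOException {
        throw new IOException("connection refused");
    }

    // Pattern 1: wrap a low-level cause with a typed error code.
    public void initialize() throws HCatException {
        try {
            touchMetastore();
        } catch (IOException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        }
    }

    // Pattern 2: raise a typed error with a descriptive message.
    public void checkPartitionValues(int found, int required) throws HCatException {
        if (found != required) {
            throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
                "Mismatch between number of partition values obtained[" + found
                    + "] and number of partition values required[" + required + "]");
        }
    }
}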


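Validating dynamic partition values while configuring an output storage handler: a count mismatch between the supplied values and the job's dynamic partitioning keys raises ERROR_INVALID_PARTITION_VALUES, and any other failure is rewrapped as ERROR_INIT_STORAGE_HANDLER: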
            if (dynamicPartVals != null) {
                // dynamic part vals specified
                List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
                if (dynamicPartVals.size() != dynamicPartKeys.size()) {
                    throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
                        "Unable to configure dynamic partitioning for storage handler, mismatch between"
                            + " number of partition values obtained[" + dynamicPartVals.size()
                            + "] and number of partition values required[" + dynamicPartKeys.size() + "]");
                }
                for (int i = 0; i < dynamicPartKeys.size(); i++) {
                    partitionValues.put(dynamicPartKeys.get(i), dynamicPartVals.get(i));
                }

//            // re-home location, now that we know the rest of the partvals
//            Table table = jobInfo.getTableInfo().getTable();
//
//            List<String> partitionCols = new ArrayList<String>();
//            for(FieldSchema schema : table.getPartitionKeys()) {
//              partitionCols.add(schema.getName());
//            }
                jobInfo.setPartitionValues(partitionValues);
            }

            HCatUtil.configureOutputStorageHandler(storageHandler, conf, jobInfo);
        } catch (Exception e) {
            if (e instanceof HCatException) {
                throw (HCatException) e;
            } else {
                throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
            }
        }
    }


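Building a read context: input splits are obtained through the Hadoop shim layer, and setup failures are rewrapped as ERROR_NOT_INITIALIZED: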
            cntxt.setInputSplits(hcif.getSplits(
                HCatHadoopShims.Instance.get().createJobContext(job.getConfiguration(), null)));
            cntxt.setConf(job.getConfiguration());
            return cntxt;
        } catch (IOException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        } catch (InterruptedException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        }
    }

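Creating and initializing a record reader inside a shim-created TaskAttemptContext; here too, setup failures surface as ERROR_NOT_INITIALIZED: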
        try {
            TaskAttemptContext cntxt = HCatHadoopShims.Instance.get().createTaskAttemptContext(conf, new TaskAttemptID());
            rr = inpFmt.createRecordReader(split, cntxt);
            rr.initialize(split, cntxt);
        } catch (IOException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        } catch (InterruptedException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        }
        return new HCatRecordItr(rr);
    }

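Preparing a write context: the output specification is checked and the output committer's setupJob is run before the WriterContext is returned: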
            HCatOutputFormat outFormat = new HCatOutputFormat();
            outFormat.checkOutputSpecs(job);
            outFormat.getOutputCommitter(HCatHadoopShims.Instance.get()
                .createTaskAttemptContext(job.getConfiguration(),
                    HCatHadoopShims.Instance.get().createTaskAttemptID()))
                .setupJob(job);
        } catch (IOException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        } catch (InterruptedException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        }
        WriterContext cntxt = new WriterContext();
        cntxt.setConf(job.getConfiguration());
        return cntxt;
    }

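Handling a write failure: the task is aborted through the committer first (an abort failure becomes ERROR_INTERNAL_EXCEPTION), then the original exception is rethrown wrapped in an HCatException: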
        } catch (IOException e) {
            if (null != committer) {
                try {
                    committer.abortTask(cntxt);
                } catch (IOException e1) {
                    throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
                }
            }
            throw new HCatException("Failed while writing", e);
        } catch (InterruptedException e) {
            if (null != committer) {
                try {
                    committer.abortTask(cntxt);
                } catch (IOException e1) {
                    throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
                }
            }
            throw new HCatException("Failed while writing", e);
        }
    }

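Committing the job through the HCatOutputFormat output committer: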
        try {
            new HCatOutputFormat()
                .getOutputCommitter(HCatHadoopShims.Instance.get()
                    .createTaskAttemptContext(context.getConf(),
                        HCatHadoopShims.Instance.get().createTaskAttemptID()))
                .commitJob(HCatHadoopShims.Instance.get().createJobContext(context.getConf(), null));
        } catch (IOException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        } catch (InterruptedException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        }
    }

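Aborting a failed job through the same committer, passing State.FAILED: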
        try {
            new HCatOutputFormat()
                .getOutputCommitter(HCatHadoopShims.Instance.get()
                    .createTaskAttemptContext(context.getConf(),
                        HCatHadoopShims.Instance.get().createTaskAttemptID()))
                .abortJob(HCatHadoopShims.Instance.get().createJobContext(context.getConf(), null), State.FAILED);
        } catch (IOException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        } catch (InterruptedException e) {
            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
        }
    }

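Inside moveTaskOutputs, which promotes task outputs to their final location; a pre-existing destination or a failed rename raises ERROR_MOVE_FAILED: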
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Testing if moving file: [" + file + "] to ["
                            + finalOutputPath + "] would cause a problem");
                }
                if (fs.exists(finalOutputPath)) {
                    throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in " + finalOutputPath
                            + ", duplicate publish not possible.");
                }
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Moving file: [" + file + "] to [" + finalOutputPath + "]");
                }
                // Make sure the parent directory exists.  It is not an error
                // to recreate an existing directory
                fs.mkdirs(finalOutputPath.getParent());
                if (!fs.rename(file, finalOutputPath)) {
                    if (!fs.delete(finalOutputPath, true)) {
                        throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to delete existing path " + finalOutputPath);
                    }
                    if (!fs.rename(file, finalOutputPath)) {
                        throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Failed to move output to " + finalOutputPath);
                    }
                }
            }
        } else if (fs.getFileStatus(file).isDir()) {
            FileStatus[] children = fs.listStatus(file);
            FileStatus firstChild = null;
            if (children != null) {
                int index=0;
                while (index < children.length) {
                    if (!children[index].getPath().getName().equals(TEMP_DIR_NAME)
                            && !children[index].getPath().getName().equals(LOGS_DIR_NAME)
                            && !children[index].getPath().getName().equals(SUCCEEDED_FILE_NAME)) {
                        firstChild = children[index];
                        break;
                    }
                    index++;
                }
            }
            if (firstChild != null && firstChild.isDir()) {
                // If the first child is a directory, the rest are directories too,
                // per the HCatalog directory structure; recurse in that case.
                for (FileStatus child : children) {
                    moveTaskOutputs(fs, child.getPath(), srcDir, destDir, dryRun);
                }
            } else {
                if (!dryRun) {
                    if (dynamicPartitioningUsed) {
                        // Optimization: if the first child is a file, we have reached a leaf
                        // directory, so move the parent directory itself instead of moving
                        // each file under it. See HCATALOG-538.

                        final Path parentDir = finalOutputPath.getParent();
                        // Create the destination parent directory.
                        Path placeholder = new Path(parentDir, "_placeholder");
                        if (fs.mkdirs(parentDir)) {
                            // It is weird, but we need a placeholder file here; otherwise
                            // the rename cannot move the output to the right place.
                            fs.create(placeholder).close();
                        }
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Moving directory: " + file + " to " + parentDir);
                        }
                        if (!fs.rename(file, parentDir)) {
                            final String msg = "Failed to move file: " + file + " to " + parentDir;
                            LOG.error(msg);
                            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg);
                        }
                        fs.delete(placeholder, false);
                    } else {
                        // Without dynamic partitioning, each file has to be moved individually.
                        for (FileStatus child : children) {
                            moveTaskOutputs(fs, child.getPath(), srcDir, destDir, dryRun);
                        }
                    }
                } else {
                    if (fs.exists(finalOutputPath)) {
                        throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in " + finalOutputPath
                                + ", duplicate publish not possible.");
                    }
                }
            }
        } else {
            // Should never happen
            final String msg = "Unknown file type being asked to be moved, erroring out";
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg);
        }
    }

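Computing the final destination for a task output file by relativizing it against the source directory: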
    private Path getFinalPath(Path file, Path src,
                              Path dest) throws IOException {
        URI taskOutputUri = file.toUri();
        // URI.relativize() returns the given URI unchanged when it cannot be
        // relativized against src, so reference equality signals failure here.
        URI relativePath = src.toUri().relativize(taskOutputUri);
        if (taskOutputUri == relativePath) {
            throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Can not get the relative path: base = " +
                src + " child = " + file);
        }
        if (relativePath.getPath().length() > 0) {
            return new Path(dest, relativePath.getPath());
        } else {
            // An empty relative path means file and src are the same location.
            return dest;
        }
    }

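Enforcing the dynamic partition limit: creating more partitions than the configured maximum raises ERROR_TOO_MANY_DYNAMIC_PTNS: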
            } else {
                if ((maxDynamicPartitions != -1) && (status.length > maxDynamicPartitions)) {
                    this.partitionsDiscovered = true;
                    throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
                        "Number of dynamic partitions being created "
                            + "exceeds configured max allowable partitions["
                            + maxDynamicPartitions
                            + "], increase parameter ["
                            + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
                            + "] if needed.");
                }
            }
