Examples of org.apache.hcatalog.api.HCatPartition


Examples of org.apache.hcatalog.api.HCatPartition

                                         Map<String, String> partitionSpec) throws FalconException {
        LOG.info("List partitions for : " + tableName + ", partition spec: " + partitionSpec);

        try {
            HCatClient client = get(catalogUrl);
            HCatPartition hCatPartition = client.getPartition(database, tableName, partitionSpec);
            return createCatalogPartition(hCatPartition);
        } catch (HCatException e) {
            throw new FalconException(e);
        }
    }
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

    public void testGetPartitionsFullSpec() throws Exception {
        Map<String, String> partitionSpec = new HashMap<String, String>();
        partitionSpec.put("ds", "20130902");
        partitionSpec.put("region", "in");

        HCatPartition ptn = client.getPartition(DATABASE_NAME, TABLE_NAME, partitionSpec);
        Assert.assertTrue(ptn != null);
    }
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(context.getCluster().getCluster(),
                OozieClient.FILTER_NAME + "=FALCON_PROCESS_DEFAULT_" + pigProcessName);
        Assert.assertEquals(WorkflowJob.Status.SUCCEEDED, jobInfo.getStatus());

        HCatPartition partition = HiveTestUtils.getPartition(
                metastoreUrl, DATABASE_NAME, OUT_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertTrue(partition != null);

        InstancesResult response = context.getService().path("api/instance/running/process/" + pigProcessName)
                .header("Remote-User", "guest")
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(context.getCluster().getCluster(),
                OozieClient.FILTER_NAME + "=FALCON_PROCESS_DEFAULT_" + hiveProcessName);
        Assert.assertEquals(WorkflowJob.Status.SUCCEEDED, jobInfo.getStatus());

        HCatPartition partition = HiveTestUtils.getPartition(
                metastoreUrl, DATABASE_NAME, OUT_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertTrue(partition != null);

        InstancesResult response = context.getService().path("api/instance/running/process/" + hiveProcessName)
                .header("Remote-User", "guest")
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        filePath = targetContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        HCatPartition sourcePartition = HiveTestUtils.getPartition(
                sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(sourcePartition);

        filePath = sourceContext.overlayParametersOverTemplate("/table/customer-table-replicating-feed.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath));

        // wait until the workflow job completes
        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(targetContext.getCluster().getCluster(),
                OozieClient.FILTER_NAME + "=FALCON_FEED_REPLICATION_" + feedName);
        Assert.assertEquals(jobInfo.getStatus(), WorkflowJob.Status.SUCCEEDED);

        // verify if the partition on the target exists
        HCatPartition targetPartition = HiveTestUtils.getPartition(
                targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(targetPartition);

        InstancesResult response = targetContext.getService().path("api/instance/running/feed/" + feedName)
                .header("Remote-User", "guest")
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        filePath = targetContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        HCatPartition sourcePartition = HiveTestUtils.getPartition(
                sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(sourcePartition);

        addPartitionToTarget();
        // verify if the partition on the target exists before replication starts
        // to see import drops partition before importing partition
        HCatPartition targetPartition = HiveTestUtils.getPartition(
                targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(targetPartition);

        filePath = sourceContext.overlayParametersOverTemplate("/table/customer-table-replicating-feed.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath));
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        filePath = TestContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        HCatPartition sourcePartition = HiveTestUtils.getPartition(
                sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(sourcePartition);

        filePath = TestContext.overlayParametersOverTemplate("/table/customer-table-replicating-feed.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath));

        // wait until the workflow job completes
        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(targetContext.getCluster().getCluster(),
                OozieClient.FILTER_NAME + "=FALCON_FEED_REPLICATION_" + feedName);
        Assert.assertEquals(jobInfo.getStatus(), WorkflowJob.Status.SUCCEEDED);

        // verify if the partition on the target exists
        HCatPartition targetPartition = HiveTestUtils.getPartition(
                targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(targetPartition);

        InstancesResult response = targetContext.getService().path("api/instance/running/feed/" + feedName)
                .header("Cookie", targetContext.getAuthenticationToken())
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        filePath = TestContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submit -type cluster -file " + filePath));

        HCatPartition sourcePartition = HiveTestUtils.getPartition(
                sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(sourcePartition);

        addPartitionToTarget();
        // verify if the partition on the target exists before replication starts
        // to see import drops partition before importing partition
        HCatPartition targetPartition = HiveTestUtils.getPartition(
                targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertNotNull(targetPartition);

        filePath = TestContext.overlayParametersOverTemplate("/table/customer-table-replicating-feed.xml", overlay);
        Assert.assertEquals(0, TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath));
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(context.getCluster().getCluster(),
                OozieClient.FILTER_NAME + "=FALCON_PROCESS_DEFAULT_" + pigProcessName);
        Assert.assertEquals(WorkflowJob.Status.SUCCEEDED, jobInfo.getStatus());

        HCatPartition partition = HiveTestUtils.getPartition(
                metastoreUrl, DATABASE_NAME, OUT_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertTrue(partition != null);

        InstancesResult response = context.getService().path("api/instance/running/process/" + pigProcessName)
                .header("Cookie", context.getAuthenticationToken())
View Full Code Here

Examples of org.apache.hcatalog.api.HCatPartition

        WorkflowJob jobInfo = OozieTestUtils.getWorkflowJob(context.getCluster().getCluster(),
                OozieClient.FILTER_NAME + "=FALCON_PROCESS_DEFAULT_" + hiveProcessName);
        Assert.assertEquals(WorkflowJob.Status.SUCCEEDED, jobInfo.getStatus());

        HCatPartition partition = HiveTestUtils.getPartition(
                metastoreUrl, DATABASE_NAME, OUT_TABLE_NAME, "ds", PARTITION_VALUE);
        Assert.assertTrue(partition != null);

        InstancesResult response = context.getService().path("api/instance/running/process/" + hiveProcessName)
                .header("Cookie", context.getAuthenticationToken())
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.