Package org.apache.falcon.entity

Examples of org.apache.falcon.entity.CatalogStorage


        HashMap<String, String> props = new HashMap<String, String>();
        for (Property prop : coord.getAction().getWorkflow().getConfiguration().getProperty()) {
            props.put(prop.getName(), prop.getValue());
        }

        final CatalogStorage srcStorage = (CatalogStorage) FeedHelper.createStorage(srcCluster, tableFeed);
        final CatalogStorage trgStorage = (CatalogStorage) FeedHelper.createStorage(trgCluster, tableFeed);

        // verify the replication param that feed replicator depends on
        Assert.assertEquals(props.get("sourceRelativePaths"), "IGNORE");

        Assert.assertTrue(props.containsKey("distcpSourcePaths"));
View Full Code Here
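
FeedHelper.createStorage returns the Storage subtype that matches the feed's definition on a cluster; when the feed is backed by a Hive table it can be cast to CatalogStorage, which exposes the catalog coordinates the replication workflow is wired with. A minimal sketch of inspecting those coordinates, assuming a Cluster entity and a table-backed Feed are already unmarshalled (variable names are illustrative):

        // Sketch: inspect the catalog coordinates a table-backed feed resolves to.
        Storage storage = FeedHelper.createStorage(cluster, tableFeed);
        if (storage.getType() == Storage.TYPE.TABLE) {
            CatalogStorage tableStorage = (CatalogStorage) storage;
            System.out.println("catalog url: " + tableStorage.getCatalogUrl());
            System.out.println("database   : " + tableStorage.getDatabase());
            System.out.println("table      : " + tableStorage.getTable());
            System.out.println("partitions : " + tableStorage.getPartitions());
        }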


    private Map<String, String> getExpectedProperties(Feed inFeed, Feed outFeed, Process process,
                                                      Cluster cluster) throws FalconException {
        Map<String, String> expected = new HashMap<String, String>();
        for (Input input : process.getInputs().getInputs()) {
            CatalogStorage storage = (CatalogStorage) FeedHelper.createStorage(cluster, inFeed);
            propagateStorageProperties(input.getName(), storage, expected);
        }

        for (Output output : process.getOutputs().getOutputs()) {
            CatalogStorage storage = (CatalogStorage) FeedHelper.createStorage(cluster, outFeed);
            propagateStorageProperties(output.getName(), storage, expected);
        }

        return expected;
    }
View Full Code Here
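
propagateStorageProperties is not shown in this fragment; a plausible shape, purely as an illustration, is to flatten each input's or output's catalog coordinates into the expected map under keys derived from the port name. The key names below are assumptions for the sketch, not the actual Falcon property scheme:

    // Hypothetical sketch of propagateStorageProperties; the real Falcon helper may use
    // different key names. It only illustrates the prefix-per-input/output idea.
    private void propagateStorageProperties(String name, CatalogStorage storage,
                                            Map<String, String> props) {
        props.put(name + "_catalog_url", storage.getCatalogUrl());
        props.put(name + "_database", storage.getDatabase());
        props.put(name + "_table", storage.getTable());
    }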

        Storage storage = FeedHelper.createStorage(cluster, feed);
        if (storage.getType() == Storage.TYPE.FILESYSTEM) {  // FS does NOT use staging dirs
            return;
        }

        final CatalogStorage tableStorage = (CatalogStorage) storage;
        String stagingDir = FeedHelper.getStagingDir(cluster, feed, tableStorage, Tag.REPLICATION);
        Path stagingPath = new Path(stagingDir + "/*/*/*"); // stagingDir/dataOutPartitionValue/nominal-time/data
        FileSystem fs = getFileSystem(cluster);
        try {
            FileStatus[] paths = fs.globStatus(stagingPath);
View Full Code Here
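
The glob above matches stagingDir/<partition-value>/<nominal-time>/data entries left behind by table replication. A minimal sketch of acting on those matches, assuming the same Hadoop FileSystem handle; the 24-hour retention window is an illustrative assumption:

        // Sketch: remove staging entries older than a cut-off so the staging area does not grow unbounded.
        long cutoffMillis = System.currentTimeMillis() - 24 * 60 * 60 * 1000L;
        FileStatus[] matches = fs.globStatus(stagingPath);
        if (matches != null) {
            for (FileStatus status : matches) {
                if (status.getModificationTime() < cutoffMillis) {
                    fs.delete(status.getPath(), true); // recursive delete of the staged data
                }
            }
        }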

        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
        Feed tableFeed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(inputStream);
        getStore().publish(EntityType.FEED, tableFeed);

        final Cluster srcCluster = dfsCluster.getCluster();
        final CatalogStorage sourceStorage = (CatalogStorage) FeedHelper.createStorage(srcCluster, tableFeed);
        String sourceStagingDir = FeedHelper.getStagingDir(srcCluster, tableFeed, sourceStorage, Tag.REPLICATION);

        sourceStagingPath1 = new Path(sourceStagingDir + "/ds=2012092400/" + System.currentTimeMillis());
        sourceStagingPath2 = new Path(sourceStagingDir + "/ds=2012092500/" + System.currentTimeMillis());

        final Cluster targetCluster = targetDfsCluster.getCluster();
        final CatalogStorage targetStorage = (CatalogStorage) FeedHelper.createStorage(targetCluster, tableFeed);
        String targetStagingDir = FeedHelper.getStagingDir(targetCluster, tableFeed, targetStorage, Tag.REPLICATION);

        targetStagingPath1 = new Path(targetStagingDir + "/ds=2012092400/" + System.currentTimeMillis());
        targetStagingPath2 = new Path(targetStagingDir + "/ds=2012092500/" + System.currentTimeMillis());
    }
View Full Code Here
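
The setup above only computes the staging paths; before the replication/retention assertions run, the test presumably materialises them on each embedded cluster. A minimal sketch of that step, where getFileSystem(...) stands in for however the test obtains an HDFS handle for a cluster:

        // Sketch: create the computed staging directories so later assertions have data to find.
        FileSystem sourceFs = getFileSystem(srcCluster);
        sourceFs.mkdirs(sourceStagingPath1);
        sourceFs.mkdirs(sourceStagingPath2);

        FileSystem targetFs = getFileSystem(targetCluster);
        targetFs.mkdirs(targetStagingPath1);
        targetFs.mkdirs(targetStagingPath2);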

    /**
     * Returns the create time of the catalog partition that the given feed URI template
     * resolves to, or 0 if the partition does not exist.
     *
     * @throws FalconException
     */
    private long getTablePartitionCreateTimeMetric(String feedUriTemplate)
        throws IOException, URISyntaxException, FalconException {

        CatalogStorage storage = (CatalogStorage)
                FeedHelper.createStorage(Storage.TYPE.TABLE.name(), feedUriTemplate);
        CatalogPartition partition = CatalogServiceFactory.getCatalogService().getPartition(
                storage.getCatalogUrl(), storage.getDatabase(), storage.getTable(), storage.getPartitions());
        return partition == null ? 0 : partition.getCreateTime();
    }
View Full Code Here
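
A typical use of this metric is to compare partition create times on source and target catalogs to confirm that replication re-registered the partition. A minimal usage sketch, assuming source and target feed URI templates are available (the variable names are hypothetical):

        // Sketch: the target partition should be created at or after the source partition was published.
        long sourceCreateTime = getTablePartitionCreateTimeMetric(sourceFeedUriTemplate);
        long targetCreateTime = getTablePartitionCreateTimeMetric(targetFeedUriTemplate);
        Assert.assertTrue(targetCreateTime >= sourceCreateTime,
                "target partition should not be older than the source partition");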

                    propagateFileSystemCopyProperties(pathsWithPartitions, props);
                } else if (sourceStorage.getType() == Storage.TYPE.TABLE) {
                    instancePaths = "${coord:dataIn('input')}";

                    final CatalogStorage sourceTableStorage = (CatalogStorage) sourceStorage;
                    propagateTableStorageProperties(srcCluster, sourceTableStorage, props, "falconSource");
                    final CatalogStorage targetTableStorage = (CatalogStorage) targetStorage;
                    propagateTableStorageProperties(trgCluster, targetTableStorage, props, "falconTarget");
                    propagateTableCopyProperties(srcCluster, sourceTableStorage,
                            trgCluster, targetTableStorage, props);
                    setupHiveConfiguration(trgCluster, sourceTableStorage, targetTableStorage, wfPath);
                }
View Full Code Here
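
propagateTableStorageProperties is invoked once with a "falconSource" prefix and once with "falconTarget"; a plausible, purely hypothetical shape is to publish each side's storage coordinates under that prefix so the replication workflow can read them later. The key names below are illustrative, not the exact Falcon properties:

    // Hypothetical sketch of propagateTableStorageProperties; actual Falcon key names differ.
    private void propagateTableStorageProperties(Cluster cluster, CatalogStorage tableStorage,
                                                 Map<String, String> props, String prefix) {
        props.put(prefix + "CatalogUrl", tableStorage.getCatalogUrl());
        props.put(prefix + "Database", tableStorage.getDatabase());
        props.put(prefix + "Table", tableStorage.getTable());
        props.put(prefix + "Cluster", cluster.getName());
    }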

            }

            final Storage storage = FeedHelper.createStorage(cluster, feed);
            if (!storage.exists()) {
                // this is only true for table, filesystem always returns true
                CatalogStorage catalogStorage = (CatalogStorage) storage;
                throw new ValidationException("Table [" + catalogStorage.getTable()
                        + "] does not exist for feed: " + feed.getName() + ", cluster: " + cluster.getName());
            }
        }
    }
View Full Code Here

    private Map<String, String> getExpectedProperties(Feed inFeed, Feed outFeed,
                                                      Process process) throws FalconException {
        Map<String, String> expected = new HashMap<String, String>();
        for (Input input : process.getInputs().getInputs()) {
            CatalogStorage storage = (CatalogStorage) FeedHelper.createStorage(cluster, inFeed);
            propagateStorageProperties(input.getName(), storage, expected);
        }

        for (Output output : process.getOutputs().getOutputs()) {
            CatalogStorage storage = (CatalogStorage) FeedHelper.createStorage(cluster, outFeed);
            propagateStorageProperties(output.getName(), storage, expected);
        }

        return expected;
    }
View Full Code Here

            // this is only true for table, filesystem always returns true
            if (storage.getType() == Storage.TYPE.FILESYSTEM) {
                continue;
            }

            CatalogStorage catalogStorage = (CatalogStorage) storage;
            String metaStorePrincipal = ClusterHelper.getPropertyValue(clusterEntity,
                    SecurityUtil.HIVE_METASTORE_PRINCIPAL);
            if (!CatalogServiceFactory.getCatalogService().tableExists(catalogStorage.getCatalogUrl(),
                    catalogStorage.getDatabase(), catalogStorage.getTable(), metaStorePrincipal)) {
                buffer.append("Table [")
                        .append(catalogStorage.getTable())
                        .append("] does not exist for feed: ")
                        .append(feed.getName())
                        .append(" in cluster: ")
                        .append(cluster.getName());
            }
View Full Code Here
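
The buffer accumulates one message per missing table; a natural way to finish the validation, sketched under the assumption that buffer is a StringBuilder shared across all cluster checks for the feed, is to fail only if anything was appended:

        // Sketch: raise a single validation error covering every missing table.
        if (buffer.length() > 0) {
            throw new ValidationException(buffer.toString());
        }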

