Package org.apache.falcon.entity.v0.cluster

Examples of org.apache.falcon.entity.v0.cluster.Cluster


        overlay = context.getUniqueOverlay();

        // String filePath = TestContext.overlayParametersOverTemplate(TestContext.CLUSTER_TEMPLATE, overlay);
        context.setCluster(overlay.get("cluster"));

        final Cluster cluster = context.getCluster().getCluster();
        final String storageUrl = ClusterHelper.getStorageUrl(cluster);

        copyDataAndScriptsToHDFS(storageUrl);
        copyLibsToHDFS(cluster, storageUrl);
    }
View Full Code Here


        overlay = context.getUniqueOverlay();
        String filePath = TestContext.overlayParametersOverTemplate(CLUSTER_TEMPLATE, overlay);
        context.setCluster(filePath);

        final Cluster cluster = context.getCluster().getCluster();
        final String storageUrl = ClusterHelper.getStorageUrl(cluster);
        metastoreUrl = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY).getEndpoint();

        copyDataAndScriptsToHDFS(storageUrl);
        copyLibsToHDFS(cluster, storageUrl);
View Full Code Here

    public FileSystem getFileSystem() throws IOException {
        return FileSystem.get(conf);
    }

    protected void buildClusterObject(String name) {
        clusterEntity = new Cluster();
        clusterEntity.setName(name);
        clusterEntity.setColo("local");
        clusterEntity.setDescription("Embeded cluster: " + name);

        Interfaces interfaces = new Interfaces();
View Full Code Here

        String filePath = TestContext.overlayParametersOverTemplate(
                TestContext.CLUSTER_TEMPLATE, context.getUniqueOverlay());
        context.setCluster(filePath);

        Cluster cluster = context.getCluster().getCluster();
        fs = FileSystem.get(ClusterHelper.getConfiguration(cluster));
        storageUrl = ClusterHelper.getStorageUrl(cluster);
        metastoreUrl = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY).getEndpoint();

        copyDataAndScriptsToHDFS();
View Full Code Here

            if (!feedCluster.getValidity().getStart().before(feedCluster.getValidity().getEnd())) {
                LOG.info("feed validity start <= end for cluster " + clusterName + ". Skipping schedule");
                break;
            }

            Cluster cluster = CONFIG_STORE.get(EntityType.CLUSTER, feedCluster.getName());
            Path bundlePath = EntityUtil.getNewStagingPath(cluster, entity);

            if (!map(cluster, bundlePath)) {
                break;
            }
View Full Code Here

        if (FeedHelper.getCluster(entity, targetCluster.getName()).getType() == ClusterType.TARGET) {
            for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : entity.getClusters().getClusters()) {
                if (feedCluster.getType() == ClusterType.SOURCE) {
                    String coordName = EntityUtil.getWorkflowName(Tag.REPLICATION, entity).toString();
                    Path basePath = getCoordPath(bundlePath, coordName);
                    Cluster srcCluster = ConfigurationStore.get().get(EntityType.CLUSTER, feedCluster.getName());

                    // workflow is serialized to a specific dir
                    Path sourceSpecificWfPath = new Path(basePath, srcCluster.getName());

                    // Different workflow for each source since hive credentials vary for each cluster
                    replicationMapper.createReplicationWorkflow(
                            targetCluster, srcCluster, sourceSpecificWfPath, coordName);
View Full Code Here

        Entity entity = (Entity) unmarshaller
                .unmarshal(OozieFeedWorkflowBuilderTest.class.getResource(template));
        store.publish(type, entity);

        if (type == EntityType.CLUSTER) {
            Cluster cluster = (Cluster) entity;
            ClusterHelper.getInterface(cluster, Interfacetype.WRITE).setEndpoint(writeEndpoint);
            FileSystem fs = new Path(writeEndpoint).getFileSystem(EmbeddedCluster.newConfiguration());
            fs.create(new Path(ClusterHelper.getLocation(cluster, "working"), "libext/FEED/retention/ext.jar")).close();
            fs.create(
                    new Path(ClusterHelper.getLocation(cluster, "working"), "libext/FEED/replication/ext.jar")).close();
View Full Code Here

    public void testClusterEntityWithInvalidInterfaces(Interfacetype interfacetype, String endpoint)
        throws Exception {
        overlay = context.getUniqueOverlay();
        String filePath = TestContext.overlayParametersOverTemplate(TestContext.CLUSTER_TEMPLATE, overlay);
        InputStream stream = new FileInputStream(filePath);
        Cluster cluster = (Cluster) EntityType.CLUSTER.getUnmarshaller().unmarshal(stream);
        Assert.assertNotNull(cluster);
        cluster.setColo("default")// validations will be ignored if not default & tests fail

        Interface anInterface = ClusterHelper.getInterface(cluster, interfacetype);
        anInterface.setEndpoint(endpoint);

        File tmpFile = TestContext.getTempFile();
View Full Code Here

            if (processCluster.getValidity().getStart().compareTo(processCluster.getValidity().getEnd()) >= 0) {
                LOG.info("process validity start <= end for cluster " + clusterName + ". Skipping schedule");
                break;
            }

            Cluster cluster = CONFIG_STORE.get(EntityType.CLUSTER, processCluster.getName());
            Path bundlePath = EntityUtil.getNewStagingPath(cluster, entity);
            map(cluster, bundlePath);
            Properties properties = createAppProperties(clusterName, bundlePath, CurrentUser.getUser());

            //Add libpath
View Full Code Here

    public void onAdd(Entity entity) {
        if (entity.getEntityType() != EntityType.CLUSTER) {
            return;
        }

        Cluster cluster = (Cluster) entity;
        COLO_CLUSTER_MAP.putIfAbsent(cluster.getColo(), new HashSet<String>());
        COLO_CLUSTER_MAP.get(cluster.getColo()).add(cluster.getName());
    }
View Full Code Here

TOP

Related Classes of org.apache.falcon.entity.v0.cluster.Cluster

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware#gmail.com.