Package org.apache.hadoop.gateway.topology

Examples of org.apache.hadoop.gateway.topology.Topology
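org.apache.hadoop.gateway.topology.Topology is the Knox gateway's in-memory model of a cluster topology: a named, timestamped collection of Provider and Service definitions that DeploymentFactory turns into a deployable web archive. As a minimal sketch of the API as it is used in the examples below (imports from org.apache.hadoop.gateway.topology, plus the config reference, are assumed from the surrounding test classes; the role, name, and URL values are illustrative only):

  // Build a topology model programmatically, mirroring the tests below.
  Topology topology = new Topology();
  topology.setName( "example-cluster" );

  // A service maps a gateway role to the backend URL the gateway proxies.
  Service service = new Service();
  service.setRole( "WEBHDFS" );
  service.setUrl( "http://localhost:50070/webhdfs" );
  topology.addService( service );

  // A provider contributes cross-cutting behavior (here: authentication),
  // configured through named params.
  Provider provider = new Provider();
  provider.setRole( "authentication" );
  provider.setName( "generic" );
  provider.setEnabled( true );
  Param param = new Param();
  param.setName( "filter" );
  param.setValue( "org.opensource.ExistingFilter" );
  provider.addParam( param );
  topology.addProvider( provider );

  // DeploymentFactory assembles the model into a war (see the tests below).
  WebArchive war = DeploymentFactory.createDeployment( config, topology );

The first fragment below is from the contribute step of a deployment: it walks the topology's providers and services and asks each matching deployment contributor to contribute to the war under construction.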


  private static void contribute(
      DeploymentContext context,
      Map<String,List<ProviderDeploymentContributor>> providers,
      Map<String,List<ServiceDeploymentContributor>> services ) {
    Topology topology = context.getTopology();
    for( Provider provider : topology.getProviders() ) {
      ProviderDeploymentContributor contributor = getProviderContributor( providers, provider.getRole(), provider.getName() );
      if( contributor != null && provider.isEnabled() ) {
        try {
          contributor.contributeProvider( context, provider );
        } catch( Exception e ) {
          // Log the failure, then abort the deployment.
          log.failedToContributeProvider( provider.getName(), provider.getRole(), e );
          throw new DeploymentException("Failed to contribute provider.", e);
        }
      }
    }
    for( Service service : topology.getServices() ) {
      ServiceDeploymentContributor contributor = getServiceContributor( service.getRole(), null );
      if( contributor != null ) {
        try {
          contributor.contributeService( context, service );
          if (gatewayServices != null) {
            ServiceRegistry sr = (ServiceRegistry) gatewayServices.getService(GatewayServices.SERVICE_REGISTRY_SERVICE);
            if (sr != null) {
              String regCode = sr.getRegistrationCode(topology.getName());
              sr.registerService(regCode, topology.getName(), service.getRole(), service.getUrl() );
            }
          }
        } catch( Exception e ) {
          // Log the failure, then abort the deployment, mirroring the provider loop above.
          log.failedToContributeService( service.getName(), service.getRole(), e );
          throw new DeploymentException( "Failed to contribute service.", e );
        }
      }
    }
  }
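The next example exercises a file-based topology provider and its directory monitor. The excerpt begins inside a try block after an initial topology file, "one", has already been loaded; it then adds, updates, and deletes a second file, kicking the monitor and asserting the provider's topology set and the listener's events after each change.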


      kickMonitor( monitor );

      Collection<Topology> topologies = provider.getTopologies();
      assertThat( topologies, notNullValue() );
      assertThat( topologies.size(), is( 1 ) );
      Topology topology = topologies.iterator().next();
      assertThat( topology.getName(), is( "one" ) );
      assertThat( topology.getTimestamp(), is( time ) );
      assertThat( topoListener.events.size(), is( 1 ) );
      topoListener.events.clear();

      // Add a file to the directory.
      File two = createFile( dir, "two.xml", "org/apache/hadoop/gateway/topology/file/topology-two.xml", 1L );
      kickMonitor( monitor );
      topologies = provider.getTopologies();
      assertThat( topologies.size(), is( 2 ) );
      Set<String> names = new HashSet<String>( Arrays.asList( "one", "two" ) );
      Iterator<Topology> iterator = topologies.iterator();
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      assertThat( names.size(), is( 0 ) );
      assertThat( topoListener.events.size(), is( 1 ) );
      List<TopologyEvent> events = topoListener.events.get( 0 );
      assertThat( events.size(), is( 1 ) );
      TopologyEvent event = events.get( 0 );
      assertThat( event.getType(), is( TopologyEvent.Type.CREATED ) );
      assertThat( event.getTopology(), notNullValue() );

      // Update a file in the directory.
      two = createFile( dir, "two.xml", "org/apache/hadoop/gateway/topology/file/topology-three.xml", 2L );
      kickMonitor( monitor );
      topologies = provider.getTopologies();
      assertThat( topologies.size(), is( 2 ) );
      names = new HashSet<String>( Arrays.asList( "one", "two" ) );
      iterator = topologies.iterator();
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      topology = iterator.next();
      assertThat( names, hasItem( topology.getName() ) );
      names.remove( topology.getName() );
      assertThat( names.size(), is( 0 ) );

      // Remove a file from the directory.
      two.delete();
      kickMonitor( monitor );
      topologies = provider.getTopologies();
      assertThat( topologies.size(), is( 1 ) );
      topology = topologies.iterator().next();
      assertThat( topology.getName(), is( "one" ) );
      assertThat( topology.getTimestamp(), is( time ) );
    } finally {
      FileUtils.deleteQuietly( dir );
    }
  }
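This test builds a "test-cluster" Topology with a WEBHDFS service and a generic authentication provider carrying a filter param, creates the deployment with DeploymentFactory, and then parses the generated WEB-INF/gateway.xml out of the resulting war.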

      srvcs.init(config, options);
    } catch (ServiceLifecycleException e) {
      e.printStackTrace(); // I18N not required.
    }

    Topology topology = new Topology();
    topology.setName( "test-cluster" );
    Service service = new Service();
    service.setRole( "WEBHDFS" );
    service.setUrl( "http://localhost:50070/test-service-url" );
    topology.addService( service );

    Provider provider = new Provider();
    provider.setRole( "authentication" );
    provider.setName( "generic" );
    provider.setEnabled( true );
    Param param = new Param();
    param.setName( "filter" );
    param.setValue( "org.opensource.ExistingFilter" );
    provider.addParam( param );
    param = new Param();
    param.setName( "test-param-name" );
    param.setValue( "test-param-value" );
    provider.addParam( param );
    topology.addProvider( provider );

    WebArchive war = DeploymentFactory.createDeployment( config, topology );

    Document gateway = parse( war.get( "WEB-INF/gateway.xml" ).getAsset().openStream() );
    //dump( gateway );
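The next test is the negative counterpart: the required filter param is deliberately omitted, so DeploymentFactory.createDeployment is expected to fail. A NoOpAppender is installed to silence the expected error logging.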

      srvcs.init(config, options);
    } catch (ServiceLifecycleException e) {
      e.printStackTrace(); // I18N not required.
    }

    Topology topology = new Topology();
    topology.setName( "test-cluster" );
    Service service = new Service();
    service.setRole( "WEBHDFS" );
    service.setUrl( "http://localhost:50070/test-service-url" );
    topology.addService( service );

    Provider provider = new Provider();
    provider.setRole( "authentication" );
    provider.setName( "generic" );
    provider.setEnabled( true );
    Param param; // = new ProviderParam();
    // Missing filter param.
    //param.setName( "filter" );
    //param.setValue( "org.opensource.ExistingFilter" );
    //provider.addParam( param );
    param = new Param();
    param.setName( "test-param-name" );
    param.setValue( "test-param-value" );
    provider.addParam( param );
    topology.addProvider( provider );

    Enumeration<Appender> appenders = NoOpAppender.setUp();
    try {
      DeploymentFactory.createDeployment( config, topology );
      fail( "Should have throws IllegalArgumentException" );
View Full Code Here
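This example assembles a fuller security chain: an authentication provider configured via a contextConfigLocation param, a Pseudo identity-assertion provider, and an AclsAuthz authorization provider, then creates the deployment.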

      srvcs.init(config, options);
    } catch (ServiceLifecycleException e) {
      e.printStackTrace(); // I18N not required.
    }

    Topology topology = new Topology();
    topology.setName( "test-cluster" );
    Service service = new Service();
    service.setRole( "WEBHDFS" );
    service.setUrl( "http://localhost:50070/webhdfs" );
    topology.addService( service );
    Provider provider = new Provider();
    provider.setRole( "authentication" );
    provider.setEnabled( true );
    Param param = new Param();
    param.setName( "contextConfigLocation" );
    param.setValue( "classpath:app-context-security.xml" );
    provider.addParam( param );
    topology.addProvider( provider );
    Provider asserter = new Provider();
    asserter.setRole( "identity-assertion" );
    asserter.setName("Pseudo");
    asserter.setEnabled( true );
    topology.addProvider( asserter );
    Provider authorizer = new Provider();
    authorizer.setRole( "authorization" );
    authorizer.setName("AclsAuthz");
    authorizer.setEnabled( true );
    topology.addProvider( authorizer );

    WebArchive war = DeploymentFactory.createDeployment( config, topology );
    //File dir = new File( System.getProperty( "user.dir" ) );
    //File file = war.as( ExplodedExporter.class ).exportExploded( dir, "test-cluster.war" );
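Here a Jersey "pivot" provider is contributed against a mocked DeploymentContext, with ShrinkWrap supplying the web archive and EasyMock the context.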

    MockJerseyService serviceContributor = new MockJerseyService();

    WebArchive webArchive = ShrinkWrap.create( WebArchive.class, "test-archive" );

    Topology topology = new Topology();
    topology.setName( "test-topology" );
    Provider provider = new Provider();
    provider.setRole( "pivot" );
    provider.setName( "jersey" );
    provider.setEnabled( true );
    topology.addProvider( provider );

    GatewayDescriptor descriptor = GatewayDescriptorFactory.create();

    DeploymentContext context = EasyMock.createNiceMock( DeploymentContext.class );
    EasyMock.expect( context.getWebArchive() ).andReturn( webArchive ).anyTimes();
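The following fragment is GatewayServer's topology change listener: DELETED events undeploy the topology and remove its exploded war directory, while new or changed topologies are deployed or redeployed, and every action is audited.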

    @Override
    public void handleTopologyEvent( List<TopologyEvent> events ) {
      synchronized ( GatewayServer.this ) {
        for( TopologyEvent event : events ) {
          Topology topology = event.getTopology();
          File deployDir = calculateAbsoluteDeploymentsDir();
          if( event.getType().equals( TopologyEvent.Type.DELETED ) ) {
            File[] files = deployDir.listFiles( new WarDirFilter( topology.getName() + "\\.war\\.[0-9A-Fa-f]+" ) );
            if( files != null ) {
              for( File file : files ) {
                auditor.audit( Action.UNDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.UNAVAILABLE );
                log.deletingDeployment( file.getAbsolutePath() );
                internalUndeploy( topology );
                FileUtils.deleteQuietly( file );
              }
            }
          } else {
            try {
              File warDir = calculateDeploymentDir( topology );
              if( !warDir.exists() ) {
                auditor.audit( Action.DEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.UNAVAILABLE );
                log.deployingTopology( topology.getName(), warDir.getAbsolutePath() );
                internalUndeploy( topology ); // KNOX-152
                WebArchive war = DeploymentFactory.createDeployment( config, topology );
                if( !deployDir.exists() ) {
                  deployDir.mkdirs();
                }
                File tmp = war.as( ExplodedExporter.class ).exportExploded( deployDir, warDir.getName() + ".tmp" );
                tmp.renameTo( warDir );
                internalDeploy( topology, warDir );
                //log.deployedTopology( topology.getName());
              } else {
                auditor.audit( Action.REDEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.UNAVAILABLE );
                log.redeployingTopology( topology.getName(), warDir.getAbsolutePath() );
                internalDeploy( topology, warDir );
                //log.redeployedTopology( topology.getName() );
              }
            } catch( Throwable e ) {
              auditor.audit( Action.DEPLOY, topology.getName(), ResourceType.TOPOLOGY, ActionOutcome.FAILURE );
              log.failedToDeployTopology( topology.getName(), e );
            }
          }
        }
      }
    }
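A smaller mock-based setup: a shiro authentication provider attached to a "Sample" topology, with the DeploymentContext again mocked via EasyMock.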

    Provider provider = new Provider();
    provider.setEnabled( true );
    provider.setName( "shiro" );
    provider.setParams( providerParams );

    Topology topology = new Topology();
    topology.setName( "Sample" );

    DeploymentContext context = EasyMock.createNiceMock( DeploymentContext.class );
    EasyMock.expect( context.getWebArchive() ).andReturn( webArchive ).anyTimes();
    EasyMock.expect( context.getWebAppDescriptor() ).andReturn( Descriptors.create( WebAppDescriptor.class ) ).anyTimes();
    EasyMock.expect( context.getTopology() ).andReturn( topology ).anyTimes();
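This test parses an Ambari-format topology file with a digester, builds a Topology from the resulting TopologyBuilder, and asserts the parsed name, timestamp, provider params, and service details.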

    String name = "org/apache/hadoop/gateway/topology/xml/service-param-topology-ambari-format.conf";
    URL url = ClassLoader.getSystemResource( name );
    assertThat( "Failed to find URL for resource " + name, url, notNullValue() );
    File file = new File( url.getFile() );
    TopologyBuilder topologyBuilder = digester.parse( url );
    Topology topology = topologyBuilder.build();
    assertThat( "Failed to parse resource " + name, topology, notNullValue() );
    topology.setTimestamp( file.lastModified() );

    assertThat( topology.getName(), is( "test-topology-name" ) );
    assertThat( topology.getTimestamp(), is( file.lastModified() ) );

    assertThat( topology.getProviders().size(), is( 1 ) );
    Provider provider = topology.getProviders().iterator().next();
    assertThat( provider, notNullValue() );
    assertThat( provider.getRole(), is( "test-provider-role" ) );
    assertThat( provider.getName(), is( "test-provider-name" ) );
    assertThat( provider.isEnabled(), is( true ) );
    assertThat( provider.getParams(), hasEntry( is( "test-provider-param-name-1" ), is( "test-provider-param-value-1" ) ) );
    assertThat( provider.getParams(), hasEntry( is( "test-provider-param-name-2" ), is( "test-provider-param-value-2" ) ) );

    assertThat( topology.getServices().size(), is( 1 ) );
    Service service = topology.getServices().iterator().next();
    assertThat( service, notNullValue() );
    assertThat( service.getRole(), is( "test-service-role" ) );
    assertThat( service.getUrl(), is( "test-service-scheme://test-service-host:42/test-service-path" ) );
    assertThat( service.getName(), is( "test-service-name" ) );
    assertThat( service.getParams(), hasEntry( is( "test-service-param-name-1" ), is( "test-service-param-value-1" ) ) );
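Finally, an excerpt from TopologyBuilder itself: build() assembles a new Topology from the accumulated name, providers, and services.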

    public List<Service> services() {
        return services;
    }

    public Topology build() {
        Topology topology = new Topology();
        topology.setName(name);

        for (Provider provider : providers) {
            topology.addProvider(provider);
        }

        for (Service service : services) {
            topology.addService(service);
        }

        return topology;
    }
