Examples of RpcClient


Examples of org.apache.hadoop.hbase.ipc.RpcClient

      this.batchPool = pool;
      this.managed = managed;
      this.registry = setupRegistry();
      retrieveClusterId();

      this.rpcClient = new RpcClient(this.conf, this.clusterId);

      // Do we publish the status?
      Class<? extends ClusterStatusListener.Listener> listenerClass =
          conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS,
              ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS,
View Full Code Here
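The snippet above constructs the RpcClient inside a connection implementation, after retrieveClusterId() has resolved the cluster id. A minimal, hedged sketch of just that construction step, using HConstants.CLUSTER_ID_DEFAULT where no real cluster id is available (class and variable names below are illustrative, not from the source above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ipc.RpcClient;

public class RpcClientConstructionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The two-argument constructor takes a Configuration and a cluster id string;
    // callers without a resolved id pass HConstants.CLUSTER_ID_DEFAULT.
    RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT);
    // ... create channels and make calls here (see the later examples) ...
    rpcClient.stop(); // release connections and threads when finished
  }
}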

Examples of org.apache.hadoop.hbase.ipc.RpcClient

    /**
     * For tests only.
     * @param rpcClient Client we should use instead.
     * @return Previous rpcClient
     */
    RpcClient setRpcClient(final RpcClient rpcClient) {
      RpcClient oldRpcClient = this.rpcClient;
      this.rpcClient = rpcClient;
      return oldRpcClient;
    }
View Full Code Here

Examples of org.apache.hadoop.hbase.ipc.RpcClient

    // verify the server authenticates us as this token user
    testuser.doAs(new PrivilegedExceptionAction<Object>() {
      public Object run() throws Exception {
        Configuration c = server.getConfiguration();
        RpcClient rpcClient = new RpcClient(c, clusterId.toString());
        ServerName sn =
          new ServerName(server.getAddress().getHostName(), server.getAddress().getPort(),
            System.currentTimeMillis());
        try {
          BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
            User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
          AuthenticationProtos.AuthenticationService.BlockingInterface stub =
            AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
          AuthenticationProtos.WhoAmIResponse response =
            stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
          String myname = response.getUsername();
          assertEquals("testuser", myname);
          String authMethod = response.getAuthMethod();
          assertEquals("TOKEN", authMethod);
        } finally {
          rpcClient.stop();
        }
        return null;
      }
    });
  }
View Full Code Here
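The token-authentication test above also illustrates the general calling pattern: build a ServerName, open a BlockingRpcChannel with createBlockingRpcChannel, wrap it in a protobuf blocking stub, and stop the client when done. A standalone sketch of that pattern, reusing the AuthenticationService stub from the example; the host name and port are placeholders, and any generated BlockingInterface from the HBase protos could be substituted:

import com.google.protobuf.BlockingRpcChannel;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
import org.apache.hadoop.hbase.security.User;

public class BlockingStubSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT);
    try {
      // Placeholder target; in the examples above this comes from the running server.
      ServerName sn = new ServerName("regionserver.example.com", 60020,
          System.currentTimeMillis());
      // One channel per target server; the generated stub is a thin wrapper around it.
      BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
          User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
      AuthenticationProtos.AuthenticationService.BlockingInterface stub =
          AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
      AuthenticationProtos.WhoAmIResponse response =
          stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
      System.out.println("Authenticated as " + response.getUsername()
          + " via " + response.getAuthMethod());
    } finally {
      rpcClient.stop();
    }
  }
}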

Examples of org.apache.hadoop.hbase.ipc.RpcClient

    TEST_UTIL.startMiniZKCluster();
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.MASTER_PORT, "0");
    HMaster hm = new HMaster(conf);
    ServerName sm = hm.getServerName();
    RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT);
    try {
      int i = 0;
      //retry the RPC a few times; we have seen SocketTimeoutExceptions if we
      //try to connect too soon. Retry on SocketTimeoutException.
      while (i < 20) {
        try {
          BlockingRpcChannel channel =
            rpcClient.createBlockingRpcChannel(sm, User.getCurrent(), 0);
          MasterMonitorProtos.MasterMonitorService.BlockingInterface stub =
            MasterMonitorProtos.MasterMonitorService.newBlockingStub(channel);
          stub.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance());
          fail(); // the isMasterRunning call above should have thrown ServerNotRunningYetException
        } catch (ServiceException ex) {
          IOException ie = ProtobufUtil.getRemoteException(ex);
          if (!(ie instanceof SocketTimeoutException)) {
            if (ie.getMessage().startsWith("org.apache.hadoop.hbase.ipc." +
                "ServerNotRunningYetException: Server is not running yet")) {
              // Done.  Got the exception we wanted.
              System.out.println("Expected exception: " + ie.getMessage());
              return;
            } else {
              throw ex;
            }
          } else {
            System.err.println("Got SocketTimeoutException. Will retry. ");
          }
        } catch (Throwable t) {
          fail("Unexpected throwable: " + t);
        }
        Thread.sleep(100);
        i++;
      }
      fail(); // gave up after 20 retries without seeing the expected ServerNotRunningYetException
    } finally {
      rpcClient.stop();
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hbase.ipc.RpcClient

  public void testAdminTimeout() throws Exception {
    long lastLimit = HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT;
    HConnection lastConnection = null;
    boolean lastFailed = false;
    int initialInvocations = RandomTimeoutBlockingRpcChannel.invokations.get();
    RpcClient rpcClient = new RpcClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()) {
      // Return my own instance, one that does random timeouts
      @Override
      public BlockingRpcChannel createBlockingRpcChannel(ServerName sn,
          User ticket, int rpcTimeout) {
        return new RandomTimeoutBlockingRpcChannel(this, sn, ticket, rpcTimeout);
      }
    };
    try {
      for (int i = 0; i < 5 || (lastFailed && i < 100); ++i) {
        lastFailed = false;
        // Ensure the HBaseAdmin uses a new connection by changing Configuration.
        Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
        conf.setLong(HConstants.HBASE_CLIENT_PREFETCH_LIMIT, ++lastLimit);
        HBaseAdmin admin = null;
        try {
          admin = new HBaseAdmin(conf);
          HConnection connection = admin.getConnection();
          assertFalse(connection == lastConnection);
          lastConnection = connection;
          // Override the connection's rpc client for timeout testing
          ((HConnectionManager.HConnectionImplementation)connection).setRpcClient(rpcClient);
          // run some admin commands
          HBaseAdmin.checkHBaseAvailable(conf);
          admin.setBalancerRunning(false, false);
        } catch (MasterNotRunningException ex) {
          // Since we are randomly throwing SocketTimeoutExceptions, it is possible to get
          // a MasterNotRunningException.  It's a bug if we get other exceptions.
          lastFailed = true;
        } finally {
          if (admin != null) admin.close(); // guard against a constructor failure leaving admin null
        }
      }
      // Ensure the RandomTimeoutBlockingRpcChannel is actually being used.
      assertFalse(lastFailed);
      assertTrue(RandomTimeoutBlockingRpcChannel.invokations.get() > initialInvocations);
    } finally {
      rpcClient.stop();
    }
  }
View Full Code Here
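The timeout test above overrides createBlockingRpcChannel so the connection receives channels that inject random timeouts; RandomTimeoutBlockingRpcChannel itself is defined elsewhere in that test. The hypothetical subclass below is a simplified stand-in that shows the same override hook, returning a channel that fails every call instead of injecting random timeouts:

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.security.User;

/** Hypothetical fault-injecting client: every channel it hands out fails fast. */
public class AlwaysFailingRpcClient extends RpcClient {

  public AlwaysFailingRpcClient(Configuration conf, String clusterId) {
    super(conf, clusterId);
  }

  @Override
  public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn,
      User ticket, int rpcTimeout) {
    // Return a channel that never touches the network and fails every call,
    // mirroring the shape (not the behavior) of RandomTimeoutBlockingRpcChannel.
    return new BlockingRpcChannel() {
      @Override
      public Message callBlockingMethod(Descriptors.MethodDescriptor md,
          RpcController controller, Message request, Message responsePrototype)
          throws ServiceException {
        throw new ServiceException("injected failure calling " + md.getFullName()
            + " on " + sn);
      }
    };
  }
}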

Examples of org.apache.hadoop.hbase.ipc.RpcClient

    // Create the thread to clean the moved regions list
    movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this);

    // Setup RPC client for master communication
    rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress(
        this.isa.getAddress(), 0));
  }
View Full Code Here
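The region server snippets above and below use a three-argument constructor that additionally takes an InetSocketAddress built from the server's own bound address and port 0. My reading, which should be treated as an assumption, is that this selects the local interface that outgoing connections bind to. A standalone sketch of that form:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ipc.RpcClient;

public class LocalAddressRpcClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The region server passes this.isa.getAddress(); here the local host
    // address stands in, with port 0 meaning "any ephemeral local port".
    InetSocketAddress localAddr =
        new InetSocketAddress(InetAddress.getLocalHost(), 0);
    RpcClient rpcClient =
        new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, localAddr);
    try {
      // ... channels created from this client originate from localAddr ...
    } finally {
      rpcClient.stop();
    }
  }
}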

Examples of org.apache.hadoop.hbase.ipc.RpcClient

      // Create the chore that cleans up nonces.
      nonceManagerChore = this.nonceManager.createCleanupChore(this);
    }

    // Setup RPC client for master communication
    rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress(
        this.isa.getAddress(), 0));
    this.pauseMonitor = new JvmPauseMonitor(conf);
    pauseMonitor.start();
  }
View Full Code Here

Examples of org.apache.hadoop.hbase.ipc.RpcClient

      this.batchPool = pool;
      this.managed = managed;
      this.registry = setupRegistry();
      retrieveClusterId();

      this.rpcClient = new RpcClient(this.conf, this.clusterId);

      // Do we publish the status?
      boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED,
          HConstants.STATUS_PUBLISHED_DEFAULT);
      Class<? extends ClusterStatusListener.Listener> listenerClass =
View Full Code Here

Examples of org.apache.hadoop.hbase.ipc.RpcClient

    /**
     * For tests only.
     * @param rpcClient Client we should use instead.
     * @return Previous rpcClient
     */
    RpcClient setRpcClient(final RpcClient rpcClient) {
      RpcClient oldRpcClient = this.rpcClient;
      this.rpcClient = rpcClient;
      return oldRpcClient;
    }
View Full Code Here

Examples of org.apache.hadoop.hbase.ipc.RpcClient

      // Create the chore that cleans up nonces.
      nonceManagerChore = this.nonceManager.createCleanupChore(this);
    }

    // Setup RPC client for master communication
    rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress(
        this.isa.getAddress(), 0));
    this.pauseMonitor = new JvmPauseMonitor(conf);
    pauseMonitor.start();
  }
View Full Code Here