Package org.apache.phoenix.query

Examples of org.apache.phoenix.query.ConnectionQueryServices
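
For orientation, here is a minimal, hedged sketch of how client code obtains a ConnectionQueryServices instance. It mirrors the destroyTable example near the end of this page, which unwraps a PhoenixConnection; the JDBC URL and property set below are placeholders.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.jdbc.PhoenixConnection;
    import org.apache.phoenix.query.ConnectionQueryServices;

    public class QueryServicesExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Placeholder URL: point this at a real HBase quorum.
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            try {
                // ConnectionQueryServices is the client-side facade over HBase:
                // table handles, region locations, the executor pool, and configuration.
                ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
                System.out.println("executor: " + services.getExecutor());
            } finally {
                conn.close();
            }
        }
    }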



    @SuppressWarnings("deprecation")
    private static void initTableValues() throws Exception {
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
        HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME, HBASE_DYNAMIC_COLUMNS));
        try {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            // ... snippet truncated ...
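
The snippet above breaks off before the rows are written. A hedged sketch of the usual completion with the (deprecated) HTableInterface API follows; the column family, row key, and values are illustrative assumptions, not the original test data.

            // Sketch only: family "0" and row key "entry1" are assumptions.
            byte[] family = Bytes.toBytes("0");
            Put put = new Put(Bytes.toBytes("entry1"));
            put.add(family, dv, Bytes.toBytes("default value"));
            put.add(family, first, Bytes.toBytes("first name"));
            mutations.add(put);
            hTable.batch(mutations);   // apply all mutations in one round trip
            hTable.close();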
   
    public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
        List<Closeable> closeables = new ArrayList<Closeable>();
        closeables.add(chunk);
        ServerCache hashCacheSpec = null;
        SQLException firstException = null;
        final byte[] cacheId = generateId();
        /**
         * Execute EndPoint in parallel on each server to send compressed hash cache
         */
        // TODO: generalize and package as a per region server EndPoint caller
        // (ideally this would be functionality provided by the coprocessor framework)
        boolean success = false;
        ExecutorService executor = services.getExecutor();
        List<Future<Boolean>> futures = Collections.emptyList();
        try {
            List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
            int nRegions = locations.size();
            // Size these based on worst case
            futures = new ArrayList<Future<Boolean>>(nRegions);
            Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
            for (HRegionLocation entry : locations) {
                // Keep track of servers we've sent to and only send once
                if (!servers.contains(entry) &&
                        keyRanges.intersect(entry.getRegionInfo().getStartKey(), entry.getRegionInfo().getEndKey())) {  // Call RPC once per server
                    servers.add(entry);
                    if (LOG.isDebugEnabled()) {LOG.debug("Adding cache entry to be sent for " + entry);}
                    final byte[] key = entry.getRegionInfo().getStartKey();
                    final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                    closeables.add(htable);
                    futures.add(executor.submit(new JobCallable<Boolean>() {
                       
                        @Override
                        public Boolean call() throws Exception {
                            final Map<byte[], AddServerCacheResponse> results;
                            try {
                                results = htable.coprocessorService(ServerCachingService.class, key, key,
                                            new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                                @Override
                                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                                    ServerRpcController controller = new ServerRpcController();
                                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
                                                            new BlockingRpcCallback<AddServerCacheResponse>();
                                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                                    if (connection.getTenantId() != null) {
                                                        builder.setTenantId(HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
                                                    }
                                                    builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
                                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                                    svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                                    if (controller.getFailedOn() != null) {
                                                        throw controller.getFailedOn();
                                                    }
                                                    return rpcCallback.get();
                                                }
                                              });
                            } catch (Throwable t) {
                                throw new Exception(t);
                            }
                            if (results != null && results.size() == 1) {
                                return results.values().iterator().next().getReturn();
                            }
                            return false;
                        }

                        /**
                         * Defines the grouping for round robin behavior.  All threads spawned to process
                         * this scan will be grouped together and time sliced with other simultaneously
                         * executing parallel scans.
                         */
                        @Override
                        public Object getJobId() {
                            return ServerCacheClient.this;
                        }
                    }));
                } else {
                    if (LOG.isDebugEnabled()) {LOG.debug("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry");}
                }
            }
           
            hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
            // Execute in parallel
            int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
            for (Future<Boolean> future : futures) {
                future.get(timeoutMs, TimeUnit.MILLISECONDS);
            }
           
            cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
            // ... snippet truncated ...
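
On the caller's side, the returned ServerCache is closeable, so the usual pattern is to hold it for the lifetime of the query and release the server-side memory afterwards. A hedged sketch, with illustrative names (serverCacheClient and its arguments are placeholders):

        ServerCache cache = serverCacheClient.addServerCache(keyRanges, cachePtr, cacheFactory, cacheUsingTableRef);
        try {
            // run the query that reads the cached table on the region servers
        } finally {
            cache.close();   // removes the cache from the servers it was sent to
        }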

    /**
     * Removes the cached table from all region servers on which it was added.
     * @param cacheId unique identifier of the cache entry to remove
     * @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
     * @throws SQLException
     * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
     */
    private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
      ConnectionQueryServices services = connection.getQueryServices();
      Throwable lastThrowable = null;
      TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
      byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
      HTableInterface iterateOverTable = services.getTable(tableName);
      try {
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /**
         * Allow for the possibility that the region we based where to send our cache has split and been
         * relocated to another region server *after* we sent it, but before we removed it. To accommodate
         * this, we iterate through the current metadata boundaries and remove the cache once for each
         * server that we originally sent to.
         */
        // ... snippet truncated ...
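
The loop that comment describes can be outlined as follows; this is a sketch under assumptions, with the remove RPC elided since its shape mirrors the AddServerCacheRequest call shown earlier:

        // Sketch only: issue at most one remove per server the cache was originally sent to.
        for (HRegionLocation entry : locations) {
            if (remainingOnServers.contains(entry)) {
                // send the remove-cache RPC for cacheId to this server (elided)
                remainingOnServers.remove(entry);
            }
        }
        if (!remainingOnServers.isEmpty()) {
            // per the Javadoc above, an unreachable server surfaces as IllegalStateException
            throw new IllegalStateException("Cache not removed from servers: " + remainingOnServers);
        }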

        final HashCacheClient hashClient = new HashCacheClient(plan.getContext().getConnection());
        Scan scan = plan.getContext().getScan();
        final ScanRanges ranges = plan.getContext().getScanRanges();

        int count = joinIds.length;
        ConnectionQueryServices services = getContext().getConnection().getQueryServices();
        ExecutorService executor = services.getExecutor();
        List<Future<ServerCache>> futures = new ArrayList<Future<ServerCache>>(count);
        List<SQLCloseable> dependencies = new ArrayList<SQLCloseable>(count);
        final int maxServerCacheTimeToLive = services.getProps().getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
        final AtomicLong firstJobEndTime = new AtomicLong(0);
        SQLException firstException = null;
        for (int i = 0; i < count; i++) {
            final int index = i;
            futures.add(executor.submit(new JobCallable<ServerCache>() {
            // ... snippet truncated ...
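
The excerpt stops at the submit call. A plausible continuation, hedged, waits on each future with the configured thread timeout and registers the resulting ServerCache for cleanup:

            // Sketch only: error propagation is simplified relative to the real code.
            int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                    QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
            for (Future<ServerCache> future : futures) {
                ServerCache cache = future.get(timeoutMs, TimeUnit.MILLISECONDS);
                dependencies.add(cache);   // closed when the join plan itself is closed
            }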

   
    @SuppressWarnings("deprecation")
    private static void initTableValues() throws Exception {
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
        HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
        try {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] family = Bytes.toBytes("1");
            byte[] uintCol = Bytes.toBytes("UINT_COL");
            // ... snippet truncated ...

        MutationState state = mutate(context, iterator, connection);
        long totalRowCount = state.getUpdateCount();
        if (connection.getAutoCommit()) {
            connection.getMutationState().join(state);
            connection.commit();
            ConnectionQueryServices services = connection.getQueryServices();
            int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
            state = new MutationState(maxSize, connection, totalRowCount);
        }
        final MutationState finalState = state;
        byte[] value = PDataType.LONG.toBytes(totalRowCount);
        KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        // ... snippet truncated ...
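
The excerpt ends right after the aggregate KeyValue is built. A hedged sketch of what plausibly follows: the KeyValue is wrapped as the single row of the result, so the client reads the mutation count as a one-row scan (SingleKeyValueTuple is Phoenix's single-cell Tuple; the iterator shape below is an assumption, not the original code):

        // Sketch only: surface the row count as a one-row result.
        final Tuple tuple = new SingleKeyValueTuple(keyValue);
        return new PeekingResultIterator() {
            private boolean done = false;

            @Override
            public Tuple peek() {
                return done ? null : tuple;
            }

            @Override
            public Tuple next() {
                if (done) {
                    return null;
                }
                done = true;
                return tuple;   // the single row carrying totalRowCount
            }

            @Override
            public void explain(List<String> planSteps) {
            }

            @Override
            public void close() throws SQLException {
            }
        };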

   
    private static void destroyTable() throws Exception {
        // Physically delete HBase table so that splits occur as expected for each test
        Properties props = new Properties(TEST_PROPERTIES);
        ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            try {
                admin.disableTable(PRODUCT_METRICS_NAME);
                admin.deleteTable(PRODUCT_METRICS_NAME);
            } catch (TableNotFoundException e) {
                // ... snippet truncated ...
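
The excerpt stops inside the catch block. A hedged sketch of the usual finish for this cleanup idiom: ignore the missing table on the first run, and close the admin handle either way.

                // first run: the table does not exist yet, so there is nothing to delete
            }
        } finally {
            admin.close();   // release the HBaseAdmin in all cases
        }
    }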
