Package: com.salesforce.phoenix.query

Examples of com.salesforce.phoenix.query.ConnectionQueryServices
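Every excerpt below starts from a ConnectionQueryServices handle. As a point of reference, here is a minimal, self-contained sketch of obtaining one from a Phoenix JDBC connection and reading a configuration property, following the same unwrap pattern the test helpers below use. The JDBC URL is a placeholder, and the import paths assume the pre-Apache com.salesforce.phoenix source tree.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import com.salesforce.phoenix.jdbc.PhoenixConnection;
    import com.salesforce.phoenix.query.ConnectionQueryServices;
    import com.salesforce.phoenix.query.QueryServices;
    import com.salesforce.phoenix.query.QueryServicesOptions;
    import com.salesforce.phoenix.util.ReadOnlyProps;

    public class QueryServicesExample {
        public static void main(String[] args) throws Exception {
            // "jdbc:phoenix:localhost" is a placeholder for your ZooKeeper quorum.
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", new Properties());
            try {
                // Unwrap the JDBC connection to reach the Phoenix-level services.
                ConnectionQueryServices services =
                        conn.unwrap(PhoenixConnection.class).getQueryServices();
                // Read a configuration property with a default fallback.
                ReadOnlyProps props = services.getProps();
                int timeoutMs = props.getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                        QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
                System.out.println("Parallel task timeout: " + timeoutMs + " ms");
            } finally {
                conn.close();
            }
        }
    }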


     * @return the result iterators for the scan of each region
     */
    @Override
    public List<PeekingResultIterator> getIterators() throws SQLException {
        boolean success = false;
        final ConnectionQueryServices services = context.getConnection().getQueryServices();
        ReadOnlyProps props = services.getProps();
        int numSplits = splits.size();
        List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numSplits);
        List<Pair<byte[],Future<PeekingResultIterator>>> futures = new ArrayList<Pair<byte[],Future<PeekingResultIterator>>>(numSplits);
        final UUID scanId = UUID.randomUUID();
        try {
            ExecutorService executor = services.getExecutor();
            for (KeyRange split : splits) {
                final Scan splitScan = new Scan(this.context.getScan());
                // Intersect with existing start/stop key if the table is salted
                // If not salted, we've already intersected it. If salted, we need
                // to wait until now to intersect, as we're running parallel scans
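The loop above clones one Scan per split and narrows it to that split's key range before handing it to the shared executor. A hedged sketch of the per-split bounding, assuming KeyRange exposes its bounds via getLowerRange()/getUpperRange():

    for (KeyRange split : splits) {
        // Copy the shared attributes (filters, time range, families) from the
        // context's scan, then narrow this copy to the split's key range.
        Scan splitScan = new Scan(context.getScan());
        splitScan.setStartRow(split.getLowerRange());
        splitScan.setStopRow(split.getUpperRange());
        // ... submit a task that runs splitScan via services.getExecutor()
    }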


        return projection;
    }

    protected ConnectionQueryServices getConnectionQueryServices(ConnectionQueryServices services) {
        // Get child services associated with tenantId of query.
        ConnectionQueryServices childServices = context.getConnection().getTenantId() == null ?
                services :
                services.getChildQueryServices(new ImmutableBytesWritable(context.getConnection().getTenantId().getBytes()));
        return childServices;
    }
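The same tenant resolution can be applied directly to a PhoenixConnection. A hedged sketch, using only the calls visible in the excerpt above:

    // Resolve tenant-scoped services for a connection, mirroring the helper above.
    ConnectionQueryServices resolveServices(PhoenixConnection connection) {
        ConnectionQueryServices services = connection.getQueryServices();
        return connection.getTenantId() == null
                ? services
                : services.getChildQueryServices(
                        new ImmutableBytesWritable(connection.getTenantId().getBytes()));
    }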

        }

    }
   
    public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
        List<Closeable> closeables = new ArrayList<Closeable>();
        closeables.add(chunk);
        ServerCache hashCacheSpec = null;
        SQLException firstException = null;
        final byte[] cacheId = generateId();
        /**
         * Execute EndPoint in parallel on each server to send compressed hash cache
         */
        // TODO: generalize and package as a per region server EndPoint caller
        // (ideally this would be functionality provided by the coprocessor framework)
        boolean success = false;
        ExecutorService executor = services.getExecutor();
        List<Future<Boolean>> futures = Collections.emptyList();
        try {
            List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
            int nRegions = locations.size();
            // Size these based on worst case
            futures = new ArrayList<Future<Boolean>>(nRegions);
            Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
            for (HRegionLocation entry : locations) {
                // Keep track of servers we've sent to and only send once
                if ( ! servers.contains(entry) &&
                        keyRanges.intersect(entry.getRegionInfo().getStartKey(), entry.getRegionInfo().getEndKey())) {  // Call RPC once per server
                    servers.add(entry);
                    if (LOG.isDebugEnabled()) {LOG.debug("Adding cache entry to be sent for " + entry);}
                    final byte[] key = entry.getRegionInfo().getStartKey();
                    final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                    closeables.add(htable);
                    futures.add(executor.submit(new JobCallable<Boolean>() {
                       
                        @Override
                        public Boolean call() throws Exception {
                            ServerCachingProtocol protocol = htable.coprocessorProxy(ServerCachingProtocol.class, key);
                            return protocol.addServerCache(connection.getTenantId() == null ? null : connection.getTenantId().getBytes(), cacheId, cachePtr, cacheFactory);
                        }

                        /**
                         * Defines the grouping for round robin behavior.  All threads spawned to process
                         * this scan will be grouped together and time sliced with other simultaneously
                         * executing parallel scans.
                         */
                        @Override
                        public Object getJobId() {
                            return ServerCacheClient.this;
                        }
                    }));
                } else {
                    if (LOG.isDebugEnabled()) {LOG.debug("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry");}
                }
            }
           
            hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
            // Execute in parallel
            int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
            for (Future<Boolean> future : futures) {
                future.get(timeoutMs, TimeUnit.MILLISECONDS);
            }
           
            cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
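From the caller's side, the returned ServerCache remembers which servers received the payload so the cache can be torn down once the query is done (see removeServerCache below). A hedged usage sketch, assuming ServerCache releases its per-server entries on close():

    ServerCache cache = cacheClient.addServerCache(ranges, cachePtr, cacheFactory, tableRef);
    try {
        // ... run the query whose coprocessors read the cache on each region server ...
    } finally {
        cache.close(); // assumed to remove the cache from every server it was sent to
    }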

     * @param servers list of servers upon which table was cached (filled in by {@link #addServerCache(ScanRanges, ImmutableBytesWritable, ServerCacheFactory, TableRef)})
     * @throws SQLException
     * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
     */
    private void removeServerCache(byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        Throwable lastThrowable = null;
        TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
        byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
        HTableInterface iterateOverTable = services.getTable(tableName);
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /**
         * Allow for the possibility that a region we used when deciding where to send our cache has
         * split and been relocated to another region server *after* we sent it, but before we removed
         * it. To accommodate this, we iterate through the current metadata boundaries and remove the
         * cache once for each
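Both addServerCache and removeServerCache key their per-server work off the current region list for the physical table. A sketch of that enumeration, using the same calls as above:

    List<HRegionLocation> locations = services.getAllTableRegions(tableName);
    for (HRegionLocation location : locations) {
        byte[] startKey = location.getRegionInfo().getStartKey();
        byte[] endKey = location.getRegionInfo().getEndKey();
        if (keyRanges.intersect(startKey, endKey)) {
            // this region, and therefore its server, holds relevant rows
        }
    }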

        final HashCacheClient hashClient = new HashCacheClient(plan.getContext().getConnection());
        Scan scan = plan.getContext().getScan();
        final ScanRanges ranges = plan.getContext().getScanRanges();
       
        int count = joinIds.length;
        ConnectionQueryServices services = getContext().getConnection().getQueryServices();
        ExecutorService executor = services.getExecutor();
        List<Future<ServerCache>> futures = new ArrayList<Future<ServerCache>>(count);
        List<SQLCloseable> dependencies = new ArrayList<SQLCloseable>(count);
        for (int i = 0; i < count; i++) {
            final int index = i;
            futures.add(executor.submit(new JobCallable<ServerCache>() {
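The JobCallable being constructed here pairs the work itself (call()) with a job identity (getJobId()) so that all tasks of one operation are grouped for round-robin scheduling, as the javadoc in the addServerCache excerpt explains. A hedged sketch of the anonymous class's shape; the body and the enclosing-instance name are placeholders:

    futures.add(executor.submit(new JobCallable<ServerCache>() {
        @Override
        public ServerCache call() throws Exception {
            // placeholder body: build and ship the hash cache for joinIds[index]
            return null;
        }

        @Override
        public Object getJobId() {
            // Group this operation's tasks so the executor time-slices them
            // fairly against other concurrently executing queries.
            return EnclosingPlan.this; // placeholder for the enclosing instance
        }
    }));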

    }
   
    private static void destroyTable() throws Exception {
        // Physically delete HBase table so that splits occur as expected for each test
        Properties props = new Properties(TEST_PROPERTIES);
        ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            try {
                admin.disableTable(PRODUCT_METRICS_NAME);
                admin.deleteTable(PRODUCT_METRICS_NAME);
            } catch (TableNotFoundException e) {
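The truncated tail of destroyTable() follows the same shape as destroyIndexTable() further below: ignore a missing table, and always close the admin handle. A sketch of the complete pattern:

    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
            // table was never created for this run; nothing to delete
        }
    } finally {
        admin.close();
    }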

        Properties props = new Properties(TEST_PROPERTIES);
        Connection conn = DriverManager.getConnection(url, props);

        Scan scan = new Scan();
       
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
        TableRef table = getTableRef(conn,ts);
        services.getStatsManager().updateStats(table);
        scan.setStartRow(HConstants.EMPTY_START_ROW);
        scan.setStopRow(K1);
        List<KeyRange> keyRanges = getSplits(conn, ts, scan);
        assertEquals("Unexpected number of splits: " + keyRanges, 3, keyRanges.size());
        assertEquals(newKeyRange(KeyRange.UNBOUND, new byte[] {'7'}), keyRanges.get(0));

        int startTime = 100;
        long waitTime = 5000;
       
        ManualTimeKeeper timeKeeper = new ManualTimeKeeper();
        timeKeeper.setCurrentTimeMillis(startTime);
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
        StatsManager stats = new StatsManagerImpl(services, updateFreq, maxAge, timeKeeper);
        MinKeyChange minKeyChange = new MinKeyChange(stats, table);
        MaxKeyChange maxKeyChange = new MaxKeyChange(stats, table);
       
        byte[] minKey = stats.getMinKey(table);
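A short sketch of how the stats manager is typically exercised: updateStats() forces a refresh, after which the cached boundary keys become available. getMaxKey is assumed to mirror the getMinKey call shown above:

    stats.updateStats(table);               // force a (re)load of the table's stats
    byte[] minKey = stats.getMinKey(table); // null until stats have been fetched
    byte[] maxKey = stats.getMaxKey(table); // assumed counterpart to getMinKey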

    }
   
   
    private static void destroyIndexTable() throws Exception {
        Properties props = new Properties(TEST_PROPERTIES);
        ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            admin.disableTable(INDEX_TABLE_FULL_NAME);
            admin.deleteTable(INDEX_TABLE_FULL_NAME);
        } catch (TableNotFoundException e) {
        } finally {
            admin.close();
        }
    }
   
    private static void initTableValues() throws Exception {
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
        HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
        try {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] family = Bytes.toBytes("1");
            byte[] uintCol = Bytes.toBytes("UINT_COL");
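A hedged sketch of how the truncated setup above continues: raw cells are written through the HTableInterface and the handle is released. The row key and value are placeholders; Put.add(family, qualifier, value) is the pre-0.96 HBase API this codebase targets:

    Put put = new Put(Bytes.toBytes("row1"));   // placeholder row key
    put.add(family, uintCol, Bytes.toBytes(1)); // UNSIGNED_INT maps to a big-endian int
    mutations.add(put);
    hTable.batch(mutations);
    hTable.close();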
