Package com.salesforce.phoenix.schema

Examples of com.salesforce.phoenix.schema.TableRef
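As the snippets below show, a TableRef wraps a resolved PTable together with an optional alias, a timestamp, and a boolean flag (dynamic columns in this Phoenix version), and the table's physical name is what gets handed to the HBase layer, for example when looking up regions. A minimal sketch of resolving one from a JDBC connection, modeled on the getTableRef() test helper further down (the helper name and the schema/table parameters are placeholders, not part of Phoenix):

    // Hypothetical helper, following the pattern of the getTableRef() test helper below.
    private static TableRef resolveTableRef(Connection conn, String schemaName, String tableName, long ts) throws SQLException {
        // Unwrap the Phoenix-specific connection to reach the client-side metadata cache.
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable table = pconn.getPMetaData().getTable(SchemaUtil.getTableName(schemaName, tableName));
        // Arguments: alias = null, resolved table, timestamp, hasDynamicCols = false (constructor order as used in the snippets below).
        return new TableRef(null, table, ts, false);
    }

From the returned TableRef, tableRef.getTable().getPhysicalName().getBytes() yields the HBase table name used for the region and cache lookups in the excerpts below.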


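        // Excerpt: iterate the per-table mutation maps, keyed by TableRef, check each table for
        // index maintainers, and build the row mutations to send at the server timestamp.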
        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
        List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> committedList = Lists.newArrayListWithCapacity(this.mutations.size());
        while (iterator.hasNext()) {
            Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry = iterator.next();
            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> valuesMap = entry.getValue();
            TableRef tableRef = entry.getKey();
            PTable table = tableRef.getTable();
            table.getIndexMaintainers(tempPtr);
            boolean hasIndexMaintainers = tempPtr.getLength() > 0;
            boolean isDataTable = true;
            long serverTimestamp = serverTimeStamps[i++];
            Iterator<Pair<byte[],List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false);



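    // Excerpt: remove a server-side cache entry; the TableRef that registered the cache supplies
    // the physical table name used to look up the regions the cache was sent to.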
     * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
     */
    private void removeServerCache(byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        Throwable lastThrowable = null;
        TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
        byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
        HTableInterface iterateOverTable = services.getTable(tableName);
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /**
         * Allow for the possibility that the region we based where to send our cache has split and been

        conn.close();
    }

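    // Test helper: resolve a TableRef for STABLE_SCHEMA_NAME.STABLE_NAME at timestamp ts.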
    private static TableRef getTableRef(Connection conn, long ts) throws SQLException {
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        TableRef table = new TableRef(null, pconn.getPMetaData().getTable(SchemaUtil.getTableName(STABLE_SCHEMA_NAME, STABLE_NAME)), ts, false);
        return table;
    }

        return table;
    }
   
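    // Test helper: compute the parallel-scan key ranges for the table behind tableRef, restricting
    // the region splitter to the regions that intersect the scan's start/stop rows.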
    private static List<KeyRange> getSplits(Connection conn, long ts, final Scan scan)
            throws SQLException {
        TableRef tableRef = getTableRef(conn, ts);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        final List<HRegionLocation> regions =  pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
        StatementContext context = new StatementContext(new PhoenixStatement(pconn), null, Collections.emptyList(), scan);
        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef, HintNode.EMPTY_HINT_NODE) {
            @Override
            protected List<HRegionLocation> getAllRegions() throws SQLException {
                return DefaultParallelIteratorRegionSplitter.filterRegions(regions, scan.getStartRow(), scan.getStopRow());

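        // Excerpt: refresh statistics for the table behind the TableRef, then verify the number of
        // splits produced for a scan bounded by K1.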
        Connection conn = DriverManager.getConnection(url, props);

        Scan scan = new Scan();
       
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
        TableRef table = getTableRef(conn, ts);
        services.getStatsManager().updateStats(table);
        scan.setStartRow(HConstants.EMPTY_START_ROW);
        scan.setStopRow(K1);
        List<KeyRange> keyRanges = getSplits(conn, ts, scan);
        assertEquals("Unexpected number of splits: " + keyRanges, 3, keyRanges.size());

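        // Excerpt: open a connection at timestamp ts and resolve the TableRef used to exercise the
        // stats manager's update-frequency and max-age settings.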
        long ts = nextTimestamp();
        initTableValues(ts);
        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
        Properties props = new Properties(TEST_PROPERTIES);
        Connection conn = DriverManager.getConnection(url, props);
        TableRef table = getTableRef(conn, ts);

        int updateFreq = 5;
        int maxAge = 10;
        int startTime = 100;
        long waitTime = 5000;

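        // Excerpt: create the test table, resolve its TableRef and region locations, then compare the
        // computed splits against the expected ones.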
        createTestTable(getUrl(), DDL, splits, ts - 2);
        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
        Properties props = new Properties(TEST_PROPERTIES);
        Connection conn = DriverManager.getConnection(url, props);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        TableRef tableRef = new TableRef(null, pconn.getPMetaData().getTable(SchemaUtil.getTableName(SCHEMA_NAME, TABLE_NAME)), ts, false);
        List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
       
        conn.close();
        initTableValues();
        List<KeyRange> ranges = getSplits(tableRef, scan, regions, scanRanges);
        assertEquals("Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size());
