Package com.datastax.driver.core.querybuilder

Examples of com.datastax.driver.core.querybuilder.Select


                        insert.value(cm.getColumnName(), bindMarker());
                    return insert.toString();
                }
            case GET:
                {
                    Select select = table == null
                                  ? select().all().from(mapper.getKeyspace(), mapper.getTable())
                                  : select().all().from(table);
                    Select.Where where = select.where();
                    for (int i = 0; i < mapper.primaryKeySize(); i++)
                        where.and(eq(mapper.getPrimaryKeyColumn(i).getColumnName(), bindMarker()));
                    return select.toString();
                }
            case DEL:
                {
                    Delete delete = table == null
                                  ? delete().all().from(mapper.getKeyspace(), mapper.getTable())
                                  : delete().all().from(table);
                    Delete.Where where = delete.where();
                    for (int i = 0; i < mapper.primaryKeySize(); i++)
                        where.and(eq(mapper.getPrimaryKeyColumn(i).getColumnName(), bindMarker()));
                    return delete.toString();
                }
            case SLICE:
            case REVERSED_SLICE:
                {
                    Select select = table == null
                                  ? select().all().from(mapper.getKeyspace(), mapper.getTable())
                                  : select().all().from(table);
                    Select.Where where = select.where();
                    for (int i = 0; i < mapper.partitionKeys.size(); i++)
                        where.and(eq(mapper.partitionKeys.get(i).getColumnName(), bindMarker()));

                    if (startBoundSize > 0) {
                        if (startBoundSize == 1) {
                            String name = mapper.clusteringColumns.get(0).getColumnName();
                            where.and(startInclusive ? gte(name, bindMarker()) : gt(name, bindMarker()));
                        } else {
                            List<String> names = new ArrayList<String>(startBoundSize);
                            List<Object> values = new ArrayList<Object>(startBoundSize);
                            for (int i = 0; i < startBoundSize; i++) {
                                names.add(mapper.clusteringColumns.get(i).getColumnName());
                                values.add(bindMarker());
                            }
                            where.and(startInclusive ? gte(names, values) : gt(names, values));
                        }
                    }

                    if (endBoundSize > 0) {
                        if (endBoundSize == 1) {
                            String name = mapper.clusteringColumns.get(0).getColumnName();
                            where.and(endInclusive ? lte(name, bindMarker()) : lt(name, bindMarker()));
                        } else {
                            List<String> names = new ArrayList<String>(endBoundSize);
                            List<Object> values = new ArrayList<Object>(endBoundSize);
                            for (int i = 0; i < endBoundSize; i++) {
                                names.add(mapper.clusteringColumns.get(i).getColumnName());
                                values.add(bindMarker());
                            }
                            where.and(endInclusive ? lte(names, values) : lt(names, values));
                        }
                    }

                    select = select.limit(bindMarker());

                    if (kind == Kind.REVERSED_SLICE)
                        select = select.orderBy(desc(mapper.clusteringColumns.get(0).getColumnName()));

                    return select.toString();
                }
        }
        throw new AssertionError();
    }
View Full Code Here
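The snippet above appears to come from the driver's object mapper and builds a bounded slice query over clustering columns using bind markers. As a smaller illustration, the sketch below builds a single-column slice the same way; the keyspace, table, and column names ("ks", "events", partition key "id", clustering column "ts") are hypothetical, so treat this as a sketch rather than driver code.

import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import com.datastax.driver.core.querybuilder.Select;

public class SliceQuerySketch {
    // Builds roughly: SELECT * FROM ks.events WHERE id=? AND ts>=? AND ts<? LIMIT ? [ORDER BY ts DESC]
    public static String buildSliceQuery(boolean startInclusive, boolean reversed) {
        Select select = select().all().from("ks", "events");        // hypothetical table
        select.where(eq("id", bindMarker()))                        // partition key restriction
              .and(startInclusive ? gte("ts", bindMarker())         // inclusive or exclusive start bound
                                  : gt("ts", bindMarker()))
              .and(lt("ts", bindMarker()));                         // exclusive end bound
        select = select.limit(bindMarker());                        // limit is bound at execution time
        if (reversed) {
            select = select.orderBy(desc("ts"));                    // reversed slice
        }
        return select.toString();
    }
}

The resulting string can then be passed to Session.prepare() and bound with concrete values, which is what the mapper code above does with the query strings it returns.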


        if (needToGetBatch) {
            if (batchListener != null)
                batchListener.beforeFetchingNextBatch();

            String rowKeyString = StandardConverters.convertFromBytes(String.class, rowKey);
            Select selectQuery = QueryBuilder.select().all().from(keySpace, indTable).allowFiltering();
            Where whereClause = Cql3Util.createRowQuery(from, to, columnMeta, selectQuery, rowKeyString, indTable);
            Query query = null;

            if (batchSize != null)
                query = whereClause.limit(batchSize);
View Full Code Here

        List<Future<ResultSet>> futures = new ArrayList<Future<ResultSet>>();

        String rowKeyString = StandardConverters.convertFromBytes(String.class, rowKey);

        for (byte[] val : values) {
            Select selectQuery = QueryBuilder.select().all().from(keySpace, indTable).allowFiltering();
            Where selectWhere = selectQuery.where();
            Clause rkClause = QueryBuilder.eq("id", rowKeyString);
            selectWhere.and(rkClause);

            Object value = columnMeta.getStorageType().convertFromNoSql(val);
View Full Code Here

            session.execute(query);
        }
    }

    public boolean findIndexRow(String table, String rowKey, byte[] key, Object indValue) {
        Select selectQuery = QueryBuilder.select().all().from(keys, table).allowFiltering();
        Where selectWhere = selectQuery.where();
        Clause rkClause = QueryBuilder.eq("id", rowKey);
        selectWhere.and(rkClause);
        Clause indClause = null;
        if (indValue != null) {
            indClause = QueryBuilder.eq("colname", indValue);
View Full Code Here
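The three index-table snippets above follow the same pattern: SELECT * ... ALLOW FILTERING, restricted by an "id" column and an indexed value, then executed through a Session. A minimal self-contained sketch of that pattern, with hypothetical keyspace, table, and column names, might look like this:

import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Select;

public class IndexLookupSketch {
    public static boolean indexRowExists(Session session, String rowKey, Object indexedValue) {
        Select select = select().all().from("ks", "index_table").allowFiltering();
        select.where(eq("id", rowKey))              // row key restriction
              .and(eq("colname", indexedValue));    // indexed value restriction
        ResultSet rs = session.execute(select);
        return rs.one() != null;                    // any row back means the index entry exists
    }
}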

        List<CassandraColumnHandle> partitionKeyColumns = table.getPartitionKeyColumns();

        boolean fullPartitionKey = filterPrefix.size() == partitionKeyColumns.size();
        ResultSetFuture countFuture;
        if (!fullPartitionKey) {
            final Select countAll = CassandraCqlUtils.selectCountAllFrom(tableHandle).limit(limitForPartitionKeySelect);
            countFuture = executeWithSession(schemaName, new SessionCallable<ResultSetFuture>() {
                @Override
                public ResultSetFuture executeWithSession(Session session)
                {
                    return session.executeAsync(countAll);
                }
            });
        }
        else {
            // no need to count if partition key is completely known
            countFuture = null;
        }

        int limit = fullPartitionKey ? 1 : limitForPartitionKeySelect;
        final Select partitionKeys = CassandraCqlUtils.selectDistinctFrom(tableHandle, partitionKeyColumns);
        partitionKeys.limit(limit);
        partitionKeys.setFetchSize(fetchSizeForPartitionKeySelect);

        if (!fullPartitionKey) {
            addWhereClause(partitionKeys.where(), partitionKeyColumns, new ArrayList<Comparable<?>>());
            ResultSetFuture partitionKeyFuture = executeWithSession(schemaName, new SessionCallable<ResultSetFuture>() {
                @Override
                public ResultSetFuture executeWithSession(Session session)
                {
                    return session.executeAsync(partitionKeys);
                }
            });

            long count = countFuture.getUninterruptibly().one().getLong(0);
            if (count == limitForPartitionKeySelect) {
                partitionKeyFuture.cancel(true);
                return null; // too much effort to query all partition keys
            }
            else {
                return partitionKeyFuture.getUninterruptibly();
            }
        }
        else {
            addWhereClause(partitionKeys.where(), partitionKeyColumns, filterPrefix);
            ResultSetFuture partitionKeyFuture = executeWithSession(schemaName, new SessionCallable<ResultSetFuture>() {
                @Override
                public ResultSetFuture executeWithSession(Session session)
                {
                    return session.executeAsync(partitionKeys);
View Full Code Here

        List<CassandraColumnHandle> partitionKeyColumns = table.getPartitionKeyColumns();

        boolean fullPartitionKey = filterPrefix.size() == partitionKeyColumns.size();
        ResultSetFuture countFuture;
        if (!fullPartitionKey) {
            Select countAll = CassandraCqlUtils.selectCountAllFrom(tableHandle).limit(limitForPartitionKeySelect);
            countFuture = session.executeAsync(countAll);
        }
        else {
            // no need to count if partition key is completely known
            countFuture = null;
        }

        int limit = fullPartitionKey ? 1 : limitForPartitionKeySelect;
        Select partitionKeys = CassandraCqlUtils.selectDistinctFrom(tableHandle, partitionKeyColumns);
        partitionKeys.limit(limit);
        partitionKeys.setFetchSize(fetchSizeForPartitionKeySelect);
        addWhereClause(partitionKeys.where(), partitionKeyColumns, filterPrefix);
        ResultSetFuture partitionKeyFuture = session.executeAsync(partitionKeys);

        if (!fullPartitionKey) {
            long count = countFuture.getUninterruptibly().one().getLong(0);
            if (count == limitForPartitionKeySelect) {
View Full Code Here
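The two partition-key snippets above cap a DISTINCT partition-key query with a LIMIT, set a fetch size, and execute it asynchronously, while a parallel count(*) query guards against tables with too many partitions. A rough standalone equivalent, using plain QueryBuilder calls instead of the CassandraCqlUtils helpers used above and hypothetical names ("ks", "tbl", partition key column "pk"), is sketched below.

import static com.datastax.driver.core.querybuilder.QueryBuilder.select;

import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Select;

public class PartitionKeyListingSketch {
    public static ResultSetFuture listPartitionKeys(Session session, int limit, int fetchSize) {
        Select partitionKeys = select().distinct().column("pk").from("ks", "tbl");
        partitionKeys.limit(limit);             // stop early if there are too many partitions
        partitionKeys.setFetchSize(fetchSize);  // page results instead of loading them all at once
        return session.executeAsync(partitionKeys);
    }
}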

      CassandraColumnName column,
      KijiDataRequest dataRequest,
      Column columnRequest
  ) {
    Preconditions.checkArgument(column.containsQualifier());
    final Select select =
        select()
            .all()
            .from(table.getKeyspace(), table.getTable())
            .where(eq(FAMILY_COL, column.getFamilyBuffer()))
            .and(eq(QUALIFIER_COL, column.getQualifierBuffer()))
            .limit(columnRequest.getMaxVersions());

    if (dataRequest.getMaxTimestamp() != Long.MAX_VALUE) {
      select.where(lt(VERSION_COL, dataRequest.getMaxTimestamp()));
    }

    if (dataRequest.getMinTimestamp() != 0L) {
      select.where(gte(VERSION_COL, dataRequest.getMinTimestamp()));
    }

    select.setFetchSize(
        columnRequest.getPageSize() == 0 ? Integer.MAX_VALUE : columnRequest.getPageSize());

    for (final Map.Entry<String, Object> component
        : getEntityIdColumnValues(layout, entityId).entrySet()) {
      select.where(eq(component.getKey(), component.getValue()));
    }

    return select;
  }
View Full Code Here
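The Kiji snippet above restricts on family and qualifier, optionally bounds the version column, and limits the number of versions returned. A condensed sketch of the same shape is shown below; the keyspace, table, and column names ("ks", "tbl", "family", "qualifier", "version") are hypothetical. Note that repeated where() calls on a Select simply accumulate clauses.

import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import com.datastax.driver.core.querybuilder.Select;

public class VersionedGetSketch {
    public static Select versionedGet(String family, String qualifier,
                                      long minTimestamp, long maxTimestamp, int maxVersions) {
        Select select = select().all().from("ks", "tbl");
        select.where(eq("family", family))
              .and(eq("qualifier", qualifier))
              .limit(maxVersions);                       // at most maxVersions cells
        if (maxTimestamp != Long.MAX_VALUE) {
            select.where(lt("version", maxTimestamp));   // exclusive upper bound on version
        }
        if (minTimestamp != 0L) {
            select.where(gte("version", minTimestamp));  // inclusive lower bound on version
        }
        return select;
    }
}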

      // We can optimize and use a DISTINCT clause because all entity ID columns are in the
      // partition key. CQL does not allow DISTINCT over non-partition-key columns.
      selection.distinct();
    }

    final Select select = selection.from(table.getKeyspace(), table.getTable());

    if (options.hasStartToken()) {
      select.where(gte(tokenColumn, options.getStartToken()));
    }

    if (options.hasStopToken()) {
      select.where(lt(tokenColumn, options.getStopToken()));
    }

    select.setFetchSize(ENTITY_ID_BATCH_SIZE);

    return select;
  }
View Full Code Here
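The snippet above bounds a full-table scan by token range. A minimal sketch of that idea with hypothetical names ("ks", "tbl", partition key column "key"), assuming Murmur3 long tokens and using QueryBuilder.token() to build the token expression, could look like this:

import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import com.datastax.driver.core.querybuilder.Select;

public class TokenRangeScanSketch {
    public static Select tokenRangeScan(long startToken, long stopToken, int fetchSize) {
        Select select = select().all().from("ks", "tbl");
        select.where(gte(token("key"), startToken))   // token(key) >= startToken
              .and(lt(token("key"), stopToken));      // token(key) <  stopToken
        select.setFetchSize(fetchSize);
        return select;
    }
}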

     * @param session the session used to execute the query
     * @return the number of rows in the column family
     */
    public Long count(Class<?> bean, Session session,
            ConsistencyLevel consistency) {
        Select select = prepareCount(bean, consistency);
        ResultSet resultSet = session.execute(select);
        return resultSet.all().get(0).getLong(0);
    }
View Full Code Here
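The count example above can be reduced to a short self-contained sketch (hypothetical keyspace/table names): a SELECT count(*) built with countAll(), whose single result row carries the count in column 0.

import static com.datastax.driver.core.querybuilder.QueryBuilder.select;

import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Select;

public class CountSketch {
    public static long countRows(Session session) {
        Select count = select().countAll().from("ks", "tbl");
        return session.execute(count).one().getLong(0);   // single row: the count
    }
}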
