Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.RegionScanner
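
All of the snippets below share one usage pattern: obtain a RegionScanner from an HRegion, drain it row by row with next(), and close it in a finally block. As a minimal sketch (the region and scan variables are placeholders, and the KeyValue-based signatures assume the 0.94-era HBase API these examples use):

    // Minimal RegionScanner loop (sketch only; hypothetical region/scan variables).
    RegionScanner scanner = region.getScanner(scan);
    List<KeyValue> results = new ArrayList<KeyValue>();
    try {
        boolean hasMore;
        do {
            results.clear();
            // next() fills 'results' with one row's KeyValues and returns
            // whether more rows follow the ones just returned.
            hasMore = scanner.next(results);
            // ... process the row ...
        } while (hasMore);
    } finally {
        scanner.close(); // always release the scanner's resources
    }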


     * @param arrayFuncRefs expressions evaluated against each scanned row
     * @param arrayKVRefs   the KeyValue column references those expressions read
     */
    private RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c, final RegionScanner s,
           final List<KeyValueColumnExpression> arrayKVRefs, final Expression[] arrayFuncRefs) {
        return new RegionScanner() {

            @Override
            public boolean next(List<KeyValue> results) throws IOException {
                try {
                    return s.next(results);


                + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkParentNode;
        }
     
        @Override
        public void run() {
            RegionScanner scanner = null;
            PhoenixConnection conn = null;
            if (inProgress.get() > 0) {
                LOG.debug("New ScheduledBuildIndexTask skipped as there is already one running");
                return;
            }
            try {
                inProgress.incrementAndGet();
                Scan scan = new Scan();
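                // Match only rows whose INDEX_DISABLE_TIMESTAMP cell is present
                // and non-zero, i.e. indexes that have been flagged as disabled.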
                SingleColumnValueFilter filter = new SingleColumnValueFilter(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                    PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES,
                    CompareFilter.CompareOp.NOT_EQUAL, PDataType.LONG.toBytes(0L));
                filter.setFilterIfMissing(true);
                scan.setFilter(filter);
                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                    PhoenixDatabaseMetaData.TABLE_NAME_BYTES);
                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                    PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                    PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
                scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                    PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);

                boolean hasMore = false;
                List<KeyValue> results = new ArrayList<KeyValue>();
                scanner = this.env.getRegion().getScanner(scan);

                do {
                    results.clear();
                    hasMore = scanner.next(results);
                    if (results.isEmpty()) break;

                    Result r = new Result(results);
                    byte[] disabledTimeStamp = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                        PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);

                    if (disabledTimeStamp == null || disabledTimeStamp.length == 0) {
                        continue;
                    }

                    // disabledTimeStamp has to be a positive value
                    long disabledTimeStampVal = (Long) PDataType.LONG.toObject(disabledTimeStamp);
                    if (disabledTimeStampVal <= 0) {
                        continue;
                    }

                    byte[] dataTable = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                        PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
                    byte[] indexStat = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                        PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
                    if ((dataTable == null || dataTable.length == 0)
                            || (indexStat == null || indexStat.length == 0)
                            || ((Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexStat) != 0)
                                    && (Bytes.compareTo(PIndexState.INACTIVE.getSerializedBytes(), indexStat) != 0))) {
                        // index has to be either in disable or inactive state
                        // data table name can't be empty
                        continue;
                    }

                    byte[][] rowKeyMetaData = new byte[3][];
                    SchemaUtil.getVarChars(r.getRow(), 3, rowKeyMetaData);
                    byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
                    byte[] indexTable = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];

                    // validity check
                    if (indexTable == null || indexTable.length == 0) {
                        LOG.debug("Index rebuild has been skipped for row=" + r);
                        continue;
                    }

                    if (conn == null) {
                        conn = DriverManager.getConnection(getJdbcUrl()).unwrap(PhoenixConnection.class);
                    }

                    String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
                    String indexTableFullName = SchemaUtil.getTableName(schemaName, indexTable);
                    PTable dataPTable = PhoenixRuntime.getTable(conn, dataTableFullName);
                    PTable indexPTable = PhoenixRuntime.getTable(conn, indexTableFullName);
                    if (!MetaDataUtil.tableRegionsOnline(this.env.getConfiguration(), indexPTable)) {
                        LOG.debug("Index rebuild has been skipped because not all regions of index table="
                                + indexPTable.getName() + " are online.");
                        continue;
                    }

                    MetaDataClient client = new MetaDataClient(conn);
                    long overlapTime = env.getConfiguration().getLong(
                        QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB,
                        QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME);
                    long timeStamp = Math.max(0, disabledTimeStampVal - overlapTime);

                    LOG.info("Starting to build index=" + indexPTable.getName() + " from timestamp=" + timeStamp);
                    client.buildPartialIndexFromTimeStamp(indexPTable, new TableRef(dataPTable, Long.MAX_VALUE, timeStamp));

                } while (hasMore);
            } catch (Throwable t) {
                LOG.warn("ScheduledBuildIndexTask failed!", t);
            } finally {
                inProgress.decrementAndGet();
                if (scanner != null) {
                    try {
                        scanner.close();
                    } catch (IOException ignored) {
                        LOG.debug("ScheduledBuildIndexTask can't close scanner.", ignored);
                    }
                }
                if (conn != null) {

        return (RegionCoprocessorEnvironment)super.getEnvironment();
    }
   
    private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region, long clientTimeStamp) throws IOException, SQLException {
        Scan scan = newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
        RegionScanner scanner = region.getScanner(scan);
        Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
        try {
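            // The cached copy, if any, supplies the timestamp the freshly scanned
            // table is compared against before the cache is updated below.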
            PTable oldTable = metaDataCache.getIfPresent(cacheKey);
            long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP - 1 : oldTable.getTimeStamp();
            PTable newTable = getTable(scanner, clientTimeStamp, tableTimeStamp);
            if (newTable == null) {
                return null;
            }
            if (oldTable == null || tableTimeStamp < newTable.getTimeStamp()) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Caching table " + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), cacheKey.getLength()) + " at seqNum " + newTable.getSequenceNumber() + " with newer timestamp " + newTable.getTimeStamp() + " versus " + tableTimeStamp);
                }
                metaDataCache.put(cacheKey, newTable);
            }
            return newTable;
        } finally {
            scanner.close();
        }
    }

        }
       
        Scan scan = newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
        scan.setFilter(new FirstKeyOnlyFilter());
        scan.setRaw(true);
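        // A raw scan also returns delete markers, which the kv.isDelete()
        // check below relies on.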
        RegionScanner scanner = region.getScanner(scan);
        List<KeyValue> results = Lists.<KeyValue>newArrayList();
        scanner.next(results);
        // HBase ignores the time range on a raw scan (HBASE-7362)
        if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
            KeyValue kv = results.get(0);
            if (kv.isDelete()) {
                Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();

        byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil.getTableNameAsBytes(schemaName, tableName));
        SuffixFilter rowFilter = new SuffixFilter(suffix);
        Filter filter = new FilterList(linkFilter, rowFilter);
        scan.setFilter(filter);
        scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
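        // Any row that survives the filters proves a matching link row exists,
        // so a single returned KeyValue is enough to answer the question.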
        RegionScanner scanner = region.getScanner(scan);
        try {
            List<KeyValue> results = newArrayList();
            scanner.next(results);
            return !results.isEmpty();
        }
        finally {
            scanner.close();
        }
    }

        }
        // Since we don't allow back in time DDL, we know if we have a table it's the one
        // we want to delete. FIXME: we shouldn't need a scan here, but should be able to
        // use the table to generate the Delete markers.
        Scan scan = newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
        RegionScanner scanner = region.getScanner(scan);
        List<KeyValue> results = Lists.newArrayList();
        scanner.next(results);
        if (results.isEmpty()) { // Should not be possible
            return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        if (hasViews(region, tenantId, table)) {
            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        if (tableType != PTableType.VIEW) { // Add to list of HTables to delete, unless it's a view
            tableNamesToDelete.add(table.getName().getBytes());
        }
        List<byte[]> indexNames = Lists.newArrayList();
        invalidateList.add(cacheKey);
        byte[][] rowKeyMetaData = new byte[5][];
        byte[] rowKey;
        do {
            KeyValue kv = results.get(LINK_TYPE_INDEX);
            rowKey = kv.getRow();
            int nColumns = getVarChars(rowKey, rowKeyMetaData);
            if (nColumns == 5 && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX].length > 0
                    && Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(), LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0
                    && LinkType.fromSerializedValue(kv.getBuffer()[kv.getValueOffset()]) == LinkType.INDEX_TABLE) {
                indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX]);
            }
            @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
            // FIXME: the version of the Delete constructor without the lock args was introduced
            // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
            // of the client.
            Delete delete = new Delete(rowKey, clientTimeStamp, null);
            rowsToDelete.add(delete);
            results.clear();
            scanner.next(results);
        } while (!results.isEmpty());
       
        // Recursively delete indexes
        for (byte[] indexName : indexNames) {
            byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName);

    // need to use a scan here so we can get raw state, which Get doesn't provide.
    Scan s = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columns));
    s.setStartRow(row);
    s.setStopRow(row);
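    // Identical start and stop rows pin the scan to exactly one row (a get-style scan).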
    HRegion region = this.env.getRegion();
    RegionScanner scanner = region.getScanner(s);
    List<KeyValue> kvs = new ArrayList<KeyValue>(1);
    boolean more = scanner.next(kvs);
    assert !more : "Got more than one result when scanning a single row in the primary table!";

    Result r = new Result(kvs);
    scanner.close();
    return r;
  }

    @Override
    protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
        final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
        final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
        RegionScanner theScanner = s;
        if (p != null || j != null)  {
            theScanner = new HashJoinRegionScanner(s, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
        }
        final RegionScanner innerScanner = theScanner;
       
        byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
        PTable projectedTable = null;
        List<Expression> selectExpressions = null;
        byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
        boolean isUpsert = false;
        boolean isDelete = false;
        byte[] deleteCQ = null;
        byte[] deleteCF = null;
        byte[][] values = null;
        byte[] emptyCF = null;
        ImmutableBytesWritable ptr = null;
        if (upsertSelectTable != null) {
            isUpsert = true;
            projectedTable = deserializeTable(upsertSelectTable);
            selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
            values = new byte[projectedTable.getPKColumns().size()][];
            ptr = new ImmutableBytesWritable();
        } else {
            byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
            isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
            if (!isDelete) {
                deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
                deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
            }
            emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
        }
       
        int batchSize = 0;
        long ts = scan.getTimeRange().getMax();
        HRegion region = c.getEnvironment().getRegion();
        List<Pair<Mutation,Integer>> mutations = Collections.emptyList();
        if (isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null) {
            // TODO: size better
            mutations = Lists.newArrayListWithExpectedSize(1024);
            batchSize = c.getEnvironment().getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        }
        Aggregators aggregators = ServerAggregators.deserialize(
                scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), c.getEnvironment().getConfiguration());
        Aggregator[] rowAggregators = aggregators.getAggregators();
        boolean hasMore;
        boolean hasAny = false;
        MultiKeyValueTuple result = new MultiKeyValueTuple();
        if (logger.isInfoEnabled()) {
          logger.info("Starting ungrouped coprocessor scan " + scan);
        }
        long rowCount = 0;
        MultiVersionConsistencyControl.setThreadReadPoint(innerScanner.getMvccReadPoint());
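        // Pin this thread's MVCC read point to the scanner's so that nextRaw
        // sees a consistent snapshot of the region.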
        region.startRegionOperation();
        try {
            do {
                List<KeyValue> results = new ArrayList<KeyValue>();
                // Results can be returned even when nextRaw reports false, since the
                // return value only indicates whether more rows follow the ones returned.
                hasMore = innerScanner.nextRaw(results, null);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    try {
                        if (isDelete) {
                            @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
                            // FIXME: the version of the Delete constructor without the lock args was introduced
                            // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
                            // of the client.
                            Delete delete = new Delete(results.get(0).getRow(),ts,null);
                            mutations.add(new Pair<Mutation,Integer>(delete,null));
                        } else if (isUpsert) {
                            Arrays.fill(values, null);
                            int i = 0;
                            List<PColumn> projectedColumns = projectedTable.getColumns();
                            for (; i < projectedTable.getPKColumns().size(); i++) {
                                Expression expression = selectExpressions.get(i);
                                if (expression.evaluate(result, ptr)) {
                                    values[i] = ptr.copyBytes();
                                    // If SortOrder from expression in SELECT doesn't match the
                                    // column being projected into then invert the bits.
                                    if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                        SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                    }
                                }
                            }
                            projectedTable.newKey(ptr, values);
                            PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
                            for (; i < projectedColumns.size(); i++) {
                                Expression expression = selectExpressions.get(i);
                                if (expression.evaluate(result, ptr)) {
                                    PColumn column = projectedColumns.get(i);
                                    Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                                    // We are guaranteed that the two columns will have the same type.
                                    if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                                            expression.getMaxLength(), expression.getScale(),
                                            column.getMaxLength(), column.getScale())) {
                                        throw new ValueTypeIncompatibleException(column.getDataType(),
                                                column.getMaxLength(), column.getScale());
                                    }
                                    column.getDataType().coerceBytes(ptr, value, expression.getDataType(),
                                            expression.getMaxLength(), expression.getScale(), expression.getSortOrder(),
                                            column.getMaxLength(), column.getScale(), column.getSortOrder());
                                    byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                    row.setValue(column, bytes);
                                }
                            }
                            for (Mutation mutation : row.toRowMutations()) {
                                mutations.add(new Pair<Mutation,Integer>(mutation,null));
                            }
                        } else if (deleteCF != null && deleteCQ != null) {
                            // No need to search for the delete column: it is the only
                            // column projected when no empty key value is being set.
                            if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                                Delete delete = new Delete(results.get(0).getRow());
                                delete.deleteColumns(deleteCF, deleteCQ, ts);
                                mutations.add(new Pair<Mutation,Integer>(delete,null));
                            }
                        }
                        if (emptyCF != null) {
                            /*
                             * If we've specified an emptyCF, then we need to insert an empty
                             * key value "retroactively" for any key value that is visible at
                             * the timestamp that the DDL was issued. Key values that are not
                             * visible at this timestamp will not ever be projected up to
                             * scans past this timestamp, so don't need to be considered.
                             * We insert one empty key value per row per timestamp.
                             */
                            Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                            for (KeyValue kv : results) {
                                long kvts = kv.getTimestamp();
                                if (!timeStamps.contains(kvts)) {
                                    Put put = new Put(kv.getRow());
                                    put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                    mutations.add(new Pair<Mutation,Integer>(put,null));
                                }
                            }
                        }
                        // Commit in batches based on MUTATE_BATCH_SIZE_ATTRIB in config
                        if (!mutations.isEmpty() && batchSize > 0 && mutations.size() % batchSize == 0) {
                            commitBatch(region, mutations, indexUUID);
                            mutations.clear();
                        }
                    } catch (ConstraintViolationException e) {
                        // Log the failure and skip the row so it is not aggregated or counted
                        logger.error("Failed to create row in " + region.getRegionNameAsString() + " with values " + SchemaUtil.toString(values), e);
                        continue;
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
        } finally {
            innerScanner.close();
            region.closeRegionOperation();
        }
       
        if (logger.isInfoEnabled()) {
          logger.info("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan);
        }

        if (!mutations.isEmpty()) {
            commitBatch(region, mutations, indexUUID);
        }

        final boolean hadAny = hasAny;
        KeyValue keyValue = null;
        if (hadAny) {
            byte[] value = aggregators.toBytes(rowAggregators);
            keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        }
        final KeyValue aggKeyValue = keyValue;
       
        RegionScanner scanner = new BaseRegionScanner() {
            private boolean done = !hadAny;

            @Override
            public HRegionInfo getRegionInfo() {
                return innerScanner.getRegionInfo();

    for (Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
      if (entry.getValue() instanceof List) {
        familyMap1.put(new SimpleByteRange(entry.getKey()), (List<Cell>) entry.getValue());
      }
    }
    RegionScanner scanner = getRegion(e).getScanner(new Scan(get));
    List<Cell> cells = Lists.newArrayList();
    Cell prevCell = null;
    ByteRange curFam = new SimpleByteRange();
    boolean curColAllVersions = (request == OpType.DELETE);
    long curColCheckTs = opTs;
    boolean foundColumn = false;
    try {
      boolean more = false;
      do {
        cells.clear();
        // Scan with a limit of 1 to hold down memory use on wide rows.
        more = scanner.next(cells, 1);
        for (Cell cell: cells) {
          if (LOG.isTraceEnabled()) {
            LOG.trace("Found cell " + cell);
          }
          boolean colChange = prevCell == null || !CellUtil.matchingColumn(prevCell, cell);
          if (colChange) foundColumn = false;
          prevCell = cell;
          if (!curColAllVersions && foundColumn) {
            continue;
          }
          if (colChange && considerCellTs) {
            curFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
            List<Cell> cols = familyMap1.get(curFam);
            for (Cell col : cols) {
              // A null/empty qualifier denotes a family delete: its timestamp and
              // delete type apply to every column in the family. Hence the
              // (col.getQualifierLength() == 0) check below.
              if ((col.getQualifierLength() == 0 && request == OpType.DELETE)
                  || CellUtil.matchingQualifier(cell, col)) {
                byte type = col.getTypeByte();
                if (considerCellTs) {
                  curColCheckTs = col.getTimestamp();
                }
                // For a Delete op we pass allVersions as true. When a Delete mutation
                // contains a version delete for a column, there is no need to check all
                // the covering cells within that column. Check all versions when the type
                // is DeleteColumn or DeleteFamily; the single-version delete types are
                // Delete and DeleteFamilyVersion.
                curColAllVersions = (KeyValue.Type.DeleteColumn.getCode() == type)
                    || (KeyValue.Type.DeleteFamily.getCode() == type);
                break;
              }
            }
          }
          if (cell.getTimestamp() > curColCheckTs) {
            // Just ignore this cell. This is not a covering cell.
            continue;
          }
          foundColumn = true;
          for (Action action: actions) {
            // Are there permissions for this user for the cell?
            if (!authManager.authorize(user, getTableName(e), cell, action)) {
              // We can stop if the cell ACL denies access
              return false;
            }
          }
          cellGrants++;
        }
      } while (more);
    } catch (AccessDeniedException ex) {
      throw ex;
    } catch (IOException ex) {
      LOG.error("Exception while getting cells to calculate covering permission", ex);
    } finally {
      scanner.close();
    }
    // We should not authorize unless we have found one or more cell ACLs that
    // grant access. This code is used to check for additional permissions
    // after no table or CF grants are found.
    return cellGrants > 0;

    Coprocessor c = region.getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());

    // HBASE-4197
    Scan s = new Scan();
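    // postScannerOpen lets the coprocessor wrap the region scanner; the
    // assertion below checks that the wrapped type is what comes back.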
    RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
    assertTrue(scanner instanceof CustomScanner);
    // this would throw an exception before HBASE-4197
    scanner.next(new ArrayList<Cell>());

    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
