Package org.apache.hadoop.hbase.filter

Examples of org.apache.hadoop.hbase.filter.Filter


    // Make sure key is of some substance... non-null and > than first key.
    assertTrue(key != null && key.length > 0 &&
      Bytes.BYTES_COMPARATOR.compare(key, new byte [] {'a', 'a', 'a'}) >= 0);
    LOG.info("Key=" + Bytes.toString(key));
    Scan s = startRow == null ? new Scan() : new Scan(startRow);
    Filter f = new RowFilter(op, new BinaryComparator(key));
    f = new WhileMatchFilter(f);
    s.setFilter(f);
    return s;
  }
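
The fragment above wraps a RowFilter in a WhileMatchFilter so the scan terminates at the first row that fails the comparison instead of merely skipping it. Below is a self-contained sketch of the same pattern, assuming the pre-1.0 HTable client API used throughout these excerpts; the table name "testtable", the row keys, and the cutoff value are illustrative, not taken from the code above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.RowFilter;
    import org.apache.hadoop.hbase.filter.WhileMatchFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WhileMatchRowFilterExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "testtable");        // hypothetical table
        try {
          // Include rows while rowkey <= "row-500"; WhileMatchFilter ends the
          // scan as soon as the wrapped RowFilter rejects a row.
          RowFilter byRow = new RowFilter(CompareOp.LESS_OR_EQUAL,
              new BinaryComparator(Bytes.toBytes("row-500")));
          Scan scan = new Scan();
          scan.setFilter(new WhileMatchFilter(byRow));
          ResultScanner scanner = table.getScanner(scan);
          try {
            for (Result r : scanner) {
              System.out.println(Bytes.toString(r.getRow()));
            }
          } finally {
            scanner.close();
          }
        } finally {
          table.close();
        }
      }
    }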


      put.add(FAMILY, QUALIFIERS[i], VALUE);
      ht.put(put);
    }
    Scan scan = new Scan();
    scan.addFamily(FAMILY);
    Filter filter = new QualifierFilter(CompareOp.EQUAL,
      new RegexStringComparator("col[1-5]"));
    scan.setFilter(filter);
    ResultScanner scanner = ht.getScanner(scan);
    int expectedIndex = 1;
    for (Result result : scanner) {   // iterate the scanner opened above, not a second one
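
The QualifierFilter above keeps only cells whose column qualifier matches the regular expression col[1-5]. A shorter standalone sketch of the same filter, reusing the table handle, imports, and assumed names from the first sketch:

    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("f"));                      // hypothetical family
    scan.setFilter(new QualifierFilter(CompareOp.EQUAL,
        new RegexStringComparator("col[1-5]")));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(r);                               // only col1..col5 cells survive
      }
    } finally {
      scanner.close();
    }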

      put.add(FAMILY, QUALIFIERS[i], VALUE);
      ht.put(put);
    }
    Scan scan = new Scan();
    scan.addFamily(FAMILY);
    Filter filter = new KeyOnlyFilter(true);
    scan.setFilter(filter);
    ResultScanner scanner = ht.getScanner(scan);
    int count = 0;
    for (Result result : scanner) {   // iterate the scanner opened above, not a second one
      assertEquals(1, result.size()); // expected value first, per JUnit convention
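
KeyOnlyFilter strips values on the server, so only row/column/timestamp metadata comes back; with the boolean argument set to true (as above) each value is replaced by the length of the original value encoded as a 4-byte int, which is handy for sizing rows without transferring their data. A minimal sketch under the same assumed setup:

    Scan scan = new Scan();
    scan.setFilter(new KeyOnlyFilter(true));       // true: value becomes the original value's length
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        KeyValue kv = r.raw()[0];                  // pre-1.0 API: raw() returns KeyValue[]
        int originalValueLength = Bytes.toInt(kv.getValue());
        System.out.println(Bytes.toString(r.getRow()) + " -> " + originalValueLength);
      }
    } finally {
      scanner.close();
    }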

        switch(qcode) {
          case INCLUDE:
          case INCLUDE_AND_SEEK_NEXT_ROW:
          case INCLUDE_AND_SEEK_NEXT_COL:

            Filter f = matcher.getFilter();
            outResult.add(f == null ? kv : f.transform(kv));
            count++;

            if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) {
              if (!matcher.moreRowsMayExistAfter(kv)) {
                return false;
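
This excerpt is region-server code rather than client code: for each cell the ScanQueryMatcher emits a match code, and for the INCLUDE* codes the cell is passed through the attached filter's transform() hook (that hook is how KeyOnlyFilter rewrites values) before being added to the results, while the *_SEEK_NEXT_ROW/COL variants let the scanner jump ahead instead of reading every intervening cell. Clients never see these codes; they only attach filters whose answers produce them. As a hedged client-side illustration, ColumnPrefixFilter is one of the stock filters whose column checks can translate into those seek-ahead codes (setup assumed as in the first sketch; the prefix is illustrative):

    Scan scan = new Scan();
    scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("col")));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(r);                     // only qualifiers starting with "col"
      }
    } finally {
      scanner.close();
    }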

            }

            if (maxMessagesPerPoll > 0) {
                filters.add(new PageFilter(maxMessagesPerPoll));
            }
            Filter compoundFilter = new FilterList(filters);
            scan.setFilter(compoundFilter);

            if (rowModel != null && rowModel.getCells() != null) {
                Set<HBaseCell> cellModels = rowModel.getCells();
                for (HBaseCell cellModel : cellModels) {
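
Here (a Camel HBase consumer) a PageFilter caps how many rows a single poll fetches, combined with other filters in a FilterList. One caveat: PageFilter is evaluated independently on each region server, so the client can still receive up to the page size per region and typically enforces the final limit itself. A standalone sketch (setup assumed as before; java.util and filter imports omitted):

    List<Filter> filters = new ArrayList<Filter>();
    filters.add(new PageFilter(25));               // at most 25 rows per region
    filters.add(new KeyOnlyFilter());              // second filter, just as an example
    Scan scan = new Scan();
    scan.setFilter(new FilterList(filters));       // default operator is MUST_PASS_ALL
    ResultScanner scanner = table.getScanner(scan);
    try {
      int returned = 0;
      for (Result r : scanner) {
        if (++returned > 25) {
          break;                                   // client-side cap across all regions
        }
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
    }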

    }
  }

  private void internalPreRead(final ObserverContext<RegionCoprocessorEnvironment> c,
      final Query query, OpType opType) throws IOException {
    Filter filter = query.getFilter();
    // Don't wrap an AccessControlFilter
    if (filter != null && filter instanceof AccessControlFilter) {
      return;
    }
    User user = getActiveUser();
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[],? extends Collection<byte[]>> families = null;
    switch (opType) {
    case GET:
    case EXISTS:
      families = ((Get)query).getFamilyMap();
      break;
    case SCAN:
      families = ((Scan)query).getFamilyMap();
      break;
    default:
      throw new RuntimeException("Unhandled operation " + opType);
    }
    AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ);
    HRegion region = getRegion(env);
    TableName table = getTableName(region);
    Map<ByteRange, Integer> cfVsMaxVersions = Maps.newHashMap();
    for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
      cfVsMaxVersions.put(new SimpleByteRange(hcd.getName()), hcd.getMaxVersions());
    }
    if (!authResult.isAllowed()) {
      if (!cellFeaturesEnabled || compatibleEarlyTermination) {
        // Old behavior: Scan with only qualifier checks if we have partial
        // permission. Backwards compatible behavior is to throw an
        // AccessDeniedException immediately if there are no grants for table
        // or CF or CF+qual. Only proceed with an injected filter if there are
        // grants for qualifiers. Otherwise we will fall through below and log
        // the result and throw an ADE. We may end up checking qualifier
        // grants three times (permissionGranted above, here, and in the
        // filter) but that's the price of backwards compatibility.
        if (hasFamilyQualifierPermission(user, Action.READ, env, families)) {
          Filter ourFilter = new AccessControlFilter(authManager, user, table,
            query.getACLStrategy() ? AccessControlFilter.Strategy.CHECK_CELL_FIRST :
              AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY,
            cfVsMaxVersions);
          // wrap any existing filter
          if (filter != null) {
            ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
              Lists.newArrayList(ourFilter, filter));
          }
          authResult.setAllowed(true);
          authResult.setReason("Access allowed with filter");
          switch (opType) {
          case GET:
          case EXISTS:
            ((Get)query).setFilter(ourFilter);
            break;
          case SCAN:
            ((Scan)query).setFilter(ourFilter);
            break;
          default:
            throw new RuntimeException("Unhandled operation " + opType);
          }
        }
      } else {
        // New behavior: Any access we might be granted is more fine-grained
        // than whole table or CF. Simply inject a filter and return what is
        // allowed. We will not throw an AccessDeniedException. This is a
        // behavioral change since 0.96.
        Filter ourFilter = new AccessControlFilter(authManager, user, table,
          query.getACLStrategy() ? AccessControlFilter.Strategy.CHECK_CELL_FIRST :
            AccessControlFilter.Strategy.CHECK_CELL_DEFAULT,
          cfVsMaxVersions);
        // wrap any existing filter
        if (filter != null) {
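
The coprocessor injects its AccessControlFilter without discarding whatever filter the client already set by combining the two in a MUST_PASS_ALL FilterList. The same wrapping pattern works in ordinary client code; a minimal sketch in which the two filters are arbitrary stand-ins (setup assumed as before):

    Filter existing = new PrefixFilter(Bytes.toBytes("row-1"));   // the caller's filter
    Filter injected = new KeyOnlyFilter();                        // the filter we add
    Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ALL,
        Arrays.asList(injected, existing));                       // a cell must pass both
    Scan scan = new Scan();
    scan.setFilter(combined);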

      }
    }

    protected Scan constructScan(byte[] valuePrefix) throws IOException {
      FilterList list = new FilterList();
      Filter filter = new SingleColumnValueFilter(
          FAMILY_NAME, QUALIFIER_NAME, CompareFilter.CompareOp.EQUAL,
          new BinaryComparator(valuePrefix)
      );
      list.addFilter(filter);
      if(opts.filterAll) {
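
constructScan above selects rows by comparing the value of one column with a SingleColumnValueFilter inside a FilterList. Note that by default this filter also emits rows that do not contain the tested column at all; setFilterIfMissing(true) drops them. A standalone sketch with assumed column names:

    SingleColumnValueFilter byValue = new SingleColumnValueFilter(
        Bytes.toBytes("f"), Bytes.toBytes("q"),                   // hypothetical column
        CompareOp.EQUAL, Bytes.toBytes("expected-value"));
    byValue.setFilterIfMissing(true);                             // skip rows lacking f:q
    Scan scan = new Scan();
    scan.setFilter(byValue);
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
    }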


          get.addColumn(Bytes.toBytes(col.getFamilyName()),
              Bytes.toBytes(col.getColumnName()));
        }
        FilterList filters = new FilterList();
        for (ColumnStruct columnStruct : action.getNotIndexConditionColumns()) {
          Filter filter = new SingleColumnValueFilter(Bytes.toBytes(columnStruct.getFamilyName()),
              Bytes.toBytes(columnStruct.getColumnName()),
              ParserUtils.convertIntValueToCompareOp(columnStruct.getCompareOp()), columnStruct.getValue());
          filters.addFilter(filter);

          NavigableSet<byte[]> qualifierSet = get.getFamilyMap().get(
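
The indexing code above builds one SingleColumnValueFilter per non-indexed condition column and combines them in a FilterList attached to a Get. A reduced sketch of that pattern with made-up column names (setup assumed as before):

    Get get = new Get(Bytes.toBytes("row-42"));                   // hypothetical row key
    FilterList conditions = new FilterList();                     // MUST_PASS_ALL by default
    conditions.addFilter(new SingleColumnValueFilter(
        Bytes.toBytes("f"), Bytes.toBytes("status"),
        CompareOp.EQUAL, Bytes.toBytes("active")));
    conditions.addFilter(new SingleColumnValueFilter(
        Bytes.toBytes("f"), Bytes.toBytes("country"),
        CompareOp.EQUAL, Bytes.toBytes("DE")));
    get.setFilter(conditions);
    Result result = table.get(get);                               // empty unless every condition holds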
