Package org.apache.hadoop.hbase.filter

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
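
FirstKeyOnlyFilter returns only the first KeyValue of each row and then seeks straight to the next row, which makes it the usual choice when only row keys or row existence matter (row counting, existence checks, key-only scans). The excerpts below come from several projects; first, a minimal stand-alone sketch of the basic pattern. The table name "t1" and the older HTable/HTableInterface client API are assumptions chosen to match the style of the excerpts, not code taken from any of them.

        // Count rows by fetching only the first cell of each row.
        Configuration conf = HBaseConfiguration.create();
        HTableInterface table = new HTable(conf, "t1");     // "t1" is a placeholder table name
        Scan scan = new Scan();
        scan.setCacheBlocks(false);                 // a one-off count should not churn the block cache
        scan.setFilter(new FirstKeyOnlyFilter());   // one cell per row, then skip to the next row
        ResultScanner scanner = table.getScanner(scan);
        long rowCount = 0;
        try {
            for (Result result = scanner.next(); result != null; result = scanner.next()) {
                rowCount++;
            }
        } finally {
            scanner.close();
            table.close();
        }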


            // Excerpt from a query compiler (Apache Phoenix): when the query projects no columns
            // and the table has only one column family, project that family and prepend a
            // FirstKeyOnlyFilter so the scan returns just the first cell of each row, which can
            // be quite a bit faster.
            if (familyMap.isEmpty() && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other, undeclared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
                ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
            } else {
                byte[] ecf = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
                // Project empty key value unless the column family containing it has
                // been projected in its entirety.
                if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                    // ...


  // From an HBase filter test: with a FirstKeyOnlyFilter set on the scan, exactly one KeyValue
  // (the first one) is expected back for each row.
  @Test
  public void testFirstKeyOnlyFilter() throws Exception {
    Scan s = new Scan();
    s.setFilter(new FirstKeyOnlyFilter());
    // Expected KVs, the first KV from each of the remaining 6 rows
    KeyValue [] kvs = {
        new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
        new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
        new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
        // ...

       
        // From an Apache Phoenix integration test: confirm that the data is no longer there
        // because we dropped the table. This needs to be done natively, through the HBase API,
        // because the Phoenix metadata is gone; FirstKeyOnlyFilter suffices since only row
        // presence matters.
        HTableInterface htable = conn5.unwrap(PhoenixConnection.class).getQueryServices().getTable(SchemaUtil.getTableNameAsBytes(ATABLE_SCHEMA_NAME, ATABLE_NAME));
        Scan scan = new Scan();
        scan.setFilter(new FirstKeyOnlyFilter());
        scan.setTimeRange(0, ts+9);
        assertNull(htable.getScanner(scan).next());
        conn5.close();

        // Still should work because we're at an earlier timestamp than when the table was deleted
        // ...

        // From the Apache Phoenix metadata coprocessor: look for changes to the table's metadata
        // rows made after the client's timestamp (nothing can be newer than LATEST_TIMESTAMP, so
        // bail out early). The raw scan also surfaces delete markers, and the FirstKeyOnlyFilter
        // limits the result to the first cell of the row.
        if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
            return null;
        }
       
        Scan scan = newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
        scan.setFilter(new FirstKeyOnlyFilter());
        scan.setRaw(true);
        RegionScanner scanner = region.getScanner(scan);
        List<KeyValue> results = Lists.<KeyValue>newArrayList();
        scanner.next(results);
        // HBase ignores the time range on a raw scan (HBASE-7362)
        // ...

          list.add(model.build());
        }
        filter = new FilterList(FilterList.Operator.valueOf(op), list);
      } break;
      // From the HBase REST gateway's filter model: each filter type named in the model is
      // mapped to a concrete Filter instance; FirstKeyOnlyFilter takes no arguments.
      case FirstKeyOnlyFilter:
        filter = new FirstKeyOnlyFilter();
        break;
      case InclusiveStopFilter:
        filter = new InclusiveStopFilter(Base64.decode(value));
        break;
      case KeyOnlyFilter:
        // ...


      // From HBase's aggregation coprocessor (row count): when the client asked for no specific
      // column and set no filter, a FirstKeyOnlyFilter is enough, since counting a row needs
      // only its first cell.
      byte[] qualifier = null;
      if (qualifiers != null && !qualifiers.isEmpty()) {
        qualifier = qualifiers.pollFirst();
      }
      if (scan.getFilter() == null && qualifier == null)
        scan.setFilter(new FirstKeyOnlyFilter());
      scanner = env.getRegion().getScanner(scan);
      boolean hasMoreRows = false;
      do {
        hasMoreRows = scanner.next(results);
        if (results.size() > 0) {
          // ...

    // From HBase's RowCounter MapReduce job: a user-specified column may or may not be part of
    // the first key value for a row, so when specific columns are requested the scan uses
    // FirstKeyValueMatchingQualifiersFilter instead of FirstKeyOnlyFilter.
    if (qualifiers.size() == 0) {
      scan.setFilter(new FirstKeyOnlyFilter());
    } else {
      scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
    }
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
    job.setOutputFormatClass(NullOutputFormat.class);
    // ...
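
The excerpt above only shows how the filter is chosen. A complete MapReduce row count also needs a mapper and the TableMapReduceUtil wiring; the sketch below is not the original RowCounter source, and the mapper class name "SimpleRowCounterMapper", the table name "my_table", and the job name are placeholders.

    // Hypothetical mapper: it only increments a counter, so the FirstKeyOnlyFilter set on the
    // scan means each map() call receives just one cell for its row.
    public static class SimpleRowCounterMapper extends TableMapper<ImmutableBytesWritable, Result> {
        public enum Counters { ROWS }

        @Override
        protected void map(ImmutableBytesWritable rowKey, Result value, Context context) {
            context.getCounter(Counters.ROWS).increment(1);
        }
    }

    // Hypothetical job wiring; conf is an org.apache.hadoop.conf.Configuration.
    Job job = Job.getInstance(conf, "simple-rowcounter");
    job.setJarByClass(SimpleRowCounterMapper.class);
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    scan.setFilter(new FirstKeyOnlyFilter());    // no specific columns requested
    TableMapReduceUtil.initTableMapperJob("my_table", scan, SimpleRowCounterMapper.class,
            ImmutableBytesWritable.class, Result.class, job);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);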

        // Excerpt from a table-sampling check: scan each table with a FirstKeyOnlyFilter and
        // stop counting after 5000 rows, which keeps the sample cheap.
        for (Map.Entry<String, TableInfo> entry : tableInfos.entrySet()) {
            HTableInterface table = new HTable(hbaseAdmin.getConfiguration(), entry.getKey());
            Scan scan = new Scan();
            scan.setCaching(100);
            scan.setCacheBlocks(false);
            scan.setFilter(new FirstKeyOnlyFilter());
            ResultScanner scanner = table.getScanner(scan);
            int rowCount = 0;
            while (scanner.next() != null && rowCount <= 5000) {
                rowCount++;
            }
            // ...
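
When counting is the only goal, FirstKeyOnlyFilter is often combined with KeyOnlyFilter in a FilterList so the single cell returned for each row is also stripped of its value bytes; the scanner loop stays the same as in the excerpt above. A sketch of just that filter setup (the variable name countScan is arbitrary):

            // Require both filters: one cell per row (FirstKeyOnlyFilter), with its value bytes
            // dropped (KeyOnlyFilter).
            Scan countScan = new Scan();
            countScan.setCacheBlocks(false);
            countScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
                    new FirstKeyOnlyFilter(), new KeyOnlyFilter()));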
