Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.HTablePool
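
HTablePool keeps a cache of open HTable instances so that callers can check a table out, use it, and hand it back instead of constructing a new HTable for every request. The excerpts below, drawn from HBase itself, its Stargate REST gateway, and application code, all follow the same discipline: getTable() to borrow a table, putTable() in a finally block to return it.

A minimal sketch of that checkout/return pattern, assuming a table named "mytable" and a row key "row1" (both hypothetical) and the same HTablePool API these excerpts use:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTablePoolSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Keep at most 10 pooled HTable instances per table name.
    HTablePool pool = new HTablePool(conf, 10);
    HTable table = pool.getTable("mytable");
    try {
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println("cells in row1: " + result.size());
    } finally {
      // Always return the borrowed table so other callers can reuse it.
      pool.putTable(table);
    }
  }
}

The first excerpt is the constructor of HBase's ReplicationSink, which sizes its pool from configuration: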


   * @throws IOException thrown when HDFS is unreachable or a file name is invalid
   */
  public ReplicationSink(Configuration conf, Stoppable stopper)
      throws IOException {
    this.conf = conf;
    this.pool = new HTablePool(this.conf,
        conf.getInt("replication.sink.htablepool.capacity", 10));
    this.metrics = new ReplicationSinkMetrics();
  }
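
The pool capacity is read from the replication.sink.htablepool.capacity property, falling back to 10, so a deployment can raise it in hbase-site.xml or in code without touching the sink. For example (a hypothetical override):

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("replication.sink.htablepool.capacity", 32);

The next excerpt is from an application that bulk-loads word counts. It uses the no-argument HTablePool constructor, which picks up the default HBase configuration and an effectively unbounded maximum pool size, and it releases the pooled tables for one table name with closeTablePool():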


    System.out.println("Total "+wordCountMap.size()+" words");
    HTablePool pool = new HTablePool();
    WordCountDAO DAO = new WordCountDAO(pool);
    DAO.CreateTable();
    int count = 0;
    for(WordCountDAO.WordCount w: wordCountMap.values()){
      DAO.addWordCount(w);
      if(++count % 50000==0){
        System.out.println("Loading "+count+" words");
      }
    }
    pool.closeTablePool(DAO.TABLE_NAME);
  }
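
The remaining excerpts come from the Stargate REST gateway, where one shared pool (created in the RESTServlet constructor, shown further down) serves all requests. Fetching a table's schema borrows a table just long enough to read its descriptor: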


  private HTableDescriptor getTableSchema() throws IOException,
      TableNotFoundException {
    HTablePool pool = servlet.getTablePool();
    HTable table = pool.getTable(actualTableName);
    try {
      return table.getTableDescriptor();
    } finally {
      pool.putTable(table);
    }
  }
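
RowResultGenerator backs a single-row GET. The borrowed table serves the Get, and also supplies the table descriptor when the row specification names no columns and every family must be added; the finally block returns it either way: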

  private Iterator<KeyValue> valuesI;
  private KeyValue cache;

  public RowResultGenerator(final String tableName, final RowSpec rowspec,
      final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTable table = pool.getTable(tableName);
    try {
      Get get = new Get(rowspec.getRow());
      if (rowspec.hasColumns()) {
        get.addColumns(rowspec.getColumns());
      } else {
        // rowspec does not explicitly specify columns, return them all
        for (HColumnDescriptor family:
            table.getTableDescriptor().getFamilies()) {
          get.addFamily(family.getName());
        }
      }
      get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
      get.setMaxVersions(rowspec.getMaxVersions());
      if (filter != null) {
        get.setFilter(filter);
      }
      Result result = table.get(get);
      if (result != null && !result.isEmpty()) {
        valuesI = result.list().iterator();
      }
    } finally {
      pool.putTable(table);
    }
  }
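
Row updates are batched: autoflush is switched off so the Puts buffer on the client, and flushCommits() sends the whole batch in one round trip. The finally block returns the table to the pool even when an exception aborts the batch partway through: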


  Response update(final CellSetModel model, final boolean replace) {
    HTablePool pool = servlet.getTablePool();
    HTable table = null;
    try {
      List<RowModel> rows = model.getRows();
      // the user request limit is a transaction limit, so we need to
      // account for updates by row
      if (user != null && !servlet.userRequestLimit(user, rows.size())) {
        throw new WebApplicationException(Response.status(509).build());
      }
      table = pool.getTable(actualTableName);
      table.setAutoFlush(false);
      for (RowModel row: rows) {
        byte[] key = row.getKey();
        Put put = new Put(key);
        for (CellModel cell: row.getCells()) {
          byte [][] parts = KeyValue.parseColumn(cell.getColumn());
          put.add(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
        }
        table.put(put);
        if (LOG.isDebugEnabled()) {
          LOG.debug("PUT " + put.toString());
        }
      }
      table.setAutoFlush(true);
      table.flushCommits();
      ResponseBuilder response = Response.ok();
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
                  Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.putTable(table);
      }
    }
  }
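
The raw-binary variant writes one cell per request. Row, column, and timestamp come from the X-Row, X-Column, and X-Timestamp request headers when present, falling back to the row specification, and a request that resolves to no column is rejected with 400 Bad Request: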


  // This currently supports only update of one row at a time.
  Response updateBinary(final byte[] message, final HttpHeaders headers,
      final boolean replace) {
    HTablePool pool = servlet.getTablePool();
    HTable table = null;   
    try {
      byte[] row = rowspec.getRow();
      byte[][] columns = rowspec.getColumns();
      byte[] column = null;
      if (columns != null) {
        column = columns[0];
      }
      long timestamp = HConstants.LATEST_TIMESTAMP;
      List<String> vals = headers.getRequestHeader("X-Row");
      if (vals != null && !vals.isEmpty()) {
        row = Bytes.toBytes(vals.get(0));
      }
      vals = headers.getRequestHeader("X-Column");
      if (vals != null && !vals.isEmpty()) {
        column = Bytes.toBytes(vals.get(0));
      }
      vals = headers.getRequestHeader("X-Timestamp");
      if (vals != null && !vals.isEmpty()) {
        timestamp = Long.valueOf(vals.get(0));
      }
      if (column == null) {
        throw new WebApplicationException(Response.Status.BAD_REQUEST);
      }
      Put put = new Put(row);
      byte parts[][] = KeyValue.parseColumn(column);
      put.add(parts[0], parts[1], timestamp, message);
      table = pool.getTable(actualTableName);
      table.put(put);
      if (LOG.isDebugEnabled()) {
        LOG.debug("PUT " + put.toString());
      }
      return Response.ok().build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
                  Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.putTable(table);
      }
    }
  }
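
Deletes follow the same shape. This excerpt begins partway through assembling the Delete from the row specification; the finished Delete is then applied through a borrowed table: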

        } else {
          delete.deleteFamily(split[0]);
        }
      }
    }
    HTablePool pool = servlet.getTablePool();
    HTable table = null;
    try {
      table = pool.getTable(actualTableName);
      table.delete(delete);
      if (LOG.isDebugEnabled()) {
        LOG.debug("DELETE " + delete.toString());
      }
    } catch (IOException e) {
      throw new WebApplicationException(e,
                  Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.putTable(table);
      }
    }
    return Response.ok().build();
  }
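
Region metadata is served the same way, via HTable.getRegionsInfo() on a briefly borrowed table: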


  private Map<HRegionInfo,HServerAddress> getTableRegions()
      throws IOException {
    HTablePool pool = servlet.getTablePool();
    HTable table = pool.getTable(actualTableName);
    try {
      return table.getRegionsInfo();
    } finally {
      pool.putTable(table);
    }
  }
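
The shared pool itself is created once, in the RESTServlet constructor, with a fixed capacity of 10 tables per table name, alongside the gateway's ZooKeeper wrapper and status reporter: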

   * Constructor
   * @throws IOException
   */
  public RESTServlet() throws IOException {
    this.conf = new HBaseConfiguration();
    this.pool = new HTablePool(conf, 10);
    this.wrapper = initZooKeeperWrapper();
    this.statusReporter = new StatusReporter(
      conf.getInt(STATUS_REPORT_PERIOD_KEY, 1000 * 30), stopping);
    Threads.setDaemonThreadRunning(statusReporter, "Stargate.statusReporter");
    this.multiuser = conf.getBoolean("stargate.multiuser", false);
  }
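
Finally, ScannerResultGenerator opens a scanner through the pool. The HTable goes back to the pool in the finally block while the ResultScanner it produced stays open; the generator's id, built from the current time and the scanner's hash code, identifies the scanner to later requests: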

  private String id;
  private ResultScanner scanner;
  private Result cached;

  public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
      final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTable table = pool.getTable(tableName);
    try {
      Scan scan;
      if (rowspec.hasEndRow()) {
        scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
      } else {
        scan = new Scan(rowspec.getStartRow());
      }
      if (rowspec.hasColumns()) {
        byte[][] columns = rowspec.getColumns();
        for (byte[] column: columns) {
          byte[][] split = KeyValue.parseColumn(column);
          if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
            scan.addColumn(split[0], split[1]);
          } else {
            scan.addFamily(split[0]);
          }
        }
      } else {
        for (HColumnDescriptor family:
            table.getTableDescriptor().getFamilies()) {
          scan.addFamily(family.getName());
        }
      }
      scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());         
      scan.setMaxVersions(rowspec.getMaxVersions());
      if (filter != null) {
        scan.setFilter(filter);
      }
      // always disable block caching on the cluster when scanning
      scan.setCacheBlocks(false);
      scanner = table.getScanner(scan);
      cached = null;
      id = Long.toString(System.currentTimeMillis()) +
             Integer.toHexString(scanner.hashCode());
    } finally {
      pool.putTable(table);
    }
  }
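
A closing note: HTablePool was deprecated in later HBase releases and removed in HBase 1.0, where a long-lived Connection hands out lightweight Table instances instead. A rough modern equivalent of the sketch at the top of this page (assuming HBase 1.0 or later; the table and row names are again hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The Connection is heavyweight and meant to be shared;
    // Table instances are cheap and created per use.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("mytable"))) {
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println("cells in row1: " + result.size());
    }
  }
}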
