Package: org.apache.hadoop.hbase

Usage examples of org.apache.hadoop.hbase.HBaseConfiguration


        printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
      }
      majorCompact = true;
    }
    Path tableDir  = new Path(args[0]);
    HBaseConfiguration c = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(c);
    Path logdir = new Path(c.get("hbase.tmp.dir"),
      "hlog" + tableDir.getName() + System.currentTimeMillis());
    HLog log = new HLog(fs, logdir, c, null);
    try {
      processTable(fs, tableDir, log, c, majorCompact);
     } finally {
View Full Code Here


      boolean printKeyValue = cmd.hasOption("p");
      boolean printMeta = cmd.hasOption("m");
      boolean checkRow = cmd.hasOption("k");
      boolean checkFamily = cmd.hasOption("a");
      // get configuration, file system and get list of files
      HBaseConfiguration conf = new HBaseConfiguration();
      conf.set("fs.default.name",
        conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
      FileSystem fs = FileSystem.get(conf);
      ArrayList<Path> files = new ArrayList<Path>();
      if (cmd.hasOption("f")) {
        files.add(new Path(cmd.getOptionValue("f")));
      }
View Full Code Here

  private void init(String methodName) throws IOException {
    //Setting up a Store
    Path basedir = new Path(DIR+methodName);
    Path logdir = new Path(DIR+methodName+"/logs");
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    HBaseConfiguration conf = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path reconstructionLog = null;
    Progressable reporter = null;

    fs.delete(logdir, true);
View Full Code Here

    // Now put in place an empty store file.  Its a little tricky.  Have to
    // do manually with hacked in sequence id.
    StoreFile f = this.store.getStorefiles().firstEntry().getValue();
    Path storedir = f.getPath().getParent();
    long seqid = f.getMaxSequenceId();
    HBaseConfiguration c = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(c);
    Writer w = StoreFile.getWriter(fs, storedir);
    StoreFile.appendMetadata(w, seqid + 1);
    w.close();
    this.store.close();
View Full Code Here

    if (args.length < 1) {
      printUsageAndExit();
    }

    HBaseConfiguration conf = new HBaseConfiguration();

    // Process command-line args. TODO: Better cmd-line processing
    // (but hopefully something not as painful as cli options).
    for (String cmd: args) {

      if (cmd.startsWith("--minServers=")) {
        conf.setInt("hbase.regions.server.count.min",
          Integer.valueOf(cmd.substring(13)));
        continue;
      }

      if (cmd.equals("start")) {
View Full Code Here

    byte[][] families = { cf };
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    long ts = 1;
    String method = this.getName();
    HBaseConfiguration config = new HBaseConfiguration();
    config.setInt("test.block.size", 1);
    this.region = initHRegion(tableName, method, config, families);
    try {
      KeyValue kv1 = new KeyValue(rowA, cf, col1, ts, KeyValue.Type.Put, null);
      KeyValue kv2 = new KeyValue(rowB, cf, col1, ts, KeyValue.Type.Put, null);
      KeyValue kv3 = new KeyValue(rowC, cf, col1, ts, KeyValue.Type.Put, null);
View Full Code Here

    byte[] cf3 = Bytes.toBytes("CF3");
    byte[][] families = { cf1, cf2, cf3 };
    byte[] col = Bytes.toBytes("C");
    long ts = 1;
    String method = this.getName();
    HBaseConfiguration conf = new HBaseConfiguration();
    // disable compactions in this test.
    conf.setInt("hbase.hstore.compactionThreshold", 10000);
    this.region = initHRegion(tableName, method, conf, families);
    try {
      // kv naming style: kv(row number) totalKvCountInThisRow seq no
      KeyValue kv0_1_1 = new KeyValue(row0, cf1, col, ts, KeyValue.Type.Put,
          null);
View Full Code Here

    byte[] cf4 = Bytes.toBytes("CF4");
    byte[][] families = { cf1, cf2, cf3, cf4 };
    byte[] col = Bytes.toBytes("C");
    long ts = 1;
    String method = this.getName();
    HBaseConfiguration conf = new HBaseConfiguration();
    // disable compactions in this test.
    conf.setInt("hbase.hstore.compactionThreshold", 10000);
    this.region = initHRegion(tableName, method, conf, families);
    try {
      KeyValue kv1 = new KeyValue(row1, cf1, col, ts, KeyValue.Type.Put, null);
      KeyValue kv2 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, null);
      KeyValue kv3 = new KeyValue(row3, cf3, col, ts, KeyValue.Type.Put, null);
View Full Code Here

   
    return descriptor;
  }
 
  public void dropTables() {
    HBaseConfiguration config = new HBaseConfiguration();
    try {
      HBaseAdmin admin = new HBaseAdmin(config);
      HTableDescriptor[] descriptors = admin.listTables();
      for (HTableDescriptor hTableDescriptor : descriptors) {
        String name = hTableDescriptor.getNameAsString();
View Full Code Here

  public Response get(final @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    HBaseConfiguration conf = servlet.getConfiguration();
    try {
      HBaseAdmin admin = new HBaseAdmin(conf);
      StorageClusterVersionModel model = new StorageClusterVersionModel();
      model.setVersion(admin.getClusterStatus().getHBaseVersion());
      ResponseBuilder response = Response.ok(model);
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hbase.HBaseConfiguration

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.