Package org.apache.hadoop.hbase.replication

Examples of org.apache.hadoop.hbase.replication.ReplicationZookeeper$ReplicationStatusTracker
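The snippets below come from the HBase source tree (TestLogsCleaner, VerifyReplication, Replication, ReplicationLogCleaner, TestReplicationSourceManager). As a minimal, self-contained sketch of the same API, the following mirrors the HConnection/Configuration/ZooKeeperWatcher construction pattern and the getPeer() lookup those snippets use. It assumes a 0.92/0.94-era HBase client on the classpath and a reachable ZooKeeper quorum; the class name ReplicationZookeeperSketch, the peer id "1", and the printed config key are placeholders chosen for illustration, not part of the excerpts below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.replication.ReplicationPeer;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class ReplicationZookeeperSketch {
  public static void main(String[] args) throws IOException, KeeperException {
    Configuration conf = HBaseConfiguration.create();

    // A dedicated watcher, as in the VerifyReplication snippets; the Abortable
    // is a no-op because this short-lived client has nothing to tear down.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "sketch", new Abortable() {
      @Override public void abort(String why, Throwable e) {}
      @Override public boolean isAborted() { return false; }
    });
    HConnection conn = HConnectionManager.getConnection(conf);
    ReplicationZookeeper zk = new ReplicationZookeeper(conn, conf, zkw);
    ReplicationPeer peer = null;
    try {
      // Look up a configured peer cluster; a null result means the slave
      // cluster could not be reached, which is exactly what the
      // VerifyReplication excerpt further down checks for.
      peer = zk.getPeer("1");
      if (peer == null) {
        System.err.println("Peer \"1\" is not defined or not reachable");
      } else {
        System.out.println("Peer quorum: "
            + peer.getConfiguration().get("hbase.zookeeper.quorum"));
      }
    } finally {
      if (peer != null) peer.close();
      zk.close();
      zkw.close();
    }
  }
}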


    long ttl = 10000;
    conf.setLong("hbase.master.logcleaner.ttl", ttl);
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    Replication.decorateMasterConfiguration(conf);
    Server server = new DummyServer();
    ReplicationZookeeper zkHelper =
        new ReplicationZookeeper(server, new AtomicBoolean(true));

    Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
        HConstants.HREGION_OLDLOGDIR_NAME);
    String fakeMachineName =
      URLEncoder.encode(server.getServerName().toString(), "UTF8");

    FileSystem fs = FileSystem.get(conf);
    LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
    long now = System.currentTimeMillis();
    fs.delete(oldLogDir, true);
    fs.mkdirs(oldLogDir);
    // Case 1: 2 invalid files, which would be deleted directly
    fs.createNewFile(new Path(oldLogDir, "a"));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
    // (TimeToLiveLogCleaner), so we are not going down the chain
    System.out.println("Now is: " + now);
    for (int i = 1; i < 31; i++) {
      // Case 3: old files which would be deletable for the first log cleaner
      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
      Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
      fs.createNewFile(fileName);
      // Case 4: put 3 old log files in ZK indicating that they are scheduled
      // for replication so these files would pass the first log cleaner
      // (TimeToLiveLogCleaner) but would be rejected by the second
      // (ReplicationLogCleaner)
      if (i % (30/3) == 1) {
        zkHelper.addLogToList(fileName.getName(), fakeMachineName);
        System.out.println("Replication log file: " + fileName);
      }
    }

    // sleep for some time to get a newer modification time


        }
        HConnectionManager.execute(new HConnectable<Void>(conf) {
          @Override
          public Void connect(HConnection conn) throws IOException {
            ZooKeeperWatcher localZKW = null;
            ReplicationZookeeper zk = null;
            ReplicationPeer peer = null;
            try {
              localZKW = new ZooKeeperWatcher(
                conf, "VerifyReplication", new Abortable() {
                @Override public void abort(String why, Throwable e) {}
                @Override public boolean isAborted() {return false;}
              });
              zk = new ReplicationZookeeper(conn, conf, localZKW);
              // Just verifying that we can connect
              peer = zk.getPeer(peerId);
              HTable replicatedTable = new HTable(peer.getConfiguration(),
                  conf.get(NAME+".tableName"));
              scan.setStartRow(value.getRow());
              replicatedScanner = replicatedTable.getScanner(scan);
            } catch (KeeperException e) {
              throw new IOException("Got a ZK exception", e);
            } finally {
              if (peer != null) {
                peer.close();
              }
              if (zk != null) {
                zk.close();
              }
              if (localZKW != null) {
                localZKW.close();
              }
            }

    this.server = server;
    this.conf = this.server.getConfiguration();
    this.replication = isReplication(this.conf);
    if (replication) {
      try {
        this.zkHelper = new ReplicationZookeeper(server, this.replicating);
      } catch (KeeperException ke) {
        throw new IOException("Failed replication handler create " +
           "(replicating=" + this.replicating, ke);
      }
      this.replicationManager = new ReplicationSourceManager(zkHelper, conf,

    // I can close myself when the time comes.
    Configuration conf = new Configuration(config);
    super.setConf(conf);
    try {
      ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "replicationLogCleaner", null);
      this.zkHelper = new ReplicationZookeeper(this, conf, zkw);
    } catch (KeeperException e) {
      LOG.error("Error while configuring " + this.getClass().getName(), e);
    } catch (IOException e) {
      LOG.error("Error while configuring " + this.getClass().getName(), e);
    }

  public void testLogCleaning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    Replication.decorateMasterConfiguration(conf);
    Server server = new DummyServer();
    ReplicationZookeeper zkHelper =
        new ReplicationZookeeper(server, new AtomicBoolean(true));

    Path oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
        HConstants.HREGION_OLDLOGDIR_NAME);
    String fakeMachineName = URLEncoder.encode(server.getServerName(), "UTF8");

    FileSystem fs = FileSystem.get(conf);
    LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
    long now = System.currentTimeMillis();
    fs.delete(oldLogDir, true);
    fs.mkdirs(oldLogDir);
    // Case 1: 2 invalid files, which would be deleted directly
    fs.createNewFile(new Path(oldLogDir, "a"));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
    // (TimeToLiveLogCleaner), so we are not going down the chain
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));
    System.out.println("Now is: " + now);
    for (int i = 0; i < 30; i++) {
      // Case 3: old files which would be deletable for the first log cleaner
      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
      Path fileName = new Path(oldLogDir, fakeMachineName + "." +
          (now - 6000000 - i) );
      fs.createNewFile(fileName);
      // Case 4: put 3 old log files in ZK indicating that they are scheduled
      // for replication so these files would pass the first log cleaner
      // (TimeToLiveLogCleaner) but would be rejected by the second
      // (ReplicationLogCleaner)
      if (i % (30/3) == 0) {
        zkHelper.addLogToList(fileName.getName(), fakeMachineName);
        System.out.println("Replication log file: " + fileName);
      }
    }
    for (FileStatus stat : fs.listStatus(oldLogDir)) {
      System.out.println(stat.getPath().toString());

    if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) {
      throw new IOException("Replication needs to be enabled to verify it.");
    }
    try {
      HConnection conn = HConnectionManager.getConnection(conf);
      ReplicationZookeeper zk = new ReplicationZookeeper(conn, conf,
          conn.getZooKeeperWatcher());
      // Just verifying that we can connect
      ReplicationPeer peer = zk.getPeer(peerId);
      if (peer == null) {
        throw new IOException("Couldn't get access to the slave cluster," +
            "please see the log");
      }
    } catch (KeeperException ex) {

          scan.setTimeRange(startTime,
              endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
        }
        try {
          HConnection conn = HConnectionManager.getConnection(conf);
          ReplicationZookeeper zk = new ReplicationZookeeper(conn, conf,
              conn.getZooKeeperWatcher());
          ReplicationPeer peer = zk.getPeer(conf.get(NAME+".peerId"));
          HTable replicatedTable = new HTable(peer.getConfiguration(),
              conf.get(NAME+".tableName"));
          scan.setStartRow(value.getRow());
          replicatedScanner = replicatedTable.getScanner(scan);
        } catch (KeeperException e) {

  throws IOException, KeeperException {
    this.server = server;
    this.conf = this.server.getConfiguration();
    this.replication = isReplication(this.conf);
    if (replication) {
      this.zkHelper = new ReplicationZookeeper(server, this.replicating);
      this.replicationManager = new ReplicationSourceManager(zkHelper, conf,
          this.server, fs, this.replicating, logDir, oldLogDir) ;
    } else {
      this.replicationManager = null;
      this.zkHelper = null;

    // I can close myself when the time comes.
    this.conf = new Configuration(conf);
    try {
      ZooKeeperWatcher zkw =
          new ZooKeeperWatcher(this.conf, "replicationLogCleaner", null);
      this.zkHelper = new ReplicationZookeeper(this, this.conf, zkw);
    } catch (KeeperException e) {
      LOG.error("Error while configuring " + this.getClass().getName(), e);
    } catch (IOException e) {
      LOG.error("Error while configuring " + this.getClass().getName(), e);
    }

  public void testNodeFailoverWorkerCopyQueuesFromRSUsingMulti() throws Exception {
    LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti");
    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
    final Server server = new DummyServer("hostname0.example.org");
    AtomicBoolean replicating = new AtomicBoolean(true);
    ReplicationZookeeper rz = new ReplicationZookeeper(server, replicating);
    // populate some znodes in the peer znode
    files.add("log1");
    files.add("log2");
    for (String file : files) {
      rz.addLogToList(file, "1");
    }
    // create 3 DummyServers
    Server s1 = new DummyServer("dummyserver1.example.org");
    Server s2 = new DummyServer("dummyserver2.example.org");
    Server s3 = new DummyServer("dummyserver3.example.org");

    // create 3 DummyNodeFailoverWorkers
    DummyNodeFailoverWorker w1 = new DummyNodeFailoverWorker(
        server.getServerName().getServerName(), s1);
    DummyNodeFailoverWorker w2 = new DummyNodeFailoverWorker(
        server.getServerName().getServerName(), s2);
    DummyNodeFailoverWorker w3 = new DummyNodeFailoverWorker(
        server.getServerName().getServerName(), s3);

    latch = new CountDownLatch(3);
    // start the threads
    w1.start();
    w2.start();
    w3.start();
    // make sure only one is successful
    int populatedMap = 0;
    // wait for result now... till all the workers are done.
    latch.await();
    populatedMap += w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated()
        + w3.isLogZnodesMapPopulated();
    assertEquals(1, populatedMap);
    // close out the resources.
    rz.close();
    server.abort("", null);
  }


