Package org.apache.accumulo.core.client.impl

Examples of org.apache.accumulo.core.client.impl.MultiTableBatchWriterImpl
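
MultiTableBatchWriterImpl is the client-side class behind the public MultiTableBatchWriter interface, and the snippets below (taken from Accumulo's tests) construct it directly. Application code would normally obtain a MultiTableBatchWriter through the public Connector API instead. The following minimal sketch shows that route; the instance name, ZooKeeper quorum, credentials, and table name are illustrative placeholders, and imports and exception handling are omitted, as in the snippets that follow.

    // Hypothetical instance name and ZooKeeper quorum -- adjust for the target deployment
    Instance instance = new ZooKeeperInstance("test", "localhost:2181");
    Connector connector = instance.getConnector("root", new PasswordToken("secret"));

    // createMultiTableBatchWriter() is the public entry point; in the standard client it is
    // backed by MultiTableBatchWriterImpl
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    try {
      BatchWriter bw = mtbw.getBatchWriter("table1");

      Mutation m = new Mutation("row");
      m.put("col1", "", "val1");
      bw.addMutation(m);
    } finally {
      // Flushes outstanding mutations; throws MutationsRejectedException if any were rejected
      mtbw.close();
    }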


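    // Data validation around a table rename: "foo" is written to table1 through the MTBW,
    // table1 is renamed to table2 and then recreated, "bar" is written through both the
    // original and a freshly obtained writer, and the scan results verify which rows landed
    // in which table.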
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();

    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);

    try {
      final String table1 = "testTableRenameDataValidation_table1", table2 = "testTableRenameDataValidation_table2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);

      BatchWriter bw1 = mtbw.getBatchWriter(table1);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");

      bw1.addMutation(m1);

      tops.rename(table1, table2);
      tops.create(table1);

      BatchWriter bw2 = mtbw.getBatchWriter(table1);
     
      Mutation m2 = new Mutation("bar");
      m2.put("col1", "", "val1");

      // bw1 still follows the renamed table (now table2), while bw2 writes to the
      // newly created table1
      bw1.addMutation(m2);
      bw2.addMutation(m2);

      mtbw.close();

      Map<Entry<String,String>,String> table1Expectations = new HashMap<Entry<String,String>,String>();
      table1Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");

      Map<Entry<String,String>,String> table2Expectations = new HashMap<Entry<String,String>,String>();
      table2Expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
      table2Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");

      Scanner s = connector.createScanner(table1, new Authorizations());
      s.setRange(new Range());
      Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
      for (Entry<Key,Value> entry : s) {
        actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
      }

      Assert.assertEquals("Differing results for " + table1, table1Expectations, actual);

      s = connector.createScanner(table2, new Authorizations());
      s.setRange(new Range());
      actual = new HashMap<Entry<String,String>,String>();
      for (Entry<Key,Value> entry : s) {
        actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
      }

      Assert.assertEquals("Differing results for " + table2, table2Expectations, actual);
     
    } finally {
      if (null != mtbw) {
        mtbw.close();
      }
    }
  }


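    // Rename both tables while keeping the original BatchWriters: the cached writers follow
    // the underlying tables, so mutations added after the rename land in the renamed tables.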
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();

    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);

    try {
      final String table1 = "testTableRenameSameWriters_table1", table2 = "testTableRenameSameWriters_table2";
      final String newTable1 = "testTableRenameSameWriters_newTable1", newTable2 = "testTableRenameSameWriters_newTable2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      tops.rename(table1, newTable1);
      tops.rename(table2, newTable2);

      Mutation m2 = new Mutation("bar");
      m2.put("col1", "", "val1");
      m2.put("col2", "", "val2");

      bw1.addMutation(m2);
      bw2.addMutation(m2);

      mtbw.close();

      Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
      expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
      expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
      expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
      expectations.put(Maps.immutableEntry("bar", "col2"), "val2");

      for (String table : Arrays.asList(newTable1, newTable2)) {
        Scanner s = connector.createScanner(table, new Authorizations());
        s.setRange(new Range());
        Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
        for (Entry<Key,Value> entry : s) {
          actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
        }

        Assert.assertEquals("Differing results for " + table, expectations, actual);
      }
    } finally {
      if (null != mtbw) {
        mtbw.close();
      }
    }
  }

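    // After a rename, asking the MTBW for a writer under the old table name should fail with
    // TableNotFoundException; new writers are then obtained under the new names.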
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();

    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);

    try {
      final String table1 = "testTableRenameNewWriters_table1", table2 = "testTableRenameNewWriters_table2";
      final String newTable1 = "testTableRenameNewWriters_newTable1", newTable2 = "testTableRenameNewWriters_newTable2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      tops.rename(table1, newTable1);

      // The MTBW still has this name cached against the correct underlying table, but its cache
      // should be invalidated once the rename is seen, so the old name must no longer resolve
      try {
        bw1 = mtbw.getBatchWriter(table1);
        Assert.fail("Should not be able to find this table");
      } catch (TableNotFoundException e) {
        // pass
      }

      tops.rename(table2, newTable2);
     
      try {
        bw2 = mtbw.getBatchWriter(table2);
        Assert.fail("Should not be able to find this table");
      } catch (TableNotFoundException e) {
        //pass
      }
     
      bw1 = mtbw.getBatchWriter(newTable1);
      bw2 = mtbw.getBatchWriter(newTable2);

      Mutation m2 = new Mutation("bar");
      m2.put("col1", "", "val1");
      m2.put("col2", "", "val2");

      bw1.addMutation(m2);
      bw2.addMutation(m2);

      mtbw.close();

      Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
      expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
      expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
      expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
      expectations.put(Maps.immutableEntry("bar", "col2"), "val2");

      for (String table : Arrays.asList(newTable1, newTable2)) {
        Scanner s = connector.createScanner(table, new Authorizations());
        s.setRange(new Range());
        Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
        for (Entry<Key,Value> entry : s) {
          actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
        }

        Assert.assertEquals("Differing results for " + table, expectations, actual);
      }
    } finally {
      if (null != mtbw) {
        mtbw.close();
      }
    }
  }

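    // The same rename scenario with a cache timeout of 0 seconds: lookups under the old table
    // names are not served from a cache and fail with TableNotFoundException.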
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();
   
    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 0, TimeUnit.SECONDS);

    try {
      final String table1 = "testTableRenameNewWritersNoCaching_table1", table2 = "testTableRenameNewWritersNoCaching_table2";
      final String newTable1 = "testTableRenameNewWritersNoCaching_newTable1", newTable2 = "testTableRenameNewWritersNoCaching_newTable2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      tops.rename(table1, newTable1);
      tops.rename(table2, newTable2);

      try {
        bw1 = mtbw.getBatchWriter(table1);
        Assert.fail("Should not have gotten batchwriter for " + table1);
      } catch (TableNotFoundException e) {
        // Pass
      }

      try {
        bw2 = mtbw.getBatchWriter(table2);
        Assert.fail("Should not have gotten batchwriter for " + table2);
      } catch (TableNotFoundException e) {
        // Pass
      }
    } finally {
      if (null != mtbw) {
        mtbw.close();
      }
    }
  }

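    // Delete both tables after the first mutations: later addMutation() calls, or the final
    // close(), are expected to raise MutationsRejectedException.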
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();

    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
    boolean mutationsRejected = false;
   
    try {
      final String table1 = "testTableDelete_table1", table2 = "testTableDelete_table2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      tops.delete(table1);
      tops.delete(table2);

      Mutation m2 = new Mutation("bar");
      m2.put("col1", "", "val1");
      m2.put("col2", "", "val2");

      try {
        bw1.addMutation(m2);
        bw2.addMutation(m2);
      } catch (MutationsRejectedException e) {
        // Pass -- Mutations might flush immediately and fail because the tables were deleted
        mutationsRejected = true;
      }

    } finally {
      if (null != mtbw) {
        try {
          // Mutations might have flushed before the table delete occurred
          mtbw.close();
        } catch (MutationsRejectedException e) {
          // Pass
          mutationsRejected = true;
        }
      }
    }

    // The listing is truncated here; the full test presumably ends by checking the flag:
    Assert.assertTrue("Expected mutations to be rejected", mutationsRejected);
  }

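    // Take both tables offline after the first mutations: the later writes are expected to be
    // rejected, either immediately on addMutation() or when the writer is closed.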
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();

    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
    boolean mutationsRejected = false;

    try {
      final String table1 = "testOfflineTable_table1", table2 = "testOfflineTable_table2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      tops.offline(table1);
      tops.offline(table2);

      Mutation m2 = new Mutation("bar");
      m2.put("col1", "", "val1");
      m2.put("col2", "", "val2");

      try {
        bw1.addMutation(m2);
        bw2.addMutation(m2);
      } catch (MutationsRejectedException e) {
        // Pass -- Mutations might flush immediately and fail because the table is offline
        mutationsRejected = true;
      }
    } finally {
      if (null != mtbw) {
        try {
          // Mutations might have flushed before the table offline occurred
          mtbw.close();
        } catch (MutationsRejectedException e) {
          // Pass
          mutationsRejected = true;
        }
      }
    }

    // Presumed end of the truncated test:
    Assert.assertTrue("Expected mutations to be rejected", mutationsRejected);
  }

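    // Offline tables with a 60-second cache: getBatchWriter() may or may not notice the offline
    // state immediately, so the test only records whether an exception was observed.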
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();
   
    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
    boolean mutationsRejected = false;

    try {
      final String table1 = "testOfflineTableWithCache_table1", table2 = "testOfflineTableWithCache_table2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      tops.offline(table1);

      // Unlike the no-caching variant, there is no Assert.fail here: with a 60-second cache
      // the lookup may still be served from the cached name-to-id mapping
      try {
        bw1 = mtbw.getBatchWriter(table1);
      } catch (TableOfflineException e) {
        // pass
        mutationsRejected = true;
      }

      tops.offline(table2);

      try {
        bw2 = mtbw.getBatchWriter(table2);
      } catch (TableOfflineException e) {
        // pass
        mutationsRejected = true;
      }
    } finally {
      if (null != mtbw) {
        try {
          // Mutations might have flushed before the table offline occurred
          mtbw.close();
        } catch (MutationsRejectedException e) {
          // Pass
          mutationsRejected = true;
        }
      }
    }

    // Presumed end of the truncated test:
    Assert.assertTrue("Expected mutations to be rejected", mutationsRejected);
  }

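    // Offline tables with a cache timeout of 0 seconds: getBatchWriter() must observe the
    // offline state and throw TableOfflineException for both tables.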
    Connector connector = instance.getConnector("root", password);

    BatchWriterConfig config = new BatchWriterConfig();

    TCredentials creds = CredentialHelper.create("root", password, instance.getInstanceID());
    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 0, TimeUnit.SECONDS);
    boolean mutationsRejected = false;

    try {
      final String table1 = "testOfflineTableWithoutCache_table1", table2 = "testOfflineTableWithoutCache_table2";

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      // Mutations might or might not flush before the tables go offline
      tops.offline(table1);
      tops.offline(table2);

      try {
        bw1 = mtbw.getBatchWriter(table1);
        Assert.fail(table1 + " should be offline");
      } catch (TableOfflineException e) {
        // pass
        mutationsRejected = true;
      }

      try {
        bw2 = mtbw.getBatchWriter(table2);
        Assert.fail(table2 + " should be offline");
      } catch (TableOfflineException e) {
        // pass
        mutationsRejected = true;
      }
    } finally {
      if (null != mtbw) {
        try {
          // Mutations might have flushed before the table offline occurred
          mtbw.close();
        } catch (MutationsRejectedException e) {
          // Pass
          mutationsRejected = true;
        }
      }
    }

    // Presumed end of the truncated test:
    Assert.assertTrue("Expected mutations to be rejected", mutationsRejected);
  }

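    // Helper from a test class: getMultiTableBatchWriter() builds a MultiTableBatchWriterImpl
    // directly, using the newer Credentials/PasswordToken constructor and a configurable
    // cache timeout.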
    connector = getConnector();
    mtbw = getMultiTableBatchWriter(60);
  }

  public MultiTableBatchWriter getMultiTableBatchWriter(long cacheTimeoutInSeconds) {
    return new MultiTableBatchWriterImpl(connector.getInstance(), new Credentials("root", new PasswordToken(ROOT_PASSWORD)),
        new BatchWriterConfig(), cacheTimeoutInSeconds, TimeUnit.SECONDS);
  }

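    // The same helper, except that the root password is read from a shared test cluster's
    // configuration rather than a constant.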
    connector = getConnector();
    mtbw = getMultiTableBatchWriter(60);
  }

  public MultiTableBatchWriter getMultiTableBatchWriter(long cacheTimeoutInSeconds) {
    return new MultiTableBatchWriterImpl(connector.getInstance(), new Credentials("root", new PasswordToken(getStaticCluster().getConfig().getRootPassword())),
        new BatchWriterConfig(), cacheTimeoutInSeconds, TimeUnit.SECONDS);
  }