Package org.hivedb.meta.directory

Examples of org.hivedb.meta.directory.NodeResolver
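
A NodeResolver maps a partition dimension's primary index keys to the key semaphores that record which data nodes hold each key; DbDirectory is the database-backed implementation used throughout the excerpts below, all of which are taken from HiveDB's test suite. The following sketch shows the basic resolution pattern in isolation (it mirrors the excerpts and is not a complete test; getConnectString and getHiveDatabaseName are assumed to be helpers from the test base class):

    Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    hive.directory().insertPrimaryIndexKey("Antarctica");
    NodeResolver resolver = new DbDirectory(hive.getPartitionDimension(),
        CachingDataSourceProvider.getInstance().getDataSource(hive.getUri()));
    // Map each key semaphore to the id of a node that holds the key.
    for (Integer id : Transform.map(DirectoryWrapper.semaphoreToId(), resolver.getKeySemamphoresOfPrimaryIndexKey("Antarctica")))
      System.out.println(hive.getNode(id).getUri());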


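In the first excerpt, every node that holds the key is flipped to read-only on the Node object itself, and the test then asserts that requesting a read-write connection for that key throws: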
    final Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    final String key = new String("Antarctica");

    final PartitionDimension partitionDimension = hive.getPartitionDimension();
    hive.directory().insertPrimaryIndexKey(key);
    NodeResolver directory = new DbDirectory(partitionDimension, CachingDataSourceProvider.getInstance().getDataSource(hive.getUri()));
    for (Integer id : Transform.map(DirectoryWrapper.semaphoreToId(), directory.getKeySemamphoresOfPrimaryIndexKey(key)))
      hive.getNode(id).setStatus(Status.readOnly);

    AssertUtils.assertThrows(new Toss() {
      public void f() throws Exception {
        hive.connection().getByPartitionKey(key, AccessType.ReadWrite);


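The next excerpt performs the same lookup but changes the node status through hive.updateNodeStatus, then discards the Hive instance and reloads it from the same connect string: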
    Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    final String key = new String("Asia");

    PartitionDimension partitionDimension = hive.getPartitionDimension();
    hive.directory().insertPrimaryIndexKey(key);
    NodeResolver directory = new DbDirectory(partitionDimension, CachingDataSourceProvider.getInstance().getDataSource(hive.getUri()));
    for (Integer id : Transform.map(DirectoryWrapper.semaphoreToId(), directory.getKeySemamphoresOfPrimaryIndexKey(key)))
      hive.updateNodeStatus(hive.getNode(id), Status.readOnly);
    hive = null;

    final Hive fetchedHive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());

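The migration excerpts below resolve node ids through the directory before and after a key is moved. Here a primary key (and a dependent secondary key) is migrated from its origin node to a destination node, and the directory and records are checked after the move: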
    Integer secondaryKey = 7;

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), CachingDataSourceProvider.getInstance().getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    Mover<Integer> secMover = new SecondaryMover();

    // Do the actual migration.
    Migrator m = new HiveMigrator(hive);
    // Before the move, the directory points to the origin node.
    assertNotNull(Filter.grepItemAgainstList(origin.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    m.migrate(primaryKey, Arrays.asList(destination.getName()), pMover);
    // After the move, the directory points to the destination node.
    assertNotNull(Filter.grepItemAgainstList(destination.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    // Records exist and are identical on the destination node.
    assertEquals(primaryKey, pMover.get(primaryKey, destination));
    assertEquals(secondaryKey, secMover.get(secondaryKey, destination));
  }

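This excerpt wires in a dependent mover that always fails on copy, verifying that a failed migration leaves the directory pointing at the origin node and the origin's records intact: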
    Integer secondaryKey = 7;

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    // This mover deliberately fails on copy.
    Mover<Integer> failingMover = new Mover<Integer>() {
      public void copy(Integer item, Node node) {
        throw new RuntimeException("deliberate copy failure");
      }

      public void delete(Integer item, Node node) {
      }

      public Integer get(Object id, Node node) {
        return null;
      }
    };

    Migrator m = new HiveMigrator(hive);
    pMover.getDependentMovers().clear();
    pMover.getDependentMovers().add(new Pair<Mover, KeyLocator>(failingMover, new SecondaryKeyLocator(origin.getUri())));
    try {
      m.migrate(primaryKey, Arrays.asList(destination.getName()), pMover);
    } catch (Exception e) {
      // Expected: the copy step fails; swallow the exception and verify the state below.
    }
    // The directory still points to the origin node.
    assertNotNull(Filter.grepItemAgainstList(origin.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    // Records are intact on the origin node.
    assertEquals(primaryKey, pMover.get(primaryKey, origin));
    assertEquals(secondaryKey, new SecondaryMover().get(secondaryKey, origin));
  }

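Here the dependent mover fails on delete instead: the copy succeeds, so the directory ends up pointing at the destination node and the records exist there: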
    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();

    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    // This mover deliberately fails on delete.
    Mover<Integer> failingMover = new Mover<Integer>() {
      public void copy(Integer item, Node node) {
        SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
        dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
        dao.getJdbcTemplate().update("insert into secondary_table values (?)", new Object[]{item});
      }

      public void delete(Integer item, Node node) {
        throw new RuntimeException("Ach!");
      }

      public Integer get(Object id, Node node) {
        SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
        dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
        return dao.getJdbcTemplate().queryForInt("select id from secondary_table where id = ?", new Object[]{id});
      }
    };

    Migrator m = new HiveMigrator(hive);
    pMover.getDependentMovers().clear();
    pMover.getDependentMovers().add(new Pair<Mover, KeyLocator>(failingMover, new SecondaryKeyLocator(origin.getUri())));
    try {
      m.migrate(primaryKey, Arrays.asList(destination.getName()), pMover);
    } catch (Exception e) {
      // Expected: the delete step fails after the copy has already succeeded.
    }
    // The directory still points to the destination node.
    assertNotNull(Filter.grepItemAgainstList(destination.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    // Records exist on the destination node.
    assertEquals(primaryKey, pMover.get(primaryKey, destination));
    assertEquals(secondaryKey, new SecondaryMover().get(secondaryKey, destination));
  }


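Finally, the helper shared by the migration excerpts inserts a primary index key, uses the directory to find the key's origin node, and picks the other test node (data1 or data2) as the destination: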
  private Pair<Node, Node> initializeTestData(Hive hive, String primaryKey, Integer secondaryKey) throws Exception {
    hive.directory().insertPrimaryIndexKey(primaryKey);
    // Set up the test data on one node.
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    int originId = Atom.getFirst(dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey)).getNodeId();
    Node origin = hive.getNode(originId);
    Node destination = origin.getName().equals("data1") ? hive.getNode("data2") :
      hive.getNode("data1");
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    Mover<Integer> secMover = new SecondaryMover();
