Package org.hivedb.meta.directory

Examples of org.hivedb.meta.directory.DbDirectory


  }

  private void doMigration(Object key, Collection<Node> destinations, PartitionKeyMover mover) {
    try {
      lock(key);
      DbDirectory dir = new DbDirectory(dimension);
      Collection<Node> origins = Transform.map(new Unary<KeySemaphore, Node>() {
        public Node f(KeySemaphore keySemaphore) {
          return getNode(keySemaphore.getNodeId());
        }
      }, dir.getKeySemamphoresOfPrimaryIndexKey(key));

      //Elect a random origin node as the authority
      Node authority = Lists.random(origins);
      Object migrant = mover.get(key, authority);

      //Copy the records
      for (Node destination : destinations) {
        try {
          deepNodeToNodeCopy(migrant, authority, destination, mover);
        } catch (RuntimeException e) {
          throw new MigrationException(String.format("Error while copying records to node %s", destination.getName()), e);
        }
      }
      //Update the directory entries
      try {
        dir.deletePrimaryIndexKey(key);
        for (Node destination : destinations)
          dir.insertPrimaryIndexKey(destination, key);
      } catch (RuntimeException e) {
        try {
          //try to repair the damage
          for (Node origin : origins)
            dir.insertPrimaryIndexKey(origin, key);
        } catch (Exception ex) {
        }
        throw new MigrationException(
          String.format("Failed to update directory entry for %s. Records may be orphaned.",
            key), e);
View Full Code Here


    final Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    final String key = new String("Antarctica");

    final PartitionDimension partitionDimension = hive.getPartitionDimension();
    hive.directory().insertPrimaryIndexKey(key);
    NodeResolver directory = new DbDirectory(partitionDimension, CachingDataSourceProvider.getInstance().getDataSource(hive.getUri()));
    for (Integer id : Transform.map(DirectoryWrapper.semaphoreToId(), directory.getKeySemamphoresOfPrimaryIndexKey(key)))
      hive.getNode(id).setStatus(Status.readOnly);

    AssertUtils.assertThrows(new Toss() {
      public void f() throws Exception {
        hive.connection().getByPartitionKey(key, AccessType.ReadWrite);
View Full Code Here

    Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    final String key = new String("Asia");

    PartitionDimension partitionDimension = hive.getPartitionDimension();
    hive.directory().insertPrimaryIndexKey(key);
    NodeResolver directory = new DbDirectory(partitionDimension, CachingDataSourceProvider.getInstance().getDataSource(hive.getUri()));
    for (Integer id : Transform.map(DirectoryWrapper.semaphoreToId(), directory.getKeySemamphoresOfPrimaryIndexKey(key)))
      hive.updateNodeStatus(hive.getNode(id), Status.readOnly);
    hive = null;

    final Hive fetchedHive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
View Full Code Here

    }
  }


  private Directory getDirectory() {
    return new DbDirectory(dimension, CachingDataSourceProvider.getInstance().getDataSource(dimension.getIndexUri()));
  }
View Full Code Here

    Integer secondaryKey = new Integer(7);

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), CachingDataSourceProvider.getInstance().getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    Mover<Integer> secMover = new SecondaryMover();

    //Do the actual migration
    Migrator m = new HiveMigrator(hive);
    assertNotNull(Filter.grepItemAgainstList(origin.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    m.migrate(primaryKey, Arrays.asList(new String[]{destination.getName()}), pMover);
    //Directory points to the destination node
    assertNotNull(Filter.grepItemAgainstList(destination.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    //Records exist and are identical on the destination node
    assertEquals(primaryKey, pMover.get(primaryKey, destination));
    assertEquals(secondaryKey, secMover.get(secondaryKey, destination));
  }
View Full Code Here

    Integer secondaryKey = new Integer(7);

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    //This mover just craps out on copy
    Mover<Integer> failingMover = new Mover<Integer>() {
      public void copy(Integer item, Node node) {
        throw new RuntimeException("");
      }

      public void delete(Integer item, Node node) {
      }

      public Integer get(Object id, Node node) {
        return null;
      }
    };

    Migrator m = new HiveMigrator(hive);
    pMover.getDependentMovers().clear();
    pMover.getDependentMovers().add(new Pair<Mover, KeyLocator>(failingMover, new SecondaryKeyLocator(origin.getUri())));
    try {
      m.migrate(primaryKey, Arrays.asList(new String[]{destination.getName()}), pMover);
    } catch (Exception e) {
      //Quash
    }
    //Directory still points to the origin node
    assertNotNull(Filter.grepItemAgainstList(origin.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    //Records are intact on the origin node
    assertEquals(primaryKey, pMover.get(primaryKey, origin));
    assertEquals(secondaryKey, new SecondaryMover().get(secondaryKey, origin));
  }
View Full Code Here

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();

    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
//    This mover just craps out on delete
    Mover<Integer> failingMover = new Mover<Integer>() {
      public void copy(Integer item, Node node) {
        SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
        dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
        dao.getJdbcTemplate().update("insert into secondary_table values (?)", new Object[]{item});
      }

      public void delete(Integer item, Node node) {
        throw new RuntimeException("Ach!");
      }

      public Integer get(Object id, Node node) {
        SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
        dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
        return dao.getJdbcTemplate().queryForInt("select id from secondary_table where id = ?", new Object[]{id});
      }
    };

    Migrator m = new HiveMigrator(hive);
    pMover.getDependentMovers().clear();
    pMover.getDependentMovers().add(new Pair<Mover, KeyLocator>(failingMover, new SecondaryKeyLocator(origin.getUri())));
    try {
      m.migrate(primaryKey, Arrays.asList(new String[]{destination.getName()}), pMover);
    } catch (Exception e) {
      //Quash
    }
    //Directory still points to the destination
    assertNotNull(Filter.grepItemAgainstList(destination.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    //Records exist on the destination
    assertEquals(primaryKey, pMover.get(primaryKey, destination));
    assertEquals(secondaryKey, new SecondaryMover().get(secondaryKey, destination));
  }
View Full Code Here


  private Pair<Node, Node> initializeTestData(Hive hive, String primaryKey, Integer secondaryKey) throws Exception {
    hive.directory().insertPrimaryIndexKey(primaryKey);
    //Setup the test data on one node
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    int originId = Atom.getFirst(dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey)).getNodeId();
    Node origin = hive.getNode(originId);
    Node destination = origin.getName().equals("data1") ? hive.getNode("data2") :
      hive.getNode("data1");
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    Mover<Integer> secMover = new SecondaryMover();
View Full Code Here

TOP

Related Classes of org.hivedb.meta.directory.DbDirectory

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.