Package: com.splout.db.common

Usage examples of the class com.splout.db.common.PartitionEntry


                                                  // tablespaces

    // For every deploy request, build the per-DNode list of DeployActions:
    // each action tells one node which shard database to fetch and which
    // partition metadata to persist alongside it.
    for(DeployRequest req : deployRequests) {
      for(Object obj : req.getReplicationMap()) {
        ReplicationEntry rEntry = (ReplicationEntry) obj;
        // Locate the PartitionEntry whose shard id matches this replication entry.
        PartitionEntry pEntry = null;
        for(PartitionEntry partEntry : req.getPartitionMap()) {
          if(partEntry.getShard().equals(rEntry.getShard())) {
            pEntry = partEntry;
          }
        }
        // A replication entry with no matching partition metadata means the two
        // maps are out of sync; dump both maps for diagnosis and fail fast.
        if(pEntry == null) {
          String msg = "No Partition metadata for shard: " + rEntry.getShard()
              + " this is very likely to be a software bug.";
          log.error(msg);
          try {
            log.error("Partition map: " + JSONSerDe.ser(req.getPartitionMap()));
            log.error("Replication map: " + JSONSerDe.ser(req.getReplicationMap()));
          } catch (JSONSerDe.JSONSerDeException e) {
            log.error("JSON error", e);
          }
          throw new RuntimeException(msg);
        }
        // Normalize DNode ids -> The convention is that DNodes are identified by host:port . So we need to strip the
        // protocol, if any
        for(int i = 0; i < rEntry.getNodes().size(); i++) {
          String dnodeId = rEntry.getNodes().get(i);
          if(dnodeId.startsWith("tcp://")) {
            dnodeId = dnodeId.substring("tcp://".length(), dnodeId.length());
          }
          rEntry.getNodes().set(i, dnodeId);
        }
        // One DeployAction per (DNode, shard) pair, accumulated per node id.
        for(String dNode : rEntry.getNodes()) {
          // MapUtils.getObject returns the mapped list if present, otherwise the
          // supplied default (a fresh empty list), which we then put back.
          List<DeployAction> actionsSoFar = (List<DeployAction>) MapUtils.getObject(actions, dNode,
              new ArrayList<DeployAction>());
          actions.put(dNode, actionsSoFar);
          DeployAction deployAction = new DeployAction();
          // Each shard's data file is located at <data_uri>/<shard>.db
          deployAction.setDataURI(req.getData_uri() + "/" + rEntry.getShard() + ".db");
          deployAction.setTablespace(req.getTablespace());
          deployAction.setVersion(version);
          deployAction.setPartition(rEntry.getShard());

          // Add partition metadata to the deploy action for DNodes to save it
          PartitionMetadata metadata = new PartitionMetadata();
          metadata.setMinKey(pEntry.getMin());
          metadata.setMaxKey(pEntry.getMax());
          metadata.setNReplicas(rEntry.getNodes().size());
          metadata.setDeploymentDate(deployDate);
          metadata.setInitStatements(req.getInitStatements());
          metadata.setEngineId(req.getEngine());
View Full Code Here


        // Spin up a test DNode whose handler simply reports its id; the data
        // folder name is made unique per test class and per index i.
        DNode dnode = TestUtils.getTestDNode(dNodeConfig, new TellIDHandler("DNode" + i), "dnode-"
            + this.getClass().getName() + "-" + i);
        dNodes.add(dnode);
        // Define the partition for this DNode
        // DNode 0 will have [10, 20), DNode 1 [20, 30], ...
        PartitionEntry partitionEntry = new PartitionEntry();
        partitionEntry.setMin((i * 10 + 10) + "");
        partitionEntry.setMax((i * 10 + 20) + "");
        partitionEntry.setShard(i);
        partitions.add(partitionEntry);
        // And the replication
        // Single replica: shard i is served only by this local DNode's port.
        ReplicationEntry repEntry = new ReplicationEntry();
        repEntry.setShard(i);
        repEntry.setNodes(Arrays.asList(new String[] { "localhost:" + dNodeConfig.getInt(DNodeProperties.PORT) }));
View Full Code Here

              // NO! So we have to remove the DNode
              entry.getNodes().remove(dNodeInfo.getAddress());
              // If no replica is left for this shard, drop the shard from both
              // the replication map and the partition map.
              if(entry.getNodes().isEmpty()) {
                repIter.remove();
                // Remove also from PartitionMap
                // NOTE(review): a fresh PartitionEntry with only the shard set is
                // used as the removal key, which presumes PartitionEntry equality
                // is shard-based -- TODO confirm against PartitionEntry.equals().
                PartitionEntry pEntry = new PartitionEntry();
                pEntry.setShard(entry.getShard());
                tablespace.getPartitionMap().getPartitionEntries().remove(pEntry);
              }
            }
          }
          if(!entry.getNodes().isEmpty()) {
            nonEmptyReplicas++;
          }
        }
        // A tablespace version with no serving replicas is useless: drop it.
        if(nonEmptyReplicas == 0) {
          // Delete TablespaceVersion
          log.info("Removing empty tablespace version (implicit leaving from " + dNodeInfo.getAddress()
              + "): " + tablespaceName + ", " + version);
          iterator.remove();
        }
      }

      // Now iterate over all the tablespaces of this DNode to see new additions or EXPLICIT leavings
      for(Map.Entry<String, Map<Long, Map<Integer, PartitionMetadata>>> tablespaceEntry : dNodeInfo
          .getServingInfo().entrySet()) {
        String tablespaceName = tablespaceEntry.getKey();
        // Iterate over all versions of this tablespace
        for(Map.Entry<Long, Map<Integer, PartitionMetadata>> versionEntry : tablespaceEntry.getValue()
            .entrySet()) {
          Long versionName = versionEntry.getKey();
          TablespaceVersion tablespaceVersion = new TablespaceVersion(tablespaceName, versionName);
          Tablespace currentTablespace = tablespaceVersionMap.get(tablespaceVersion);
          List<PartitionEntry> partitionMap = new ArrayList<PartitionEntry>();
          List<ReplicationEntry> replicationMap = new ArrayList<ReplicationEntry>();
          // -1 acts as a sentinel meaning "deployment date not yet known".
          long deployDate = -1;
          if(currentTablespace != null) {
            // Not first time we see this tablespace. We do a copy of the partition map to be able to modify it without
            // contention.
            partitionMap.addAll(currentTablespace.getPartitionMap().getPartitionEntries());
            replicationMap.addAll(currentTablespace.getReplicationMap().getReplicationEntries());
            deployDate = currentTablespace.getCreationDate();
          }
          // Iterate over all partitions of this tablespace
          for(Map.Entry<Integer, PartitionMetadata> partition : versionEntry.getValue().entrySet()) {
            // All partitions of one version must share a single deployment date;
            // adopt the first date seen, then reject any mismatching one.
            deployDate = deployDate == -1 ? partition.getValue().getDeploymentDate() : deployDate;
            if(deployDate != -1 && (deployDate != partition.getValue().getDeploymentDate())) {
              throw new TablespaceVersionInfoException(
                  "Inconsistent partition metadata within same node, deploy date was " + deployDate
                      + " versus " + partition.getValue().getDeploymentDate());
            }
            PartitionMetadata metadata = partition.getValue();
            Integer shard = partition.getKey();
            // Create a PartitionEntry according to this PartitionMetadata
            PartitionEntry myEntry = new PartitionEntry();
            myEntry.setMax(metadata.getMaxKey());
            myEntry.setMin(metadata.getMinKey());
            myEntry.setShard(shard);
            PartitionEntry existingPartitionEntry = null;
            // Look for an existing PartitionEntry for the same shard in the PartitionMap
            if(!partitionMap.contains(myEntry)) {
              // Unknown shard: add it -- unless the event is a node LEAVING,
              // since a leaving node must not introduce new partitions.
              if(!event.equals(DNodeEvent.LEAVE)) {
                // In this case all conditions are met for adding a new entry to the PartitionMap
                partitionMap.add(myEntry);
                // Note that now the PartitionMap is not necessarily sorted! let's sort it now
                Collections.sort(partitionMap);
              }
            } else {
              // Check consistency of this Partition Metadata
              existingPartitionEntry = partitionMap.get(partitionMap.indexOf(myEntry));
              // Max keys must agree; a null max (open-ended range) only matches null.
              if(existingPartitionEntry.getMax() == null || myEntry.getMax() == null) {
                if(!(existingPartitionEntry.getMax() == null && myEntry.getMax() == null)) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              } else {
                if(!existingPartitionEntry.getMax().equals(myEntry.getMax())) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              }
              // Same null-aware equality check, applied to the min keys.
              if(existingPartitionEntry.getMin() == null || myEntry.getMin() == null) {
                if(!(existingPartitionEntry.getMin() == null && myEntry.getMin() == null)) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              } else {
                if(!existingPartitionEntry.getMin().equals(myEntry.getMin())) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              }
View Full Code Here

    // Compute the partition map from the sorted sample keys and compare it
    // against the expected boundary array: N+1 boundaries -> N partitions.
    List<PartitionEntry> partitionEntries = TablespaceGenerator.calculatePartitions(nPartitions, sampleKeysSorted.length, reader);

    System.out.println("Expected: " + toS(expectedPartitionMap) + ", Obtained: " + toS(partitionEntries));
    assertEquals(expectedPartitionMap.length - 1, partitionEntries.size());
    // NOTE(review): the bound (length - 2) stops one entry short of the last
    // partition -- presumably because the last range is open (max == null) and
    // would not match expectedPartitionMap[i + 1]; TODO confirm, and consider
    // still asserting the last entry's min and shard.
    for (int i = 0; i < expectedPartitionMap.length - 2; i++) {
      PartitionEntry pe = partitionEntries.get(i);
      // Each partition spans (expected[i], expected[i+1]] and shards are 0-indexed.
      assertEquals(expectedPartitionMap[i], pe.getMin());
      assertEquals(expectedPartitionMap[i + 1], pe.getMax());
      assertEquals(i, (int) pe.getShard());
    }
  }
View Full Code Here

    // Sampling-based partition calculation: walk the sorted sample keys and cut
    // a boundary roughly every `offset` rows, always advancing to a distinct
    // key so that no two partitions share the same max.
    String previousKey = null;
    String currentKey = null;
    // Remembered fallback min for the last partition, used below if the final
    // partition would otherwise end up empty.
    String candidateToLastPartitionMin = null;
    boolean foundDistinctKey = false;
    for (int i = 1; i <= nPartitions; i++) {
      PartitionEntry entry = new PartitionEntry();
      // The first partition's min stays null (open lower bound).
      if (min != null) {
        entry.setMin(min);
      }
      int keyIndex = i * offset;
      foundDistinctKey = false;
      do {
        wereMore = reader.next(key);
        if (wereMore) {
          rowPointer++;

          // Logic for knowing which is currentKey and previousKey
          if (!equalsWithNulls(key.toString(), currentKey)) {
            foundDistinctKey = true;
            previousKey = currentKey;
            currentKey = key.toString();
          }
        }
      // Keep iterating until we have advanced enough and we have find a different key.
      } while (wereMore && (rowPointer < keyIndex || !foundDistinctKey));

      // If we are sure there are at least one partition more
      // we store the possible candidate to last partition min.
      if (wereMore && i<nPartitions) {
        candidateToLastPartitionMin = previousKey;
      }
      // Close this partition at the current key; the next one starts from it.
      entry.setMax(key.toString());
      min = key.toString();
      entry.setShard(i-1); // Shard are 0-indexed
      partitionEntries.add(entry);

      // No more rows to consume. No more partitions to build.
      if (!wereMore) {
        break;
      }
    }
    int generatedPartitions = partitionEntries.size();
    // Last range must be opened
    partitionEntries.get(generatedPartitions-1).setMax(null);

    // Especial case. We want to ensure that every partition contains at least
    // one entry. Given than ranges are (,] that is ensured for every partition
    // but for the latest. We can ensure that the latest partition is not empty
    // if it has more sample keys after the latest min. That is, if
    // foundDistinctKey is true. Otherwise, we have to adjust:
    // We are going to try to adjust latest partition min
    // to a key before the selected min. If that is not possible, we merge
    // the latest to partitions. That solves the problem.
    if (!foundDistinctKey && partitionEntries.size() > 1) {
      PartitionEntry previous = partitionEntries.get(generatedPartitions - 2);
      PartitionEntry latest = partitionEntries.get(generatedPartitions - 1);
      // if previous.getMin() < candidateToLastPartitionMin
      // it is possible to adjust the latest two partitions
      if (compareWithNulls(previous.getMin(), candidateToLastPartitionMin) < 0) {
        // Shift the boundary back so the last partition captures at least one key.
        previous.setMax(candidateToLastPartitionMin);
        latest.setMin(candidateToLastPartitionMin);
      } else {
        // Was not possible to adjust. Merging two latest partitions.
        previous.setMax(null);
        partitionEntries.remove(generatedPartitions - 1);
      }

    // Build a zero-padding format (e.g. "%05d") so that string-ordered keys
    // sort the same way as their numeric values.
    String paddingExp = "%0" + (padding != null ? padding : maxKeyDigits) + "d";
    for(int i = 0; i < nPartitions; i++) {
      // Partition i covers the numeric range [i*eachPartition, (i+1)*eachPartition].
      int thisMin = (i * eachPartition);
      int thisMax = (i + 1) * eachPartition;
      // One input file per partition: "<i>.txt" containing "<i>\t<min>:<max>".
      HadoopUtils.stringToFile(inFs, new Path(input, i + ".txt"), i + "\t" + thisMin + ":" + thisMax);
      PartitionEntry entry = new PartitionEntry();
      entry.setMin(String.format(paddingExp, thisMin));
      entry.setMax(String.format(paddingExp, thisMax));
      entry.setShard(i);
      partitionEntries.add(entry);
    }

    // Open both ends of the overall key space: the first partition has no lower
    // bound and the last has no upper bound.
    partitionEntries.get(0).setMin(null);
    partitionEntries.get(partitionEntries.size() - 1).setMax(null);
View Full Code Here

    // Build a partition map and replication map for nDnodes test nodes.
    List<PartitionEntry> partitions = new ArrayList<PartitionEntry>();
    List<ReplicationEntry> replicationEntries = new ArrayList<ReplicationEntry>();
    for(int i = 0; i < nDnodes; i++) {
      // Define the partition for this DNode
      // DNode 0 will have [0, 10], DNode 1 [10, 20], ...
      PartitionEntry partitionEntry = new PartitionEntry();
      partitionEntry.setMin((i * 10) + "");
      partitionEntry.setMax((i * 10 + 10) + "");
      partitionEntry.setShard(i);
      partitions.add(partitionEntry);
      // And the replication
      ReplicationEntry repEntry = new ReplicationEntry();
      repEntry.setShard(i);
      // j is the next node index, presumably used (below, out of view) to add a
      // second replica on the neighboring node -- TODO confirm in full source.
      int j = i + 1;
View Full Code Here

TOP

Related Classes of com.splout.db.common.PartitionEntry

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle, Inc. Contact: coftware#gmail.com.