Package: com.splout.db.common

Examples of com.splout.db.common.PartitionMap


        repEntry.setShard(i);
        repEntry.setNodes(Arrays.asList(new String[] { "localhost:" + dNodeConfig.getInt(DNodeProperties.PORT) }));
        replicationEntries.add(repEntry);
      }

      Tablespace tablespace1 = new Tablespace(new PartitionMap(partitions), new ReplicationMap(replicationEntries), 1l,
          0l);
      handler.getContext().getTablespaceVersionsMap().put(new TablespaceVersion("tablespace1", 1l), tablespace1);
      handler.getContext().getCurrentVersionsMap().put("tablespace1", 1l);

      // The following has to be read as: multi-query range [23-25)
View Full Code Here


            log.info("Removing empty tablespaceVersion: " + tablespaceVersion
                + " due to explicit leaving from node " + dNodeInfo.getAddress());
            tablespaceVersionMap.remove(tablespaceVersion);
          } else {
            // Update the info in memory
            currentTablespace = new Tablespace(new PartitionMap(partitionMap), new ReplicationMap(
                replicationMap), versionName, deployDate);
            tablespaceVersionMap.put(tablespaceVersion, currentTablespace);
          }
        }
      }
View Full Code Here

      if (!sourceFs.exists(partitionMapPath)) {
        throw new IllegalArgumentException("Invalid tablespace folder: " + tablespaceOut + " doesn't contain a partition-map file.");
      }

      // Load the partition map
      PartitionMap partitionMap = JSONSerDe.deSer(
          HadoopUtils.fileToString(sourceFs, partitionMapPath), PartitionMap.class);

      // Load the init statements, if they exist
      ArrayList<String> initStatements = new ArrayList<String>();
      Path initStatementsPath = new Path(tablespaceOut, TablespaceGenerator.OUT_INIT_STATEMENTS);
      if (sourceFs.exists(initStatementsPath)) {
        initStatements.addAll(JSONSerDe.deSer(HadoopUtils.fileToString(sourceFs, initStatementsPath), ArrayList.class));
      }
      // Add the other initStatements coming in the deploy request
      if (tablespace.getInitStatements() != null) {
        initStatements.addAll(tablespace.getInitStatements());
      }
     
      String engine = DefaultEngine.class.getName();
      // New : load the engine id used in the generation tool, if exists ( to maintain backwards compatibility )
      Path engineId = new Path(tablespaceOut, TablespaceGenerator.OUT_ENGINE);
      if(sourceFs.exists(engineId)) {
        engine = HadoopUtils.fileToString(sourceFs, engineId);
        log.info("Using generated engine id: " + engine);
      }
     
      // Finally set
      deployRequests[tIndex].setInitStatements(initStatements);
      deployRequests[tIndex].setEngine(engine);
     
      deployRequests[tIndex].setTablespace(tablespace.getTablespace());
      deployRequests[tIndex].setData_uri(new Path(absoluteOutPath, "store").toUri().toString());
      deployRequests[tIndex].setPartitionMap(partitionMap.getPartitionEntries());

      // If rep > dnodes, impossible to reach this level of replication
      int repFactor = tablespace.getReplication();
      if (dnodes.size() < repFactor) {
        log.warn("WARNING: Replication factor " + repFactor + " for tablespace " + tablespace.getTablespace() + " is bigger than the number of serving DNodes. Adjusting replication factor to " + dnodes.size());
        repFactor = dnodes.size();
      }

      deployRequests[tIndex].setReplicationMap(ReplicationMap.roundRobinMap(partitionMap.getPartitionEntries().size(), repFactor,
          dnodes.toArray(new String[0])).getReplicationEntries());

      tIndex++;
    }
View Full Code Here

    TablespaceGenerator viewGenerator = new TablespaceGenerator(builder.build(), new Path(OUTPUT),
        this.getClass());
    viewGenerator.generateView(conf, samplingType, new TupleSampler.RandomSamplingOptions());

    PartitionMap partitionMap = JSONSerDe.deSer(
        HadoopUtils.fileToString(FileSystem.getLocal(conf), new Path(OUTPUT, "partition-map")),
        PartitionMap.class);

    assertEquals(null, partitionMap.getPartitionEntries().get(0).getMin());
    assertEquals("aa", partitionMap.getPartitionEntries().get(0).getMax());

    assertEquals("aa", partitionMap.getPartitionEntries().get(1).getMin());
    assertEquals("ab", partitionMap.getPartitionEntries().get(1).getMax());

    assertEquals("ab", partitionMap.getPartitionEntries().get(2).getMin());
    assertEquals(null, partitionMap.getPartitionEntries().get(2).getMax());

    Runtime.getRuntime().exec("rm -rf " + INPUT);
    Runtime.getRuntime().exec("rm -rf " + OUTPUT);
  }
View Full Code Here

    reader.close();
    fileSystem.delete(sampledInput, true);
    fileSystem.delete(sampledInputSorted, true);

    // 2.2 Create the partition map
    return new PartitionMap(partitionEntries);
  }
View Full Code Here

    }

    partitionEntries.get(0).setMin(null);
    partitionEntries.get(partitionEntries.size() - 1).setMax(null);
   
    PartitionMap partitionMap = new PartitionMap(partitionEntries);
    HadoopUtils.stringToFile(outFs, new Path(out, "partition-map"), JSONSerDe.ser(partitionMap));

    List<Field> fields = new ArrayList<Field>();
    fields.add(Field.create(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD, Type.INT));
    fields.addAll(Fields.parse("key:int, value:string"));
View Full Code Here

      replicationEntries.add(repEntry);
    }
    // A valid partition map is complete: first min is null and last max is null.
    partitions.get(0).setMin(null);
    partitions.get(partitions.size() - 1).setMax(null);
    return new Tablespace(new PartitionMap(partitions), new ReplicationMap(replicationEntries), 0l, 0l);
  }
View Full Code Here

TOP

Related Classes of com.splout.db.common.PartitionMap

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.