Package org.voltdb.catalog

Examples of org.voltdb.catalog.Site

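All of the snippets on this page walk the same catalog hierarchy: a Host owns one or more Site entries, and each Site exposes its Partition children through a CatalogMap. Below is a minimal orientation sketch that uses only accessors appearing in the examples (the sketch's own class and method names are hypothetical):

import java.util.Collection;

import org.voltdb.catalog.CatalogMap;
import org.voltdb.catalog.Partition;
import org.voltdb.catalog.Site;

public final class SiteTraversalSketch {

    /** Sum the partition counts of all sites on one host, walking Site -> Partition. */
    public static int countPartitions(Collection<Site> sitesOnHost) {
        int total = 0;
        for (Site site : sitesOnHost) {
            CatalogMap<Partition> partitions = site.getPartitions();
            total += partitions.size();
        }
        return total;
    }
}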

    private void createSetup(String file_path, String file_nonce,
            long startTime, SystemProcedureExecutionContext context,
            String hostname, final VoltTable result) {
        {           
            Site site = context.getSite();
            int numLocalPartitions  = site.getPartitions().size();
                       
            LOG.trace("createSetup at : partition "+context.getPartitionExecutor().getPartitionId());
           
            /*
             * Used to close targets on failure
             */
            final ArrayList<SnapshotDataTarget> targets = new ArrayList<SnapshotDataTarget>();
            try {
                final ArrayDeque<SnapshotTableTask> partitionedSnapshotTasks =
                    new ArrayDeque<SnapshotTableTask>();
                final ArrayList<SnapshotTableTask> replicatedSnapshotTasks =
                    new ArrayList<SnapshotTableTask>();
               
                LOG.trace("ExecutionSitesCurrentlySnapshotting initial check : " + SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.get());
                assert(SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.get() == -1);

                final List<Table> tables = SnapshotUtil.getTablesToSave(context.getDatabase());

                synchronized (SnapshotSiteProcessor.m_digestWritten) {
                    if (SnapshotSiteProcessor.m_digestWritten.get() == false) {
                        SnapshotSiteProcessor.m_digestWritten.set(true);
                        SnapshotUtil.recordSnapshotTableList(startTime, file_path, file_nonce, tables);
                        LOG.trace("Digest written at partition " + context.getPartitionExecutor().getPartitionId());
                    }
                }

                final AtomicInteger numTables = new AtomicInteger(tables.size());               
                //LOG.info("NumTables Initial : "+numTables);
               
                final SnapshotRegistry.Snapshot snapshotRecord =
                    SnapshotRegistry.startSnapshot(
                            startTime,
                            context.getHStoreSite().getHostId(),
                            context.getHStoreSite().getSiteId(),
                            context.getPartitionExecutor().getPartitionId(),                          
                            file_path,
                            file_nonce,
                            tables.toArray(new Table[0]));
               
                for (final Table table : SnapshotUtil.getTablesToSave(context.getDatabase()))
                {
                    String canSnapshot = "SUCCESS";
                    String err_msg = "";
                    final File saveFilePath =
                        SnapshotUtil.constructFileForTable(table, file_path, file_nonce,
                                              String.valueOf(context.getHost().getId()),                                
                                              String.valueOf(context.getHStoreSite().getSiteId()),
                                              String.valueOf(context.getPartitionExecutor().getPartitionId())
                                              );
                    SnapshotDataTarget sdt = null;
                    try {
                        sdt =
                            constructSnapshotDataTargetForTable(
                                    context,
                                    saveFilePath,
                                    table,
                                    context.getSite().getHost(),
                                    numLocalPartitions,
                                    startTime);
                        targets.add(sdt);
                        final SnapshotDataTarget sdtFinal = sdt;
                        final Runnable onClose = new Runnable() {
                            @Override
                            public void run() {
                                snapshotRecord.updateTable(table.getTypeName(),
                                        new SnapshotRegistry.Snapshot.TableUpdater() {
                                    @Override
                                    public SnapshotRegistry.Snapshot.Table update(
                                            SnapshotRegistry.Snapshot.Table registryTable) {
                                        return snapshotRecord.new Table(
                                                registryTable,
                                                sdtFinal.getBytesWritten(),
                                                sdtFinal.getLastWriteException());
                                    }
                                });
                                int tablesLeft = numTables.decrementAndGet();
                                if (tablesLeft == 0) {
                                    final SnapshotRegistry.Snapshot completed =
                                        SnapshotRegistry.finishSnapshot(snapshotRecord);
                                    final double duration =
                                        (completed.timeFinished - completed.timeStarted) / 1000.0;
                                    LOG.info(
                                            "Snapshot " + snapshotRecord.nonce + " finished at " +
                                             completed.timeFinished + " and took " + duration
                                             + " seconds ");
                                }
                            }
                        };

                        sdt.setOnCloseHandler(onClose);

                        final SnapshotTableTask task =
                            new SnapshotTableTask(
                                    table.getRelativeIndex(),
                                    sdt,                                   
                                    table.getIsreplicated(),
                                    table.getTypeName());

                        if (table.getIsreplicated()) {
                            replicatedSnapshotTasks.add(task);
                        } else {
                            partitionedSnapshotTasks.offer(task);
                        }
                    } catch (IOException ex) {
                        /*
                         * Creation of this specific target failed. Close it if it was created.
                         * Continue attempting the snapshot anyways so that at least some of the data
                         * can be retrieved.
                         */
                        try {
                            if (sdt != null) {
                                targets.remove(sdt);
                                sdt.close();
                            }
                        } catch (Exception e) {
                            LOG.error(e);
                        }

                        StringWriter sw = new StringWriter();
                        PrintWriter pw = new PrintWriter(sw);
                        ex.printStackTrace(pw);
                        pw.flush();
                        canSnapshot = "FAILURE";
                        err_msg = "SNAPSHOT INITIATION OF " + saveFilePath +
                        "RESULTED IN IOException: \n" + sw.toString();
                    }

                    // numeric host id recovered by stripping the non-digit characters from the catalog host name
                    result.addRow(Integer.parseInt(context.getSite().getHost().getTypeName().replaceAll("[\\D]", "")),
                            hostname,
                            context.getHStoreSite().getSiteId(),
                            context.getPartitionExecutor().getPartitionId(),                        
                            table.getTypeName(),
                            canSnapshot,
                            err_msg);
                }

                synchronized (SnapshotSiteProcessor.m_taskListsForSites) {
                    if (!partitionedSnapshotTasks.isEmpty() || !replicatedSnapshotTasks.isEmpty()) {

                        // Used to sync across all partitions on all sites - set only once                       
                        if(SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.get() == -1){
                            SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.set(numLocalPartitions);
                            LOG.trace("ExecutionSitesCurrentlySnapshotting set :" + SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.get());
                        }

                        for (int ii = 0; ii < numLocalPartitions; ii++) {
                            SnapshotSiteProcessor.m_taskListsForSites.add(new ArrayDeque<SnapshotTableTask>());
                        }
                    } else {
                        SnapshotRegistry.discardSnapshot(snapshotRecord);
                    }

                    /**
                     * Distribute the writing of replicated tables to exactly one partition.
                     */

                    CatalogMap<Partition> partition_map = site.getPartitions();
                    Integer lowest_partition_id = Integer.MAX_VALUE, p_id;       
                    for (Partition pt : partition_map) {
                        p_id = pt.getId();
                        lowest_partition_id = Math.min(p_id, lowest_partition_id);
                    }       
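
The Integer.MAX_VALUE / Math.min scan at the end of this snippet, which picks the lowest partition id of the local Site so that replicated tables are written exactly once, reappears in the save-file scan examples further down. A small helper capturing it, sketched with a name of our choosing:

import org.voltdb.catalog.Partition;
import org.voltdb.catalog.Site;

public final class LowestPartitionSketch {

    /** Smallest partition id owned by this Site, mirroring the loop used above. */
    public static int lowestPartitionId(Site site) {
        int lowest = Integer.MAX_VALUE;
        for (Partition pt : site.getPartitions()) {
            lowest = Math.min(pt.getId(), lowest);
        }
        return lowest;
    }
}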


        if (fragmentId == SysProcFragmentId.PF_snapshotDelete)
        {
            // Choose the lowest site ID on this host to do the deletion.
            // All other sites should just return empty results tables.
            Host catalog_host = context.getHost();
            Site catalog_site = CollectionUtil.first(CatalogUtil.getSitesForHost(catalog_host));
            Integer lowest_site_id = catalog_site.getId();
           
            LOG.trace("Site id :"+context.getPartitionExecutor().getSiteId());                      
            int partition_id = context.getPartitionExecutor().getPartitionId();
            LOG.trace("Partition Id : " + partition_id);
                       
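The snippet is cut off before the comparison, but its comments state the intent: only the site whose id matches lowest_site_id deletes the snapshot files, while every other site returns an empty result table. A hedged sketch of that guard (helper name is ours) which scans the host's sites explicitly rather than relying on CollectionUtil.first(...) returning the lowest id:

import java.util.Collection;

import org.voltdb.catalog.Site;

public final class LowestSiteGuardSketch {

    /** True if mySite has the lowest id among the sites on its host and should do host-wide work. */
    public static boolean isLowestSiteOnHost(Site mySite, Collection<Site> sitesOnHost) {
        int lowest = Integer.MAX_VALUE;
        for (Site s : sitesOnHost) {
            lowest = Math.min(s.getId(), lowest);
        }
        return mySite.getId() == lowest;
    }
}

The site collection would come from CatalogUtil.getSitesForHost(catalog_host), exactly as in the snippet above.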

                a_id2 = random_int * catalog_clus.getNum_partitions() + partition_num;
                break;
            }
            case SAME_SITE: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();
                double a_id_site_num = Math.floor((double) partition_num / (double) num_partitions_per_site);
                double a_id_host_num = Math.floor((double) a_id_site_num / (double) num_sites_per_host);
                // determine the partition range for the cluster (with a random
                // host and random sites)
                // and pick partition randomly from this range
                int lowerbound = (int) a_id_host_num * num_sites_per_host * num_partitions_per_site + (int) a_id_site_num * num_partitions_per_site;
                int upperbound = (int) a_id_host_num * num_sites_per_host * num_partitions_per_site + (int) a_id_site_num * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.numberExcluding(lowerbound, upperbound, partition_num);
                // get a random partition
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case SAME_HOST: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();

                double a_id_site_num = Math.floor((double) partition_num / (double) num_partitions_per_site);
                double a_id_host_num = Math.floor((double) a_id_site_num / (double) num_sites_per_host);
                int lowerboundsite = (int) a_id_host_num * num_sites_per_host;
                int upperboundsite = (int) a_id_host_num * num_sites_per_host + (num_sites_per_host - 1);
                int new_site = rng.numberExcluding(lowerboundsite, upperboundsite, (int) a_id_site_num);
                int lowerbound = new_site * num_partitions_per_site;
                int upperbound = new_site * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.number(lowerbound, upperbound);
                // get a random partition
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case REMOTE_HOST: {
                int total_number_of_hosts = catalog_clus.getHosts().size();
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();
                // get the site number the partition exists on
                double a_id_site_num = Math.floor((double) partition_num / (double) num_partitions_per_site);
                double a_id_host_num = Math.floor((double) a_id_site_num / (double) num_sites_per_host);
                int new_host = (int) a_id_host_num;
                if (total_number_of_hosts > 1) {
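
The SAME_SITE, SAME_HOST, and REMOTE_HOST cases above all start from the same two divisions: the site index is the partition number divided by the partitions per site, and the host index is the site index divided by the sites per host. A worked sketch with made-up cluster numbers (integer division gives the same result as the Math.floor-on-double form used above for non-negative values):

public final class PartitionTopologySketch {

    public static void main(String[] args) {
        // Hypothetical layout: 2 hosts, 3 sites per host, 4 partitions per site (24 partitions total).
        int numSitesPerHost = 3;
        int numPartitionsPerSite = 4;

        int partitionNum = 10;  // a partition id as produced by the hashinator

        int siteNum = partitionNum / numPartitionsPerSite;  // 10 / 4 = 2
        int hostNum = siteNum / numSitesPerHost;            // 2 / 3  = 0

        // SAME_HOST bounds: the sibling sites of partition 10 are sites 0..2 on host 0.
        int lowerboundSite = hostNum * numSitesPerHost;                          // 0
        int upperboundSite = hostNum * numSitesPerHost + (numSitesPerHost - 1);  // 2
        System.out.printf("partition %d -> site %d on host %d, sibling sites %d..%d%n",
                partitionNum, siteNum, hostNum, lowerboundSite, upperboundSite);
    }
}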

            VoltTable result = ClusterSaveFileState.constructEmptySaveFileStateVoltTable();
        
            // Choose the lowest site ID on this host to do the file scan
            // All other sites should just return empty results tables.           
            Host catalog_host = context.getHost();           
            Site catalog_site = CollectionUtil.first(CatalogUtil.getSitesForHost(catalog_host));
           
            CatalogMap<Partition> partition_map = catalog_site.getPartitions();
            Integer lowest_partition_id = Integer.MAX_VALUE, p_id;       
            for (Partition pt : partition_map) {
                p_id = pt.getId();
                lowest_partition_id = Math.min(p_id, lowest_partition_id);
            }       

        Map<Integer, Integer> sites_to_partitions = new HashMap<Integer, Integer>();

        // CHANGE : Up Sites
        Host catalog_host = context.getHost();
        Collection<Site> catalog_sites = CatalogUtil.getSitesForHost(catalog_host);
        Site catalog_site = context.getSite();
        Partition catalog_partition = context.getPartitionExecutor().getPartition();           

        LOG.trace("Table :" + tableName);

        for (Site site : catalog_sites) {
            for (Partition partition : site.getPartitions()) {
                sites_to_partitions.put(site.getId(), partition.getId());
            }
        }

        try {
            initializeTableSaveFiles(m_filePath, m_fileNonce, tableName, originalHostIds, relevantPartitionIds, context);
        } catch (IOException e) {
            VoltTable result = constructResultsTable();
            // e.printStackTrace();
            result.addRow(m_hostId, hostname, m_siteId, tableName, relevantPartitionIds[0], "FAILURE", "Unable to load table: " + tableName + " error: " + e.getMessage());
            return result;
        }

        int partition_id = context.getPartitionExecutor().getPartitionId();
        LOG.trace("Starting performLoadPartitionedTable " + tableName + " at partition - " + partition_id);

        String result_str = "SUCCESS";
        String error_msg = "";
        TableSaveFile savefile = null;

        /**
         * For partitioned tables
         */
        try {
            savefile = getTableSaveFile(getSaveFileForPartitionedTable(m_filePath, m_fileNonce, tableName,
                    catalog_host.getId(),
                    catalog_site.getId(),
                    catalog_partition.getId()),                            
                    3, null);
            assert (savefile.getCompleted());
        } catch (IOException e) {
            VoltTable result = constructResultsTable();
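
The sites_to_partitions map built at the top of this snippet keeps only one partition per site, since each later put for the same site id overwrites the earlier one. When every partition of every site is needed, the same Host -> Site -> Partition traversal can fill a set-valued map instead; a sketch under that assumption (class and method names are ours):

import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.voltdb.catalog.Partition;
import org.voltdb.catalog.Site;

public final class SitePartitionMapSketch {

    /** Map each site id to the ids of all partitions it hosts. */
    public static Map<Integer, Set<Integer>> partitionsBySite(Collection<Site> sites) {
        Map<Integer, Set<Integer>> result = new HashMap<Integer, Set<Integer>>();
        for (Site site : sites) {
            Set<Integer> ids = new HashSet<Integer>();
            for (Partition partition : site.getPartitions()) {
                ids.add(partition.getId());
            }
            result.put(site.getId(), ids);
        }
        return result;
    }
}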


//            int withinHostId = i % (sitesPerHost + initiatorsPerHost);

            //int siteId = hostId * VoltDB.SITES_TO_HOST_DIVISOR;// + withinHostId;

            Site site = cluster.getSites().add(String.valueOf(++siteId));
            site.setId(siteId);
            site.setHost(host);
            site.setProc_port(HStoreConstants.DEFAULT_PORT);
            site.setMessenger_port(HStoreConstants.DEFAULT_PORT + HStoreConstants.MESSENGER_PORT_OFFSET);
            site.setIsup(true);

            Partition part = site.getPartitions().add(String.valueOf(++partitionCounter));
            part.setId(partitionCounter);
//            System.err.println("[" + partitionCounter + "] " + CatalogUtil.getDisplayName(site) + " => " + CatalogUtil.getDisplayName(part));
//            System.err.println(CatalogUtil.debug(site));
        }
    }
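
The builder loop above registers one Site per iteration and gives it a single Partition. The same setters extend naturally to several partitions per site; a sketch that leaves the running id counters to the caller, just as the loop above does (the helper name is ours, and the ports are passed in rather than read from HStoreConstants to keep the sketch self-contained):

import org.voltdb.catalog.Cluster;
import org.voltdb.catalog.Host;
import org.voltdb.catalog.Partition;
import org.voltdb.catalog.Site;

public final class CatalogSiteBuilderSketch {

    /** Add one Site with the given ports and partition count; returns the next free partition id. */
    public static int addSite(Cluster cluster, Host host, int siteId,
                              int procPort, int messengerPort,
                              int firstPartitionId, int partitionsPerSite) {
        Site site = cluster.getSites().add(String.valueOf(siteId));
        site.setId(siteId);
        site.setHost(host);
        site.setProc_port(procPort);
        site.setMessenger_port(messengerPort);
        site.setIsup(true);

        int partitionId = firstPartitionId;
        for (int i = 0; i < partitionsPerSite; i++) {
            Partition part = site.getPartitions().add(String.valueOf(partitionId));
            part.setId(partitionId);
            partitionId++;
        }
        return partitionId;
    }
}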

        {
            VoltTable result = constructNodeResultsTable();
            // Choose the lowest site ID on this host to do the file scan
            // All other sites should just return empty results tables.
            Host catalog_host = context.getHost();
            Site site = context.getSite();

            CatalogMap<Partition> partition_map = site.getPartitions();
            Integer lowest_partition_id = Integer.MAX_VALUE, p_id;
            Integer lowest_site_id = Integer.MAX_VALUE, s_id;
           
            for(Site st : CatalogUtil.getAllSites(catalog_host)){
                s_id = st.getId();

        HStoreConf hstore_conf = HStoreConf.singleton();
        hstore_conf.site.commandlog_enable = false;
        hstore_conf.site.commandlog_timeout = 1000;

        Site catalog_site = CollectionUtil.first(catalogContext.sites);
        hstore_site = new MockHStoreSite(catalog_site.getId(), catalogContext, hstore_conf);
        assert(hstore_site.isLocalPartition(0));
       
        outputFile = FileUtil.getTempFile("log");
        logger = new CommandLogWriter(hstore_site, outputFile);
        loggerThread = new Thread(this.logger);

    protected void setUp() throws Exception {
        super.setUp(ProjectType.TM1);
        this.addPartitions(NUM_PARTITONS);
       
        this.hstore_conf = HStoreConf.singleton();
        Site catalog_site = CollectionUtil.first(catalogContext.sites);
        this.hstore_site = this.createHStoreSite(catalog_site, hstore_conf);
        this.client = createClient();
        this.executor = hstore_site.getPartitionExecutor(PARTITION_ID);
        this.executorDebug = this.executor.getDebugContext();
        this.catalog_proc = this.getProcedure(TARGET_PROCEDURE);
