Package org.voltdb.jni

Examples of org.voltdb.jni.ExecutionEngine


            LOG.debug("Processing " + next);

        // Get the EE handle for the partition where this txn needs
        // blocks read in from disk
        PartitionExecutor executor = hstore_site.getPartitionExecutor(next.partition);
        ExecutionEngine ee = executor.getExecutionEngine();


        // We can now tell it to read in the blocks that this txn needs.
        // Note that we do this without checking whether another txn is already
        // running, because reading in evicted tuples is a two-stage process.
        // First we read the blocks from disk into a standalone buffer. Then, once
        // we know that all of the tuples that we need are there, we requeue the
        // txn, which knows that it needs to tell the EE to merge in the results
        // from this buffer before it executes anything.
        //
        // TODO: We may want to add an HStoreConf option that dispatches this
        // request asynchronously per partition. For now we just block the
        // AntiCacheManager until each request is finished.
        if (hstore_conf.site.anticache_profiling)
            this.profilers[next.partition].retrieval_time.start();
        try {
            if (debug.val)
                LOG.debug(String.format("Asking EE to read in evicted blocks from table %s on partition %d: %s",
                          next.catalog_tbl.getName(), next.partition, Arrays.toString(next.block_ids)));

            ee.antiCacheReadBlocks(next.catalog_tbl, next.block_ids, next.tuple_offsets);

            if (debug.val)
                LOG.debug(String.format("Finished reading blocks from partition %d",
                          next.partition));
        } catch (SerializableException ex) {
            LOG.info("Caught unexpected SerializableException while reading anti-cache block.", ex);

        } finally {
            if (hstore_conf.site.anticache_profiling)
                this.profilers[next.partition].retrieval_time.stopIfStarted();
        }

        if (debug.val) LOG.debug("Anti-cache block read done");
        // Now go ahead and requeue our transaction
        next.ts.setAntiCacheMergeTable(next.catalog_tbl);

        if (next.ts instanceof LocalTransaction) {
            // HACK: We need to get a new txnId for ourselves, since the one
            // that we were given before is now probably too far in the past
            if (next.partition != next.ts.getBasePartition()) {
                ee.antiCacheMergeBlocks(next.catalog_tbl);
            }
            this.hstore_site.getTransactionInitializer().resetTransactionId(next.ts, next.partition);

            if (debug.val) LOG.debug("Restarting transaction on local partition");
            this.hstore_site.transactionInit(next.ts);
        } else {
            ee.antiCacheMergeBlocks(next.catalog_tbl);
            RemoteTransaction ts = (RemoteTransaction) next.ts;
            RpcCallback<UnevictDataResponse> callback = ts.getUnevictCallback();
            UnevictDataResponse.Builder builder = UnevictDataResponse.newBuilder()
                .setSenderSite(this.hstore_site.getSiteId())
                .setTransactionId(ts.getNewTransactionId())
                // ...
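The snippet above pairs the two ExecutionEngine calls that implement the two-stage unevict protocol described in its comments. A minimal sketch of the pattern, assuming the engine and a catalog Table handle are in scope; the array element types here are an assumption, not confirmed by the snippet:

    // Stage 1: read the evicted blocks from disk into a standalone buffer.
    // blockIds/tupleOffsets mirror next.block_ids/next.tuple_offsets above.
    short[] blockIds = { 1, 2 };       // assumed element type
    int[] tupleOffsets = { 0, 0 };     // assumed element type
    ee.antiCacheReadBlocks(catalogTable, blockIds, tupleOffsets);

    // ... requeue the transaction; once it is rescheduled on this partition:

    // Stage 2: merge the buffered tuples back into the table before the
    // transaction executes anything.
    ee.antiCacheMergeBlocks(catalogTable);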


            for (Partition pt : partitionMap) {
                PartitionExecutor pe = getPartitionExecutor(pt.getId());
                assert (pe != null);

                ExecutionEngine ee = pe.getExecutionEngine();
                assert (ee != null);

                int m_partitionId = pe.getPartitionId();

                LOG.info("ARIES: start recovery at partition " + m_partitionId + " on site " + m_siteId);
               
                if (!m_ariesLog.isRecoveryCompletedForSite(m_partitionId)) {
                    ee.doAriesRecoveryPhase(m_ariesLog.getPointerToReplayLog(), m_ariesLog.getReplayLogSize(), m_ariesLog.getTxnIdToBeginReplay());
                    m_ariesLog.setRecoveryCompleted(m_partitionId);               
                }
            }
        }
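The loop above boils down to one guarded ExecutionEngine.doAriesRecoveryPhase() call per partition. A sketch of that guard factored into a helper (recoverPartition is a name introduced here; m_ariesLog and its accessors come from the snippet):

    // Replay the ARIES log into one partition's EE exactly once. If the
    // partition already finished recovery, the call is skipped, so running
    // this over all partitions of a site is idempotent.
    void recoverPartition(ExecutionEngine ee, int partitionId) {
        if (!m_ariesLog.isRecoveryCompletedForSite(partitionId)) {
            ee.doAriesRecoveryPhase(m_ariesLog.getPointerToReplayLog(),
                                    m_ariesLog.getReplayLogSize(),
                                    m_ariesLog.getTxnIdToBeginReplay());
            m_ariesLog.setRecoveryCompleted(partitionId);
        }
    }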

    public void setUp() {
        EELibraryLoader.loadExecutionEngineLibrary(true);
    }

    public void testSameLongHash() {
        // `r` is a java.util.Random instance defined elsewhere in this test class.
        ExecutionEngine ee = new ExecutionEngineJNI(null, 1, 1, 0, 0, "");

        /**
         *  Run 100k random values and make sure C++ and Java hash to
         *  the same value.
         */
        for (int i = 0; i < 100000; i++) {
            int partitionCount = r.nextInt(1000) + 1;
            // this will produce negative values, which is desired here.
            long valueToHash = r.nextLong();

            int eehash = ee.hashinate(valueToHash, partitionCount);
            int javahash = TheHashinator.hashinate(valueToHash, partitionCount);
            assertEquals(eehash, javahash);
            assertTrue(eehash < partitionCount);
            assertTrue(eehash > -1);
        }
    }
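Both hashing tests repeat the same three assertions for every value. A small helper in the same vein could fold them together; assertHashParity is a name introduced here, not part of the original test class:

    // Check that the native (C++) and Java hashinators agree on a value and
    // that the chosen partition id is in [0, partitionCount).
    private static void assertHashParity(ExecutionEngine ee, long valueToHash, int partitionCount) {
        int eehash = ee.hashinate(valueToHash, partitionCount);
        int javahash = TheHashinator.hashinate(valueToHash, partitionCount);
        assertEquals(eehash, javahash);
        assertTrue(eehash >= 0);
        assertTrue(eehash < partitionCount);
    }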

    public void testSameStringHash() {
        ExecutionEngine ee = new ExecutionEngineJNI(null, 1, 1, 0, 0, "");

        for (int i = 0; i < 100000; i++) {
            int partitionCount = r.nextInt(1000) + 1;
            String valueToHash = Long.toString(r.nextLong());

            int eehash = ee.hashinate(valueToHash, partitionCount);
            int javahash = TheHashinator.hashinate(valueToHash, partitionCount);
            if (eehash != javahash) {
                partitionCount++; // no-op; a convenient breakpoint spot when the hashes diverge
            }
            assertEquals(eehash, javahash);
        }
    }

        // The HSQL backend member variable. The real Volt backend is encapsulated
        // by the ExecutionEngine class, which has implementations for both
        // JNI and IPC and selects the desired one based on the
        // value of this.eeBackend.
        HsqlBackend hsqlTemp = null;
        ExecutionEngine eeTemp = null;
        SnapshotSiteProcessor snapshotter = null;
        try {
            if (trace.val) LOG.trace("Creating EE wrapper with target type '" + target + "'");
            if (this.backend_target == BackendTarget.HSQLDB_BACKEND) {
                hsqlTemp = new HsqlBackend(partitionId);
                final String hexDDL = catalogContext.database.getSchema();
                final String ddl = Encoder.hexDecodeToString(hexDDL);
                final String[] commands = ddl.split(";");
                for (String command : commands) {
                    if (command.length() == 0) {
                        continue;
                    }
                    hsqlTemp.runDDL(command);
                }
                eeTemp = new MockExecutionEngine();
               
            }
            else if (target == BackendTarget.NATIVE_EE_JNI) {
                org.voltdb.EELibraryLoader.loadExecutionEngineLibrary(true);
                // set up the EE
                eeTemp = new ExecutionEngineJNI(this,
                                                catalogContext.cluster.getRelativeIndex(),
                                                this.getSiteId(),
                                                this.getPartitionId(),
                                                this.site.getHost().getId(),
                                                "localhost");
               
                // Initialize Anti-Cache
                if (hstore_conf.site.anticache_enable) {
                    File acFile = AntiCacheManager.getDatabaseDir(this);
                    long blockSize = hstore_conf.site.anticache_block_size;
                    eeTemp.antiCacheInitialize(acFile, blockSize);
                }
                             
               
                // Initialize STORAGE_MMAP
                if (hstore_conf.site.storage_mmap) {
                    File dbFile = getMMAPDir(this);
                    long mapSize = hstore_conf.site.storage_mmap_file_size;
                    long syncFrequency = hstore_conf.site.storage_mmap_sync_frequency;
                    eeTemp.MMAPInitialize(dbFile, mapSize, syncFrequency);
                }
               
                // Initialize ARIES
                if (hstore_conf.site.aries) {
                    File dbFile = getARIESDir(this);
                    File logFile = getARIESFile(this);
                    eeTemp.ARIESInitialize(dbFile, logFile);
                }                           
               
                // Important: This has to be called *after* we initialize the anti-cache
                //            and the storage information!
                eeTemp.loadCatalog(catalogContext.catalog.serialize());
                this.lastTickTime = System.currentTimeMillis();
                eeTemp.tick(this.lastTickTime, 0);
               
                snapshotter = new SnapshotSiteProcessor(new Runnable() {
                    final PotentialSnapshotWorkMessage msg = new PotentialSnapshotWorkMessage();
                    @Override
                    public void run() {
                        PartitionExecutor.this.work_queue.add(this.msg);
                    }
                });
            }
            else {
                // set up the EE over IPC
                eeTemp = new ExecutionEngineIPC(this,
                                                catalogContext.cluster.getRelativeIndex(),
                                                this.getSiteId(),
                                                this.getPartitionId(),
                                                this.site.getHost().getId(),
                                                "localhost",
                                                target);
                eeTemp.loadCatalog(catalogContext.catalog.serialize());
                this.lastTickTime = System.currentTimeMillis();
                eeTemp.tick(this.lastTickTime, 0);
            }
        }
        // just print error info and bail if we run into an error here
        catch (final Exception ex) {
            throw new ServerFaultException("Failed to initialize PartitionExecutor", ex);
        }
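Stripped of the anti-cache, mmap, and ARIES initialization, the setup above reduces to choosing between the JNI and IPC implementations behind the same ExecutionEngine interface. A condensed sketch, reusing the constructor arguments shown in the snippet:

    ExecutionEngine ee;
    if (target == BackendTarget.NATIVE_EE_JNI) {
        // The native library must be loaded before a JNI engine is constructed.
        org.voltdb.EELibraryLoader.loadExecutionEngineLibrary(true);
        ee = new ExecutionEngineJNI(this, catalogContext.cluster.getRelativeIndex(),
                                    this.getSiteId(), this.getPartitionId(),
                                    this.site.getHost().getId(), "localhost");
    } else {
        // The IPC engine takes the same arguments plus the backend target.
        ee = new ExecutionEngineIPC(this, catalogContext.cluster.getRelativeIndex(),
                                    this.getSiteId(), this.getPartitionId(),
                                    this.site.getHost().getId(), "localhost", target);
    }
    // Either way, the engine needs the catalog and an initial tick.
    ee.loadCatalog(catalogContext.catalog.serialize());
    ee.tick(System.currentTimeMillis(), 0);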

            // table = m_hsql.runDML(sql);
        }
        else {
            assert(plan != null);
           
            ExecutionEngine ee = context.getExecutionEngine();
            AbstractTransaction ts = this.hstore_site.getTransaction(txn_id);
           
            // Enable read/write set tracking
            if (hstore_conf.site.exec_readwrite_tracking && !ts.hasExecutedWork(this.partitionId)) {
                if (debug.val)
                    LOG.debug(String.format("%s - Enabling read/write set tracking in EE at partition %d",
                              ts, this.partitionId));
                ee.trackingEnable(txn_id);
            }
           
            // Always record this information for the txn so that we can
            // roll back anything that it may do
            ts.markExecNotReadOnly(this.partitionId);
            ts.markExecutedWork(this.partitionId);
           
            table = ee.executeCustomPlanFragment(plan, outputDepId, inputDepId, txn_id,
                                          context.getLastCommittedTxnId(),
                                          ts.getLastUndoToken(this.partitionId));
        }

        return new DependencySet(new int[]{ outputDepId }, new VoltTable[]{ table });
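The calls around executeCustomPlanFragment() form a small protocol: optionally enable tracking, mark the work as undoable, then execute. Distilled from the snippet (all names come from it; this is a sketch, not the full method):

    // Optional read/write set tracking must be switched on before the first
    // fragment of this txn executes on the partition.
    if (hstore_conf.site.exec_readwrite_tracking && !ts.hasExecutedWork(partitionId)) {
        ee.trackingEnable(txn_id);
    }

    // Record that the txn did (possibly non-read-only) work here so that it
    // can be rolled back via its undo token.
    ts.markExecNotReadOnly(partitionId);
    ts.markExecutedWork(partitionId);

    VoltTable table = ee.executeCustomPlanFragment(plan, outputDepId, inputDepId, txn_id,
                                                   context.getLastCommittedTxnId(),
                                                   ts.getLastUndoToken(partitionId));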

        assert(fragmentId == SysProcFragmentId.PF_antiCacheEviction);
        throw new IllegalAccessError("Invalid invocation of " + this.getClass() + ".executePlanFragment()");
    }
   
    public VoltTable[] run(int partition, String tableNames[], String childrenTableNames[], long blockSizes[], int numBlocks[]) {
        ExecutionEngine ee = executor.getExecutionEngine();
        assert(tableNames.length == blockSizes.length);
       
        // PROFILER
        AntiCacheManagerProfiler profiler = null;
        long start = -1;
        if (hstore_conf.site.anticache_profiling) {
            start = System.currentTimeMillis();
            profiler = hstore_site.getAntiCacheManager().getDebugContext().getProfiler(this.partitionId);
            profiler.eviction_time.start();
        }

        // Check Input
        if (tableNames.length == 0) {
            throw new VoltAbortException("No tables to evict were given");
        }
        Table tables[] = new Table[tableNames.length];
        Table childTables[] = new Table[tableNames.length];
        for (int i = 0; i < tableNames.length; i++) {
            tables[i] = catalogContext.database.getTables().getIgnoreCase(tableNames[i]);
            if (tables[i] == null) {
                String msg = String.format("Unknown table '%s'", tableNames[i]);
                throw new VoltAbortException(msg);
            }
            else if (!tables[i].getEvictable()) {
                String msg = String.format("Trying to evict tuples from table '%s' but it is not marked as evictable", tables[i].getName());
                throw new VoltAbortException(msg);
            }
            else if (blockSizes[i] <= 0) {
                String msg = String.format("Invalid block eviction size '%d' for table '%s'", blockSizes[i], tables[i].getName());
                throw new VoltAbortException(msg);
            }
            else if (numBlocks[i] <= 0) {
                String msg = String.format("Invalid number of blocks to evict '%d' for table '%s'", numBlocks[i], tables[i].getName());
                throw new VoltAbortException(msg);
            }
        } // FOR
       
        // TODO: Instead of sending down requests one at a time per table, it will
        //       be much faster if we just send down the entire batch
        final VoltTable allResults = new VoltTable(ResultsColumns);
        long totalTuplesEvicted = 0;
        long totalBlocksEvicted = 0;
        long totalBytesEvicted = 0;
        for (int i = 0; i < tableNames.length; i++) {
            if (debug.val)
                LOG.debug(String.format("Evicting %d blocks of blockSize %d",
                          numBlocks[i], blockSizes[i]));
            VoltTable vt = null;
            if (debug.val)
                LOG.debug("anticache_batching = " + hstore_conf.site.anticache_batching);
            if (hstore_conf.site.anticache_batching) {
                if (childrenTableNames.length != 0 && !childrenTableNames[i].isEmpty()) {
                    childTables[i] = catalogContext.database.getTables().getIgnoreCase(childrenTableNames[i]);
                    vt = ee.antiCacheEvictBlockInBatch(tables[i], childTables[i], blockSizes[i], numBlocks[i]);
                } else {
                    vt = ee.antiCacheEvictBlock(tables[i], blockSizes[i], numBlocks[i]);
                }
            } else {
                vt = ee.antiCacheEvictBlock(tables[i], blockSizes[i], numBlocks[i]);
            }
           
            boolean adv = vt.advanceRow();
           
            if (!adv) {
                // ...
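For a single table, the eviction call itself is one line; the returned VoltTable carries the per-block statistics that the surrounding loop accumulates into totalTuplesEvicted and friends. A minimal sketch, assuming `table` is a catalog table marked evictable and `blockSize`/`numBlocks` are validated as above:

    // Evict `numBlocks` blocks of `blockSize` bytes from one table. The
    // batched variant antiCacheEvictBlockInBatch() additionally takes a
    // child table, as the snippet above shows.
    VoltTable vt = ee.antiCacheEvictBlock(table, blockSize, numBlocks);
    if (vt.advanceRow()) {
        // Each row reports eviction statistics for the table; the snippet
        // sums tuples, blocks, and bytes evicted from these results.
    }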

    public void testExpectNonZeroHash() throws Exception {
        int partitionCount = 3;
        final byte configBytes[] = TheHashinator.getConfigureBytes(partitionCount);
        TheHashinator.initialize(TheHashinator.getConfiguredHashinatorClass(), configBytes);
        HashinatorConfig config = TheHashinator.getCurrentConfig();
        ExecutionEngine ee =
                new ExecutionEngineJNI(
                        1,
                        1,
                        0,
                        0,
                        "",
                        100,
                        config);

        long valueToHash = hashinatorType == HashinatorType.ELASTIC ? 39 : 2;

        int eehash = ee.hashinate(valueToHash, config);
        int javahash = TheHashinator.getPartitionForParameter(VoltType.typeFromObject(valueToHash).getValue(),
                valueToHash);
        if (eehash != javahash) {
            System.out.printf("Mismatched hash of (%s) %d with %d partitions => EE: %d, Java: %d\n",
                    VoltType.typeFromObject(valueToHash).toSQLString(),
                    valueToHash, partitionCount, eehash, javahash);
        }
        assertEquals(eehash, javahash);
        assertNotSame(0, eehash);
        assertTrue(eehash < partitionCount);
        assertTrue(eehash >= 0);

        try { ee.release(); } catch (Exception e) {}
    }

    @Test
    public void testSameLongHash1() throws Exception {
        int partitionCount = 2;
        TheHashinator.initialize(TheHashinator.getConfiguredHashinatorClass(), TheHashinator.getConfigureBytes(partitionCount));
        HashinatorConfig hashinatorConfig = TheHashinator.getCurrentConfig();
        ExecutionEngine ee =
                new ExecutionEngineJNI(
                        1,
                        1,
                        0,
                        0,
                        "",
                        100,
                        hashinatorConfig);


        // Hash the values 0 through 3 with both hashinators and expect
        // identical, in-range results for each.
        for (long valueToHash = 0; valueToHash <= 3; valueToHash++) {
            int eehash = ee.hashinate(valueToHash, hashinatorConfig);
            int javahash = TheHashinator.getPartitionForParameter(VoltType.typeFromObject(valueToHash).getValue(),
                    valueToHash);
            if (eehash != javahash) {
                System.out.printf("Mismatched hash of (%s) %d with %d partitions => EE: %d, Java: %d\n",
                        VoltType.typeFromObject(valueToHash).toSQLString(),
                        valueToHash, partitionCount, eehash, javahash);
            }
            assertEquals(eehash, javahash);
            assertTrue(eehash < partitionCount);
            assertTrue(eehash >= 0);
        }

        try { ee.release(); } catch (Exception e) {}
    }

    public void testSizeChanges() {
        // try with lots of partition counts
        for (int partitionCount = 1; partitionCount <= 11; partitionCount++) {
            TheHashinator.initialize(TheHashinator.getConfiguredHashinatorClass(), TheHashinator.getConfigureBytes(partitionCount));
            HashinatorConfig hashinatorConfig = TheHashinator.getCurrentConfig();
            ExecutionEngine ee =
                    new ExecutionEngineJNI(
                            1,
                            1,
                            0,
                            0,
                            "",
                            100,
                            hashinatorConfig);

            // use a short value hashed as a long type
            for (short valueToHash = -7; valueToHash <= 7; valueToHash++) {
                int eehash = ee.hashinate(valueToHash, hashinatorConfig);
                int javahash = TheHashinator.getPartitionForParameter(VoltType.BIGINT.getValue(), valueToHash);
                if (eehash != javahash) {
                    System.out.printf("Mismatched hash of (%s) %d with %d partitions => EE: %d, Java: %d\n",
                            VoltType.typeFromObject(valueToHash).toSQLString(),
                            valueToHash, partitionCount, eehash, javahash);
                }
                assertEquals(eehash, javahash);
                assertTrue(eehash < partitionCount);
                assertTrue(eehash >= 0);
            }

            // use a long value hashed as a short type
            for (long valueToHash = -7; valueToHash <= 7; valueToHash++) {
                int eehash = ee.hashinate(valueToHash, hashinatorConfig);
                int javahash = TheHashinator.getPartitionForParameter(VoltType.SMALLINT.getValue(), valueToHash);
                if (eehash != javahash) {
                    System.out.printf("Mismatched hash of (%s) %d with %d partitions => EE: %d, Java: %d\n",
                            VoltType.typeFromObject(valueToHash).toSQLString(),
                            valueToHash, partitionCount, eehash, javahash);
                }
                assertEquals(eehash, javahash);
                assertTrue(eehash < partitionCount);
                assertTrue(eehash >= 0);
            }

            try { ee.release(); } catch (Exception e) {}
        }
    }
