Package org.infinispan.loaders.bucket

Examples of org.infinispan.loaders.bucket.Bucket
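
A Bucket groups the InternalCacheEntry instances whose keys hash to the same bucket id; bucket-based cache stores such as the file store (fcs) and JdbcBinaryCacheStore shown below persist one marshalled Bucket per id. The snippets are excerpts and may begin or end mid-method. As orientation, here is a minimal sketch of the core Bucket operations the examples rely on, assuming a TimeService instance named TIME_SERVICE as in the marshalling test further down:

   // a sketch, not verbatim Infinispan test code; TIME_SERVICE is assumed to be a TimeService
   Bucket bucket = new Bucket(TIME_SERVICE);
   bucket.setBucketId(42);                  // the id under which the bucket is persisted
   bucket.addEntry(TestInternalCacheEntryFactory.create("key", "value"));
   assert !bucket.getEntries().isEmpty();
   bucket.removeEntry("key");
   assert bucket.isEmpty();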


      boolean created = file.createNewFile();
      assert created;
   }

   public void testBucketRemoval() throws Exception {
      Bucket b;
      InternalCacheEntry se = TestInternalCacheEntryFactory.create("test", "value");
      fcs.store(se);
      b = fcs.loadBucketContainingKey("test");
      assert b != null;

      assert !b.getEntries().isEmpty();

      assert new File(fcs.root, b.getBucketIdAsString()).exists();

      b.removeEntry("test");
      assert b.getEntries().isEmpty();

      fcs.updateBucket(b);
      checkBucketExists(b);
   }
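
FileCacheStore persists each bucket as one file under its root directory, named by the bucket id, which is what the fcs.root assertion above verifies. A hedged sketch of that mapping, assuming the bucket id is derived from the key's hash code (the JDBC snippets below look buckets up by keyHashCode in the same way):

   // illustrative only: locating the bucket file that holds a given key
   String bucketId = String.valueOf("test".hashCode());
   File bucketFile = new File(fcs.root, bucketId);
   assert bucketFile.exists();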


   public void testCacheStoreRebootable() throws Exception {
      String key = "testCacheStoreRebootable";
      InternalCacheEntry se = TestInternalCacheEntryFactory.create(key, "initialValue");
      fcs.store(se);
      Bucket b = fcs.loadBucketContainingKey(key);

      //stop and restart it:
      fcs.stop();
      fcs.start();

      InternalCacheEntry entry = b.getEntry(key);
      entry.setValue("updatedValue");
      fcs.updateBucket(b);
      assert "updatedValue".equals(fcs.load(key).getValue());
   }

         }

         @Override
         public void loadAllProcess(ResultSet rs, Set<InternalCacheEntry> result) throws SQLException, CacheLoaderException {
            InputStream binaryStream = rs.getBinaryStream(1);
            Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), binaryStream);
            for (InternalCacheEntry ice: bucket.getStoredEntries()) {
               if (!ice.isExpired()) {
                  result.add(ice);
               }
            }
         }

         @Override
         public void loadAllKeysProcess(ResultSet rs, Set<Object> keys, Set<Object> keysToExclude) throws SQLException, CacheLoaderException {
            InputStream binaryStream = rs.getBinaryStream(1);
            Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), binaryStream);
            for (InternalCacheEntry ice: bucket.getStoredEntries()) {
               if (!ice.isExpired() && includeKey(ice.getKey(), keysToExclude)) {
                  keys.add(ice.getKey());
               }
            }
         }

         @Override
         public void toStreamProcess(ResultSet rs, InputStream is, ObjectOutput objectOutput) throws CacheLoaderException, SQLException, IOException {
            Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), is);
            String bucketName = rs.getString(2);
            marshaller.objectToObjectStream(bucketName, objectOutput);
            marshaller.objectToObjectStream(bucket, objectOutput);
         }

         @Override
         public boolean fromStreamProcess(Object bucketName, PreparedStatement ps, ObjectInput objectInput)
               throws SQLException, CacheLoaderException, IOException, ClassNotFoundException, InterruptedException {
            if (bucketName instanceof String) {
               Bucket bucket = (Bucket) marshaller.objectFromObjectStream(objectInput);
               ByteBuffer buffer = JdbcUtil.marshall(getMarshaller(), bucket);
               ps.setBinaryStream(1, buffer.getStream(), buffer.getLength());
               ps.setLong(2, bucket.timestampOfFirstEntryToExpire());
               ps.setString(3, (String) bucketName);
               return true;
            } else {
               return false;
            }
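All of these callbacks share one marshalling round trip: a bucket is written to the BLOB column as a binary stream and rebuilt from it on load. A minimal sketch of that round trip, assuming a StreamingMarshaller named marshaller is in scope; the bucket id must be restored by hand because it is volatile and not part of the marshalled state:

   // marshall, as fromStreamProcess does above
   ByteBuffer buffer = JdbcUtil.marshall(marshaller, bucket);
   // ...buffer.getStream() and buffer.getLength() would be written to the BLOB column...
   // unmarshall, as loadAllProcess does above
   Bucket copy = (Bucket) JdbcUtil.unmarshall(marshaller, buffer.getStream());
   copy.setBucketId(bucket.getBucketIdAsString()); // restore the volatile id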

         if (!rs.next()) {
            return null;
         }
         String bucketName = rs.getString(1);
         InputStream inputStream = rs.getBinaryStream(2);
         Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), inputStream);
         bucket.setBucketId(bucketName); // the bucket id is volatile (not persisted), so restore it from the row
         return bucket;
      } catch (SQLException e) {
         log.sqlFailureLoadingKey(String.valueOf(keyHashCode), e);
         throw new CacheLoaderException(String.format(
               "Sql failure while loading key: %s", keyHashCode), e);

            if (immediateLockForWriting(key)) {
               if (log.isTraceEnabled()) {
                  log.tracef("Adding bucket keyed %s for purging.", key);
               }
               InputStream binaryStream = rs.getBinaryStream(1);
               Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), binaryStream);
               bucket.setBucketId(key);
               expiredBuckets.add(bucket);
            } else {
               if (log.isTraceEnabled()) {
                  log.tracef("Could not acquire write lock for %s, this won't be purged even though it has expired elements", key);
               }
            }
         }
      } catch (SQLException ex) {
         //if something goes wrong, make sure the bucket locks are released
         releaseLocks(expiredBuckets);
         connectionFactory.releaseConnection(conn);
         log.failedClearingJdbcCacheStore(ex);
         throw new CacheLoaderException("Failed clearing JdbcBinaryCacheStore", ex);
      } finally {
         JdbcUtil.safeClose(ps);
         JdbcUtil.safeClose(rs);
      }

      if (log.isTraceEnabled()) {
         log.tracef("Found following buckets: %s which are about to be expired", expiredBuckets);
      }

      if (expiredBuckets.isEmpty()) {
         return;
      }
      Set<Bucket> emptyBuckets = new HashSet<Bucket>();
      //now update all the buckets in batch
      try {
         String sql = tableManipulation.getUpdateRowSql();
         ps = conn.prepareStatement(sql);
         int updateCount = 0;
         Iterator<Bucket> it = expiredBuckets.iterator();
         while (it.hasNext()) {
            Bucket bucket = it.next();
            bucket.removeExpiredEntries();
            if (!bucket.isEmpty()) {
               ByteBuffer byteBuffer = JdbcUtil.marshall(getMarshaller(), bucket);
               ps.setBinaryStream(1, byteBuffer.getStream(), byteBuffer.getLength());
               ps.setLong(2, bucket.timestampOfFirstEntryToExpire());
               ps.setString(3, bucket.getBucketIdAsString());
               ps.addBatch();
               updateCount++;
               if (updateCount % batchSize == 0) {
                  ps.executeBatch();
                  if (log.isTraceEnabled()) {
                     log.tracef("Flushing batch, update count is: %d", updateCount);
                  }
               }
            } else {
               it.remove();
               emptyBuckets.add(bucket);
            }
         }
         //flush the batch
         if (updateCount % batchSize != 0) {
            if (log.isTraceEnabled()) {
               log.tracef("Flushing batch, update count is: %d", updateCount);
            }
            ps.executeBatch();
         }
         if (log.isTraceEnabled()) {
            log.tracef("Updated %d buckets.", updateCount);
         }
      } catch (SQLException ex) {
         //if something goes wrong, make sure the bucket locks are released
         releaseLocks(emptyBuckets);
         connectionFactory.releaseConnection(conn);
         log.failedClearingJdbcCacheStore(ex);
         throw new CacheLoaderException("Failed clearing JdbcBinaryCacheStore", ex);
      } catch (InterruptedException ie) {
         if (log.isTraceEnabled()) {
            log.trace("Interrupted while marshalling to purge expired entries");
         }
         Thread.currentThread().interrupt();
      } finally {
         //release locks for the updated buckets; this won't include empty buckets, as those were moved to emptyBuckets
         releaseLocks(expiredBuckets);
         JdbcUtil.safeClose(ps);
      }


      if (log.isTraceEnabled()) {
         log.tracef("About to remove empty buckets %s", emptyBuckets);
      }

      if (emptyBuckets.isEmpty()) {
         return;
      }
      //then remove the empty buckets
      try {
         String sql = tableManipulation.getDeleteRowSql();
         ps = conn.prepareStatement(sql);
         int deletionCount = 0;
         for (Bucket bucket : emptyBuckets) {
            ps.setString(1, bucket.getBucketIdAsString());
            ps.addBatch();
            deletionCount++;
            if (deletionCount % batchSize == 0) {
               if (log.isTraceEnabled()) {
                  log.tracef("Flushing deletion batch, total deletion count so far is %d", deletionCount);
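The update and delete paths above share the same JDBC batching idiom: add statements, flush every batchSize additions, then flush the remainder. Stripped of the bucket bookkeeping, a sketch of the pattern (batchSize is assumed to be a positive configuration value, as in the store):

   int count = 0;
   for (Bucket bucket : buckets) {
      ps.setString(1, bucket.getBucketIdAsString());
      ps.addBatch();
      count++;
      if (count % batchSize == 0) {
         ps.executeBatch(); // flush a full batch
      }
   }
   if (count % batchSize != 0) {
      ps.executeBatch();    // flush the final, partial batch
   }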

      }
      dmHelper = new DataManipulationHelper(connectionFactory, tableManipulation, marshaller) {
         @Override
         public void loadAllProcess(ResultSet rs, Set<InternalCacheEntry> result) throws SQLException, CacheLoaderException {
            InputStream binaryStream = rs.getBinaryStream(1);
            Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), binaryStream);
            for (InternalCacheEntry ice: bucket.getStoredEntries()) {
               if (!ice.isExpired()) result.add(ice);
            }
         }

         @Override
         public void toStreamProcess(ResultSet rs, InputStream is, ObjectOutput objectOutput) throws CacheLoaderException, SQLException, IOException {
            Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), is);
            String bucketName = rs.getString(2);
            marshaller.objectToObjectStream(bucketName, objectOutput);
            marshaller.objectToObjectStream(bucket, objectOutput);
         }

         public boolean fromStreamProcess(Object bucketName, PreparedStatement ps, ObjectInput objectInput) throws SQLException, CacheLoaderException, IOException, ClassNotFoundException {
            if (bucketName instanceof String) {
               Bucket bucket = (Bucket) marshaller.objectFromObjectStream(objectInput);
               ByteBuffer buffer = JdbcUtil.marshall(getMarshaller(), bucket);
               ps.setBinaryStream(1, buffer.getStream(), buffer.getLength());
               ps.setLong(2, bucket.timestampOfFirstEntryToExpire());
               ps.setString(3, (String) bucketName);
               return true;
            } else {
               return false;
            }

         ps.setString(1, keyHashCode);
         rs = ps.executeQuery();
         if (!rs.next()) return null;
         String bucketName = rs.getString(1);
         InputStream inputStream = rs.getBinaryStream(2);
         Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), inputStream);
         bucket.setBucketName(bucketName); // the bucket name is volatile (not persisted), so restore it from the row
         return bucket;
      } catch (SQLException e) {
         String message = "sql failure while loading key: " + keyHashCode;
         log.error(message, e);
         throw new CacheLoaderException(message, e);

         while (rs.next()) {
            String key = rs.getString(2);
            if (immediateLockForWriting(key)) {
               if (log.isTraceEnabled()) log.trace("Adding bucket keyed " + key + " for purging.");
               InputStream binaryStream = rs.getBinaryStream(1);
               Bucket bucket = (Bucket) JdbcUtil.unmarshall(getMarshaller(), binaryStream);
               bucket.setBucketName(key);
               expiredBuckets.add(bucket);
            } else {
               if (log.isTraceEnabled())
                  log.trace("Could not acquire write lock for " + key + ", this won't be purged even though it has expired elements");
            }
         }
      } catch (SQLException ex) {
         //if something goes wrong, make sure the bucket locks are released
         releaseLocks(expiredBuckets);
         connectionFactory.releaseConnection(conn);
         DataManipulationHelper.logAndThrow(ex, "Failed clearing JdbcBinaryCacheStore");
      } finally {
         JdbcUtil.safeClose(ps);
         JdbcUtil.safeClose(rs);
      }

      if (log.isTraceEnabled())
         log.trace("Found following buckets: " + expiredBuckets + " which are about to be expired");

      if (expiredBuckets.isEmpty()) return;
      Set<Bucket> emptyBuckets = new HashSet<Bucket>();
      //now update all the buckets in batch
      try {
         String sql = tableManipulation.getUpdateRowSql();
         ps = conn.prepareStatement(sql);
         int updateCount = 0;
         Iterator<Bucket> it = expiredBuckets.iterator();
         while (it.hasNext()) {
            Bucket bucket = it.next();
            bucket.removeExpiredEntries();
            if (!bucket.isEmpty()) {
               ByteBuffer byteBuffer = JdbcUtil.marshall(getMarshaller(), bucket);
               ps.setBinaryStream(1, byteBuffer.getStream(), byteBuffer.getLength());
               ps.setLong(2, bucket.timestampOfFirstEntryToExpire());
               ps.addBatch();
               updateCount++;
               if (updateCount % batchSize == 0) {
                  ps.executeBatch();
                  if (log.isTraceEnabled()) log.trace("Flushing batch, update count is: " + updateCount);
               }
            } else {
               it.remove();
               emptyBuckets.add(bucket);
            }
         }
         //flush the batch
         if (updateCount % batchSize != 0) {
            ps.executeBatch();
         }
         if (log.isTraceEnabled()) log.trace("Updated " + updateCount + " buckets.");
      } catch (SQLException ex) {
         //if something goes wrong, make sure the bucket locks are released
         releaseLocks(emptyBuckets);
         connectionFactory.releaseConnection(conn);
         DataManipulationHelper.logAndThrow(ex, "Failed clearing JdbcBinaryCacheStore");
      } finally {
         //release locks for the updated buckets; this won't include empty buckets, as those were moved to emptyBuckets
         releaseLocks(expiredBuckets);
         JdbcUtil.safeClose(ps);
      }


      if (log.isTraceEnabled()) log.trace("About to remove empty buckets " + emptyBuckets);

      if (emptyBuckets.isEmpty()) return;
      //then remove the empty buckets
      try {
         String sql = tableManipulation.getDeleteRowSql();
         ps = conn.prepareStatement(sql);
         int deletionCount = 0;
         for (Bucket bucket : emptyBuckets) {
            ps.setString(1, bucket.getBucketName());
            ps.addBatch();
            deletionCount++;
            if (deletionCount % batchSize == 0) {
               if (log.isTraceEnabled())
                  log.trace("Flushing deletion batch, total deletion count so far is " + deletionCount);

   public void testBucketMarshalling() throws Exception {
      ImmortalCacheEntry entry1 = (ImmortalCacheEntry) TestInternalCacheEntryFactory.create("key", "value", System.currentTimeMillis() - 1000, -1, System.currentTimeMillis(), -1);
      MortalCacheEntry entry2 = (MortalCacheEntry) TestInternalCacheEntryFactory.create("key", "value", System.currentTimeMillis() - 1000, 200000, System.currentTimeMillis(), -1);
      TransientCacheEntry entry3 = (TransientCacheEntry) TestInternalCacheEntryFactory.create("key", "value", System.currentTimeMillis() - 1000, -1, System.currentTimeMillis(), 4000000);
      TransientMortalCacheEntry entry4 = (TransientMortalCacheEntry) TestInternalCacheEntryFactory.create("key", "value", System.currentTimeMillis() - 1000, 200000, System.currentTimeMillis(), 4000000);
      Bucket b = new Bucket(TIME_SERVICE);
      b.setBucketId(0);
      b.addEntry(entry1);
      b.addEntry(entry2);
      b.addEntry(entry3);
      b.addEntry(entry4);

      byte[] bytes = marshaller.objectToByteBuffer(b);
      Bucket rb = (Bucket) marshaller.objectFromByteBuffer(bytes);
      assert rb.getEntries().equals(b.getEntries()) : "Written[" + b.getEntries() + "] and read[" + rb.getEntries() + "] objects should be the same";
   }
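
The four entry types cover the lifespan/maxIdle combinations a bucket can hold, and the expiry-oriented Bucket methods used by the JDBC store operate on the same entries. A short sketch reusing entry2 (mortal, 200000 ms lifespan) from the test above:

   Bucket bucket = new Bucket(TIME_SERVICE);
   bucket.addEntry(entry2);
   long expiry = bucket.timestampOfFirstEntryToExpire(); // written to the expiry column by the JDBC store
   bucket.removeExpiredEntries();                        // purging drops entries past their expiry time
   assert !bucket.isEmpty();                             // entry2's lifespan has not yet elapsed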