Package org.infinispan.persistence.support

Examples of org.infinispan.persistence.support.Bucket


   @Override
   public final MarshalledEntry load(Object key) {
      Integer bucketId = getBuckedId(key);
      lockBucketForReading(bucketId);
      try {
         Bucket bucket = loadBucket(bucketId);
         if (bucket == null) {
            return null;
         }
         return bucket.getEntry(key, ctx.getTimeService());
      } finally {
         unlock(bucketId);
      }
   }
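The load() method above (and the contains() method that follows) uses a lock-striping pattern: the key is mapped to a bucket id, the bucket's lock is acquired for reading, and the lock is always released in a finally block. Below is a minimal standalone sketch of that pattern, assuming hypothetical names (StripedBucketReader, BucketLoader) and plain java.util.concurrent locks rather than Infinispan's internal locking support.

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical illustration of the lock-striping idea behind load()/contains();
// this is not the Infinispan implementation.
public class StripedBucketReader<V> {
   private static final int STRIPES = 64;
   private final ReadWriteLock[] locks = new ReadWriteLock[STRIPES];

   public StripedBucketReader() {
      for (int i = 0; i < STRIPES; i++) {
         locks[i] = new ReentrantReadWriteLock();
      }
   }

   // Maps a key to a bucket/lock id, similar in spirit to getBuckedId(key).
   private int bucketId(Object key) {
      return (key.hashCode() & Integer.MAX_VALUE) % STRIPES;
   }

   // Reads under the bucket's read lock and always unlocks in a finally block.
   public V read(Object key, BucketLoader<V> loader) {
      int id = bucketId(key);
      locks[id].readLock().lock();
      try {
         return loader.load(id, key);   // e.g. deserialize the bucket and look up the key
      } finally {
         locks[id].readLock().unlock();
      }
   }

   // Callback standing in for loadBucket(bucketId) + bucket.getEntry(key, timeService).
   public interface BucketLoader<V> {
      V load(int bucketId, Object key);
   }
}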


   @Override
   public boolean contains(Object key) {
      Integer bucketId = getBuckedId(key);
      lockBucketForReading(bucketId);
      try {
         Bucket bucket = loadBucket(bucketId);
         return bucket != null && bucket.contains(key, ctx.getTimeService());
      } finally {
         unlock(bucketId);
      }
   }

         final TaskContextImpl taskContext = new TaskContextImpl();
         // We could do better here: at the moment we load the entries in the caller's thread
         // and only process them in parallel; the loading (the expensive operation) could be
         // parallelized as well.
         while (rs.next()) {
            InputStream binaryStream = rs.getBinaryStream(1);
            final Bucket bucket = unmarshallBucket(binaryStream);
            ecs.submit(new Callable<Void>() {
               @Override
               public Void call() throws Exception {
                  try {
                     for (MarshalledEntry me : bucket.getStoredEntries(filter, ctx.getTimeService()).values()) {
                        if (!taskContext.isStopped()) {
                           task.processEntry(me, taskContext);
                        }
                     }
                     return null;
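The process() excerpt above deserializes each bucket in the caller's thread and submits the per-entry work to an ExecutorCompletionService, as the inline comment notes. A compressed, self-contained sketch of that submit-then-drain pattern, using only JDK classes and hypothetical names (ParallelProcessSketch, handle) in place of MarshalledEntry and task.processEntry:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical illustration of the fan-out/drain pattern used by process();
// strings stand in for MarshalledEntry and handle() for task.processEntry.
public class ParallelProcessSketch {

   public static void processAll(List<List<String>> buckets) throws Exception {
      ExecutorService executor = Executors.newFixedThreadPool(4);
      ExecutorCompletionService<Void> ecs = new ExecutorCompletionService<>(executor);
      int submitted = 0;
      try {
         // Loading happens here, in the caller's thread; only the processing runs in parallel.
         for (List<String> loaded : buckets) {
            final List<String> bucket = loaded;
            ecs.submit(new Callable<Void>() {
               @Override
               public Void call() {
                  for (String entry : bucket) {
                     handle(entry);
                  }
                  return null;
               }
            });
            submitted++;
         }
         // Drain the completion service; get() rethrows any failure from a task.
         for (int i = 0; i < submitted; i++) {
            ecs.take().get();
         }
      } finally {
         executor.shutdown();
      }
   }

   private static void handle(String entry) {
      System.out.println("processing " + entry);
   }
}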

            if (immediateLockForWriting(bucketId)) {
               if (log.isTraceEnabled()) {
                  log.tracef("Adding bucket keyed %s for purging.", bucketId);
               }
               InputStream binaryStream = rs.getBinaryStream(1);
               Bucket bucket = unmarshallBucket(binaryStream);
               bucket.setBucketId(bucketId);
               expiredBuckets.add(bucket);
               if (expiredBuckets.size() == BATCH_SIZE) {
                  ++tasksScheduled;
                  ecs.submit(new BucketPurger(expiredBuckets, task, ctx.getMarshaller(), conn, emptyBuckets));
                  expiredBuckets = new ArrayList<Bucket>(BATCH_SIZE);
               }
            } else {
               if (log.isTraceEnabled()) {
                  log.tracef("Could not acquire write lock for %s, this won't be purged even though it has expired elements", bucketId);
               }
            }
            // continuously unlock already purged buckets - we don't want to run out of memory
            // by storing them in an unbounded collection
            tasksCompleted += unlockCompleted(ecs, false); // cannot throw InterruptedException
         }

         if (!expiredBuckets.isEmpty()) {
            ++tasksScheduled;
            ecs.submit(new BucketPurger(expiredBuckets, task, ctx.getMarshaller(), conn, emptyBuckets));
         }
         // wait until all tasks have completed
         try {
            while (tasksCompleted < tasksScheduled) {
               tasksCompleted += unlockCompleted(ecs, true);
            }
         } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new PersistenceException("Interrupted purging JdbcBinaryStore", e);
         }
         // when all tasks have completed, we may have up to BATCH_SIZE empty buckets waiting to be deleted
         PreparedStatement deletePs = null;
         try {
            deletePs = conn.prepareStatement(tableManipulation.getDeleteRowSql());
            Bucket bucket;
            while ((bucket = emptyBuckets.poll()) != null) {
               deletePs.setString(1, bucket.getBucketIdAsString());
               deletePs.addBatch();
               unlock(bucket.getBucketId());
            }
            log.tracef("Flushing deletion batch");
            deletePs.executeBatch();
            log.tracef("Flushed deletion batch");
         } catch (Exception ex) {
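The purge path above deletes emptied buckets with a JDBC batch: one PreparedStatement, an addBatch() per row, and a single executeBatch() at the end. A minimal standalone sketch of that batching idiom, assuming a made-up table and column name rather than Infinispan's actual schema:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Queue;

// Hypothetical illustration of the batch-delete idiom from the purge path;
// "bucket_table" and "id" are invented names, not the real schema.
public final class BatchDeleteSketch {

   public static void deleteBuckets(Connection conn, Queue<String> emptyBucketIds) throws SQLException {
      String sql = "DELETE FROM bucket_table WHERE id = ?";
      try (PreparedStatement deletePs = conn.prepareStatement(sql)) {
         String bucketId;
         while ((bucketId = emptyBucketIds.poll()) != null) {
            deletePs.setString(1, bucketId);   // same role as bucket.getBucketIdAsString()
            deletePs.addBatch();
         }
         deletePs.executeBatch();              // flush the whole batch in one round trip
      }
   }
}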

         if (!rs.next()) {
            return null;
         }
         String bucketName = rs.getString(1);
         InputStream inputStream = rs.getBinaryStream(2);
         Bucket bucket = unmarshallBucket(inputStream);
         bucket.setBucketId(bucketName); // the bucket id is not marshalled with the entries, so restore it from the row key
         return bucket;
      } catch (SQLException e) {
         log.sqlFailureLoadingKey(String.valueOf(bucketId), e);
         throw new PersistenceException(String.format(
               "Sql failure while loading key: %s", bucketId), e);
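loadBucket() above selects a single row by bucket id, restores the non-persisted bucket id on the deserialized value, and wraps SQL failures in a PersistenceException. A standalone sketch of that shape, assuming an invented schema, IllegalStateException in place of PersistenceException, and plain Java serialization in place of the Infinispan marshaller:

import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Map;

// Hypothetical sketch of the "select one row, deserialize the blob" shape of loadBucket();
// the table, columns and exception type are stand-ins, not Infinispan's.
public final class LoadRowSketch {

   public static Map<Object, Object> loadBucket(Connection conn, int bucketId) {
      String sql = "SELECT id, data FROM bucket_table WHERE id = ?";
      try (PreparedStatement ps = conn.prepareStatement(sql)) {
         ps.setString(1, String.valueOf(bucketId));
         try (ResultSet rs = ps.executeQuery()) {
            if (!rs.next()) {
               return null;                    // no such bucket
            }
            try (InputStream in = rs.getBinaryStream(2);
                 ObjectInputStream ois = new ObjectInputStream(in)) {
               @SuppressWarnings("unchecked")
               Map<Object, Object> entries = (Map<Object, Object>) ois.readObject();
               return entries;
            }
         }
      } catch (SQLException | IOException | ClassNotFoundException e) {
         // Mirrors the original: report the failure, then rethrow as a store-level exception.
         throw new IllegalStateException("Failure while loading bucket " + bucketId, e);
      }
   }
}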

   public TableManipulation getTableManipulation() {
      return tableManipulation;
   }

   protected void storeInBucket(MarshalledEntry me, Integer bucketId) {
      Bucket bucket = loadBucket(bucketId);
      if (bucket != null) {
         bucket.addEntry(me.getKey(), me);
         updateBucket(bucket);
      } else {
         bucket = new Bucket(keyEquivalence);
         bucket.setBucketId(bucketId);
         bucket.addEntry(me.getKey(), me);
         insertBucket(bucket);
      }
   }

   protected boolean removeKeyFromBucket(Object key, Integer bucketId) {
      Bucket bucket = loadBucket(bucketId);
      if (bucket == null) {
         return false;
      } else {
         boolean success = bucket.removeEntry(key);
         if (success) {
            updateBucket(bucket);
         }
         return success;
      }
   }
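storeInBucket() and removeKeyFromBucket() above treat a Bucket as a small map of entries that share one bucket id: it is created on demand, mutated, and written back whenever it changes. The class below is a hypothetical, heavily simplified stand-in showing only that surface (addEntry/getEntry/removeEntry plus an expiry check); it is not the real org.infinispan.persistence.support.Bucket, and it checks expiry against System.currentTimeMillis() instead of Infinispan's TimeService.

import java.util.HashMap;
import java.util.Map;

// Hypothetical, simplified stand-in for a Bucket: a map of entries sharing one bucket id.
public class SimpleBucket {

   public static final class Entry {
      final Object value;
      final long expiryTime;        // -1 means the entry never expires
      Entry(Object value, long expiryTime) {
         this.value = value;
         this.expiryTime = expiryTime;
      }
   }

   private final Map<Object, Entry> entries = new HashMap<>();
   private Integer bucketId;        // not persisted with the entries; restored from the row key

   public void setBucketId(Integer bucketId) {
      this.bucketId = bucketId;
   }

   public Integer getBucketId() {
      return bucketId;
   }

   public void addEntry(Object key, Object value, long expiryTime) {
      entries.put(key, new Entry(value, expiryTime));
   }

   public Object getEntry(Object key, long now) {
      Entry e = entries.get(key);
      if (e == null || (e.expiryTime > 0 && e.expiryTime <= now)) {
         return null;               // missing or already expired
      }
      return e.value;
   }

   public boolean removeEntry(Object key) {
      return entries.remove(key) != null;
   }

   public boolean contains(Object key, long now) {
      return getEntry(key, now) != null;
   }

   public boolean isEmpty() {
      return entries.isEmpty();
   }
}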

      return configuration;
   }

   private Bucket unmarshallBucket(InputStream stream) throws PersistenceException {
      Map<Object, MarshalledEntry> entries = JdbcUtil.unmarshall(ctx.getMarshaller(), stream);
      return new Bucket(entries, keyEquivalence);
   }
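unmarshallBucket() above rebuilds a Bucket from the entry map stored in the blob column. The write side (what insertBucket/updateBucket need) is the reverse: serialize the entry map to bytes and bind it as the blob parameter. A hedged round-trip sketch, with plain Java serialization standing in for the marshaller obtained from ctx.getMarshaller():

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;

// Hypothetical round trip between an entry map and the bytes stored in the blob column;
// plain Java serialization replaces the Infinispan marshaller.
public final class BucketCodecSketch {

   public static byte[] marshallEntries(Map<String, Serializable> entries) throws IOException {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      try (ObjectOutputStream oos = new ObjectOutputStream(bytes)) {
         oos.writeObject(new HashMap<String, Serializable>(entries));   // write a serializable copy of the map
      }
      return bytes.toByteArray();
   }

   @SuppressWarnings("unchecked")
   public static Map<String, Serializable> unmarshallEntries(byte[] blob) throws IOException, ClassNotFoundException {
      try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(blob))) {
         return (Map<String, Serializable>) ois.readObject();
      }
   }
}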

            PreparedStatement ps = null;
            try {
               String sql = tableManipulation.getDeleteRowSql();
               ps = conn.prepareStatement(sql);
               int deletionCount = 0;
               Bucket bucket;
               while (deletionCount < BATCH_SIZE && (bucket = emptyBuckets.poll()) != null) {
                  ps.setString(1, bucket.getBucketIdAsString());
                  ps.addBatch();
                  deletionCount++;
                  purgedBuckets.add(bucket.getBucketId());
               }
               log.tracef("Flushing deletion batch");
               ps.executeBatch();
               log.tracef("Flushed deletion batch");
            } catch (Exception ex) {
