Package java.util.concurrent

Examples of java.util.concurrent.ExecutorCompletionService
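
ExecutorCompletionService wraps an Executor and queues the Future of each task as it completes, so results can be consumed with take() or poll() in completion order rather than submission order. The snippets below come from various open-source projects. As a starting point, here is a minimal self-contained sketch of the basic pattern (the class and tasks are illustrative, not taken from any of the projects below):

    import java.util.concurrent.*;

    public class CompletionServiceExample {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            CompletionService<Integer> ecs = new ExecutorCompletionService<Integer>(pool);

            // Submit several independent tasks.
            for (int i = 1; i <= 4; i++) {
                final int n = i;
                ecs.submit(new Callable<Integer>() {
                    public Integer call() throws Exception {
                        Thread.sleep(100 * n);   // simulate work of varying length
                        return n * n;
                    }
                });
            }

            // Consume results in completion order, not submission order.
            for (int i = 0; i < 4; i++) {
                Future<Integer> done = ecs.take();   // blocks until some task finishes
                System.out.println("completed: " + done.get());
            }
            pool.shutdown();
        }
    }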


            int pingRes = newClient.ping(123321);

            // Execute collection asynchronously (TODO: cache pool usage could be improved)
            final ExecutorService cachePool = Executors.newCachedThreadPool();
            final ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(cachePool);
            final Future<String> future = ecs.submit(new Callable<String>() {

                public String call() throws Exception {
                    return AccessController.doPrivileged(new PrivilegedAction<String>() {
                        public String run() {
                            return newClient.toString();


        final DiscoveryManager newClient = Proxies.newClient(EXPORT_NAME, address, getClass().getClassLoader(), DiscoveryManager.class);

        // Execute collection asynchronously (TODO: cache pool usage could be improved)
        final ExecutorService cachePool = Executors.newCachedThreadPool(RemoteDiscoveryImpl.threadFactory);
        final ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(cachePool);
        final Future<String> future = ecs.submit(new Callable<String>() {

            public String call() throws Exception {
                return AccessController.doPrivileged(new PrivilegedAction<String>() {
                    public String run() {
                        return newClient.ping(667) == 667 ? "OK" : null;

        // Tricky part: obtaining the proxy is fast, but we only know everything works after the first call,
        // so let's try that ...
        final R newClient = Proxies.newClient(name, address, remote);

        // Execute collection asynchronously
        final ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(Executors.newCachedThreadPool());
        ecs.submit(new Callable<String>() {

            public String call() throws Exception {
                return newClient.toString();
            }
        });

        // Wait at most half a second (TODO: Make this configurable)
        try {
            final Future<String> poll = ecs.poll(timeout, TimeUnit.MILLISECONDS);
            if (poll == null) return null;

            poll.get(timeout, TimeUnit.MILLISECONDS);

            return newClient;
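
The snippet above submits a single cheap probe call through a freshly created proxy and then polls the completion service with a timeout, so an unreachable remote endpoint cannot block the caller indefinitely. A minimal sketch of that idea, assuming a hypothetical Remote interface with a ping() method (neither is part of the project above):

    import java.util.concurrent.*;

    // Sketch only: Remote and its ping() are hypothetical stand-ins for the proxies above.
    interface Remote { int ping(int value); }

    class ProbeExample {
        static Remote probe(final Remote candidate, long timeoutMillis) throws InterruptedException {
            ExecutorService pool = Executors.newCachedThreadPool();
            try {
                CompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(pool);
                ecs.submit(new Callable<Boolean>() {
                    public Boolean call() {
                        return candidate.ping(667) == 667;   // the first real call proves the proxy works
                    }
                });
                Future<Boolean> done = ecs.poll(timeoutMillis, TimeUnit.MILLISECONDS);
                if (done == null) return null;               // timed out: treat the proxy as unusable
                try {
                    return Boolean.TRUE.equals(done.get()) ? candidate : null;
                } catch (ExecutionException e) {
                    return null;                             // the probe call itself failed
                }
            } finally {
                pool.shutdown();
            }
        }
    }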

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {

        // Execute collection asynchronously
        ExecutorService newCachedThreadPool = Executors.newCachedThreadPool();
        final ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(newCachedThreadPool);
        Future<String> submit = ecs.submit(new Callable<String>() {

            public String call() {
                try {
                    Thread.sleep(1012331872);
                } catch (InterruptedException e) {

        this.size = 1;
      }
      executor = Executors.newFixedThreadPool(size);
    }
    if (completion == null) {
      completion = new ExecutorCompletionService(executor);
    }
    try {
      for (int i = 0; i < fileList.length; i++) {
        String filename = fileList[i].getPath().toUri().toString();
        log.info("Processing: " + filename);

        this.size = 1;
      }
      executor = Executors.newFixedThreadPool(size);
    }
    if (completion == null) {
      completion = new ExecutorCompletionService(executor);
    }
   
    try {
      // Locate directory output directories of the current demux, and create a unique directory list.
      HashSet<Path> inputPaths = new HashSet<Path>();
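
The two fragments above lazily create a fixed-size pool plus a completion service and then submit one task per input file. A minimal sketch of that shape, with a hypothetical process() placeholder standing in for the per-file work:

    import java.util.concurrent.*;
    import java.util.List;

    class BatchProcessor {
        // process(...) is a hypothetical stand-in for the per-file work in the snippets above.
        void processAll(List<String> files, int size) throws InterruptedException, ExecutionException {
            ExecutorService executor = Executors.newFixedThreadPool(size);
            CompletionService<String> completion = new ExecutorCompletionService<String>(executor);
            try {
                for (final String filename : files) {
                    completion.submit(new Callable<String>() {
                        public String call() { return process(filename); }
                    });
                }
                // Drain one result per submitted task; get() rethrows any task failure.
                for (int i = 0; i < files.size(); i++) {
                    completion.take().get();
                }
            } finally {
                executor.shutdown();
            }
        }

        String process(String filename) { return filename; }   // placeholder
    }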

   public void purge(Executor threadPool, PurgeListener task) {
      Connection conn = null;
      PreparedStatement ps = null;
      ResultSet rs = null;
      Collection<Bucket> expiredBuckets = new ArrayList<Bucket>(BATCH_SIZE);
      ExecutorCompletionService ecs = new ExecutorCompletionService(threadPool);
      BlockingQueue<Bucket> emptyBuckets = new LinkedBlockingQueue<Bucket>();
      // We have to lock and unlock the buckets in the same thread - the executor may run the
      // BucketPurger task in a different thread. That's why we can't unlock the locks there
      // but have to send them back to this thread through this queue.
      int tasksScheduled = 0;
      int tasksCompleted = 0;
      try {
         String sql = tableManipulation.getSelectExpiredRowsSql();
         conn = connectionFactory.getConnection();
         ps = conn.prepareStatement(sql);
         ps.setLong(1, ctx.getTimeService().wallClockTime());
         rs = ps.executeQuery();
         while (rs.next()) {
            Integer bucketId = rs.getInt(2);
            if (immediateLockForWriting(bucketId)) {
               if (log.isTraceEnabled()) {
                  log.tracef("Adding bucket keyed %s for purging.", bucketId);
               }
               InputStream binaryStream = rs.getBinaryStream(1);
               Bucket bucket = unmarshallBucket(binaryStream);
               bucket.setBucketId(bucketId);
               expiredBuckets.add(bucket);
               if (expiredBuckets.size() == BATCH_SIZE) {
                  ++tasksScheduled;
                  ecs.submit(new BucketPurger(expiredBuckets, task, ctx.getMarshaller(), conn, emptyBuckets));
                  expiredBuckets = new ArrayList<Bucket>(BATCH_SIZE);
               }
            } else {
               if (log.isTraceEnabled()) {
                  log.tracef("Could not acquire write lock for %s, this won't be purged even though it has expired elements", bucketId);
               }
            }
            // continuously unlock already purged buckets - we don't want to run out of memory
            // by storing them in an unbounded collection
            tasksCompleted += unlockCompleted(ecs, false); // cannot throw InterruptedException
         }

         if (!expiredBuckets.isEmpty()) {
            ++tasksScheduled;
            ecs.submit(new BucketPurger(expiredBuckets, task, ctx.getMarshaller(), conn, emptyBuckets));
         }
         // wait until all tasks have completed
         try {
            while (tasksCompleted < tasksScheduled) {
               tasksCompleted += unlockCompleted(ecs, true);
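
The purge() method above counts scheduled versus completed tasks: while producing batches it drains the completion queue non-blockingly, so finished futures (and the buckets they unlock) do not accumulate, and at the end it blocks until both counters match. A minimal sketch of that counting/draining pattern, with the actual work replaced by a placeholder:

    import java.util.concurrent.*;

    class DrainPattern {
        // Sketch of the scheduled/completed counting used above; the work itself is a placeholder.
        static void runAll(Executor threadPool, int taskCount) throws InterruptedException, ExecutionException {
            CompletionService<Void> ecs = new ExecutorCompletionService<Void>(threadPool);
            int scheduled = 0;
            int completed = 0;
            for (int i = 0; i < taskCount; i++) {
                ecs.submit(new Callable<Void>() {
                    public Void call() { return null; }   // placeholder work
                });
                scheduled++;
                // Non-blocking drain while producing, so completed futures don't pile up.
                Future<Void> done;
                while ((done = ecs.poll()) != null) {
                    done.get();
                    completed++;
                }
            }
            // Block until every scheduled task has been accounted for.
            while (completed < scheduled) {
                ecs.take().get();
                completed++;
            }
        }
    }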


   private AtomicReference<ExecutionException> firstException = new AtomicReference<ExecutionException>();
   private AtomicLong scheduled = new AtomicLong();
   private AtomicLong completed = new AtomicLong();

   public ExecutorAllCompletionService(Executor executor) {
      this.executorService = new ExecutorCompletionService(executor);
   }
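
ExecutorAllCompletionService wraps a plain Executor in an ExecutorCompletionService and, judging from the fields shown above, tracks scheduled and completed counts plus the first failure. A rough illustration of how such a wrapper can be put together; this sketch is not the project's actual implementation:

    import java.util.concurrent.*;
    import java.util.concurrent.atomic.*;

    // Illustrative wrapper only; the real ExecutorAllCompletionService differs in detail.
    class AllCompletionService {
        private final CompletionService<Void> executorService;
        private final AtomicReference<ExecutionException> firstException =
                new AtomicReference<ExecutionException>();
        private final AtomicLong scheduled = new AtomicLong();
        private final AtomicLong completed = new AtomicLong();

        AllCompletionService(Executor executor) {
            this.executorService = new ExecutorCompletionService<Void>(executor);
        }

        void submit(Callable<Void> task) {
            scheduled.incrementAndGet();
            executorService.submit(task);
        }

        // Wait for every submitted task, remembering only the first failure.
        void waitUntilAllCompleted() throws InterruptedException {
            while (completed.get() < scheduled.get()) {
                Future<Void> future = executorService.take();
                completed.incrementAndGet();
                try {
                    future.get();
                } catch (ExecutionException e) {
                    firstException.compareAndSet(null, e);
                }
            }
        }

        ExecutionException getFirstException() {
            return firstException.get();
        }
    }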

            final DocumentFactory documentFactory = new DocumentFactory(basedir, buildMapping(), buildHeaderDefinitions(), encoding, keywords);

            int nThreads = (int) (Runtime.getRuntime().availableProcessors() * concurrencyFactor);
            ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
            CompletionService<Object> completionService = new ExecutorCompletionService<Object>(executorService);
            int count = 0;
            debug("Number of execution threads: %s", nThreads);

            try {
                for (final String file : listSelectedFiles()) {
                    completionService.submit(new Runnable() {
                        public void run() {
                            Document document = documentFactory.createDocuments(file);
                            debug("Selected file: %s [header style: %s]", document.getFile(), document.getHeaderDefinition());
                            if (document.isNotSupported()) {
                                warn("Unknown file extension: %s", document.getFile());
                            } else if (document.is(h)) {
                                debug("Skipping header file: %s", document.getFile());
                            } else if (document.hasHeader(h, strictCheck)) {
                                callback.onExistingHeader(document, h);
                            } else {
                                boolean headerFound = false;
                                for (Header validHeader : validHeaders) {
                                    if (headerFound = document.hasHeader(validHeader, strictCheck)) {
                                        callback.onExistingHeader(document, h);
                                        break;
                                    }
                                }
                                if (!headerFound)
                                    callback.onHeaderNotFound(document, h);
                            }
                        }
                    }, null);
                    count++;
                }

                while (count-- > 0) {
                    try {
                        completionService.take().get();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } catch (ExecutionException e) {
                        Throwable cause = e.getCause();
                        if (cause instanceof Error)
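
The snippet above uses the submit(Runnable, result) variant with a null completion value, counts submissions, and then calls take().get() once per task so that any task failure surfaces as an ExecutionException. A minimal self-contained sketch of that submit-then-drain shape, with the per-file check replaced by caller-supplied Runnables:

    import java.util.concurrent.*;
    import java.util.List;

    class RunnableDrainExample {
        // Sketch of the submit/drain shape above; the per-file work is supplied by the caller.
        static void checkAll(List<Runnable> checks) {
            ExecutorService executorService = Executors.newFixedThreadPool(
                    Runtime.getRuntime().availableProcessors());
            CompletionService<Object> completionService =
                    new ExecutorCompletionService<Object>(executorService);
            int count = 0;
            try {
                for (Runnable check : checks) {
                    completionService.submit(check, null);   // Runnable variant: null completion value
                    count++;
                }
                while (count-- > 0) {
                    try {
                        completionService.take().get();      // rethrows task failures as ExecutionException
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } catch (ExecutionException e) {
                        Throwable cause = e.getCause();
                        if (cause instanceof Error) throw (Error) cause;
                        throw new RuntimeException(cause);
                    }
                }
            } finally {
                executorService.shutdown();
            }
        }
    }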
