Package gov.nasa.arc.mct.buffer.util

Examples of gov.nasa.arc.mct.buffer.util.ElapsedTimer
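
All of the snippets below use the same timing idiom: create an ElapsedTimer, call startInterval() before the work being measured, call stopInterval() after it, and read the result with getIntervalInMillis() for a performance logger. The following is a minimal, self-contained sketch of that idiom, assuming only the methods shown in the snippets; the TimedSketch class name, the Thread.sleep placeholder for real work, and the System.out reporting are illustrative and not part of the MCT sources.

    import gov.nasa.arc.mct.buffer.util.ElapsedTimer;

    public class TimedSketch {
        public static void main(String[] args) throws InterruptedException {
            final ElapsedTimer timer = new ElapsedTimer();
            timer.startInterval();   // mark the start of the measured interval

            Thread.sleep(50);        // stand-in for the work being timed

            timer.stopInterval();    // mark the end of the interval
            // Elapsed time is reported in milliseconds, as in the PERF loggers below.
            System.out.println("Interval took " + timer.getIntervalInMillis() + " ms");
        }
    }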


        PERF_WRITE_LOGGER.debug("COD Putting data for {} feeds", value.size());
        synchronized (movePartitionLock) {
            if (reset) return;
        }
       
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
           
            LOGGER.debug("Putting in partition {}", i);

            putData(partitionBuffer, value, timeUnit, metaDataBuffer, i);

            timer.stopInterval();
            PERF_LOGGER.debug("Time to save data for {} feeds: {}", value.size(), timer
                            .getIntervalInMillis());
            i = (i + 1) % currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);

        if (callback != null) {
View Full Code Here


    public void putData(String feedID, TimeUnit timeUnit, long time, Map<String, String> value) throws BufferFullException {
        synchronized (movePartitionLock) {
            if (reset) return;
        }
       
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        Map<Long, Map<String, String>> dataToPut = new HashMap<Long, Map<String, String>>();
        dataToPut.put(Long.valueOf(time), value);
       
        Map<String, Map<Long, Map<String, String>>> feedDataToPut = new HashMap<String, Map<Long,Map<String,String>>>();
        feedDataToPut.put(feedID, dataToPut);

        int i = this.currentParition.getBufferEnv().getCurrentBufferPartition();
        int startPartition = i;
        do {
            PartitionDataBuffer partitionBuffer = this.partitionDataBuffers[i].get();
            if (partitionBuffer == null || !partitionBuffer.isActive()) {
                break;
            }
           
            LOGGER.debug("Putting in partition {}", i);

            Map<String, PartitionTimestamps> timeStamps = putData(partitionBuffer, feedDataToPut, timeUnit);
            if (timeStamps != null) {
                metaDataBuffer.updatePartitionMetaData(partitionBuffer.getBufferEnv().getCurrentBufferPartition(), timeStamps);
            }
            i = (i + 1) % this.currentParition.getBufferEnv().getNumOfBufferPartitions();
        } while (i != startPartition);
       
        timer.stopInterval();
        PERF_LOGGER.debug("Time to save data for feed {}: {}", feedID, timer.getIntervalInMillis());
    }
View Full Code Here

   
    @SuppressWarnings("unchecked")
    public Map<String, SortedMap<Long, Map<String, String>>> getData(Set<String> feedIDs, final TimeUnit timeUnit,
            final long startTime, final long endTime) {
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        final Set<String>[] groupFeeds = groupInputFeeds(feedIDs);
        final Map<String, SortedMap<Long, Map<String, String>>>[] dataSlices = new Map[groupFeeds.length];
        final CountDownLatch readLatch = new CountDownLatch(groupFeeds.length);
        for (int i = 0; i < groupFeeds.length; i++) {
            final int dataIndex = i;

            Runnable r = new Runnable() {

                @Override
                public void run() {
                    try {
                        Map<String, SortedMap<Long, Map<String, String>>> dataSlice = getData(databases[dataIndex],
                                groupFeeds[dataIndex], timeUnit, startTime, endTime);
                        if (dataSlice != null) {
                            dataSlices[dataIndex] = dataSlice;
                        }
                    } finally {
                        readLatch.countDown();
                    }
                }
            };
           
            readThreads.execute(r);
        }
       
        try {
            readLatch.await();
        } catch (InterruptedException e) {
            LOGGER.warn("Internal error during getData thread", e);
        }

        Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>();
        for (int i = 0; i < dataSlices.length; i++) {
            Map<String, SortedMap<Long, Map<String, String>>> dataSlice = dataSlices[i];
            if (dataSlice != null) {
                returnedData.putAll(dataSlice);
            }
        }
        timer.stopInterval();
        READ_PERF_LOGGER.debug("time to get 1 partition Data for {} feeds: {}", feedIDs.size(), timer
                .getIntervalInMillis());

        return returnedData;

    }
View Full Code Here

        }
        return (Map<String, Map<Long, Map<String, String>>>[]) groupInputData;
    }

    public Map<String, PartitionTimestamps> putData(Map<String, Map<Long, Map<String, String>>> value, final TimeUnit timeUnit) throws BufferFullException {
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();
        final Map<String, Map<Long, Map<String, String>>>[] groupData = groupInputDataByFeed(value);
        final Map<String, PartitionTimestamps> timestamps = new HashMap<String, PartitionTimestamps>();

        final AtomicBoolean bufferFull = new AtomicBoolean(false);
        final CountDownLatch latch = new CountDownLatch(groupData.length);
        for (int i = 0; i < groupData.length; i++) {
            final int dataIndex = i;
            Runnable r = new Runnable() {
                @Override
                public void run() {
                    try {
                        for (Entry<String, Map<Long, Map<String, String>>> feedData : groupData[dataIndex].entrySet()) {
                            PartitionTimestamps timeStamp = null;
                            try {
                                timeStamp = putData(null, feedData.getKey(), databases[dataIndex], timeUnit, feedData.getValue());
                            } catch (BufferFullException e) {
                                bufferFull.compareAndSet(false, true);
                            }
                            if (timeStamp == null) {
                                break;
                            } else {
                                timestamps.put(feedData.getKey(), timeStamp);
                                LOGGER.debug("feedData.getKey(): {}, timeStamp: {}", feedData.getKey(), timeStamp);
                            }
                        }
                    } finally {
                        latch.countDown();
                    }
                }
            };

            writeThreads.execute(r);
        }
       
        try {
            latch.await();
        } catch (InterruptedException e) {
            LOGGER.warn("Internal error during putData thread", e);
        }
        if (bufferFull.get()) {
            throw new BufferFullException(env.getErrorMsg());
        }
        timer.stopInterval();

        WRITE_PERF_LOGGER.debug("Time to write {} feeds: {}", value.size(), timer.getIntervalInMillis());
        return timestamps;
    }
View Full Code Here

        return timestamps;
    }

    @Override
    public void putData(Map<String, Map<Long, Map<String, String>>> value, final TimeUnit timeUnit, final MetaDataBuffer metadata, final int metadataIndex) throws BufferFullException {
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();
        final Map<String, Map<Long, Map<String, String>>>[] groupData = groupInputDataByFeed(value);

        final AtomicBoolean bufferFull = new AtomicBoolean(false);
        final CountDownLatch latch = new CountDownLatch(groupData.length);
        for (int i = 0; i < groupData.length; i++) {
            final int dataIndex = i;
            Runnable r = new Runnable() {
                @Override
                public void run() {
                    try {
                        for (Entry<String, Map<Long, Map<String, String>>> feedData : groupData[dataIndex].entrySet()) {
                            PartitionTimestamps timeStamp = null;
                            try {
                                timeStamp = putData(null, feedData.getKey(), databases[dataIndex], timeUnit, feedData.getValue());
                            } catch (BufferFullException e) {
                                bufferFull.compareAndSet(false, true);
                            }
                            if (timeStamp == null) {
                                break;
                            } else {
                                metadata.updatePartitionMetaData(metadataIndex, feedData.getKey(), timeStamp.getStartTimestamp(), timeStamp.getEndTimestamp());
                            }
                        }
                    } finally {
                        latch.countDown();
                    }
                }
            };

            writeThreads.execute(r);
        }
       
        try {
            latch.await();
        } catch (InterruptedException e) {
            LOGGER.warn("Internal error during putData thread", e);
        }
        if (bufferFull.get()) {
            throw new BufferFullException(env.getErrorMsg());
        }
        timer.stopInterval();

        WRITE_PERF_LOGGER.debug("Time to write {} feeds: {}", value.size(), timer.getIntervalInMillis());
    }
View Full Code Here

    }
   
    @SuppressWarnings("unchecked")
    @Override
    public Map<String, SortedMap<Long, Map<String, String>>> getLastData(Set<String> feedIDs, final TimeUnit timeUnit, final long startTime, final long endTime) {
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();

        final Set<String>[] groupFeeds = groupInputFeeds(feedIDs);
        final Map<String, SortedMap<Long, Map<String, String>>>[] dataSlices = new Map[groupFeeds.length];
        final CountDownLatch latch = new CountDownLatch(groupFeeds.length);
        for (int i = 0; i < groupFeeds.length; i++) {
            final int dataIndex = i;

            Runnable r = new Runnable() {

                @Override
                public void run() {
                    try {
                        Map<String, SortedMap<Long, Map<String, String>>> dataSlice = getLastData(databases[dataIndex],
                            groupFeeds[dataIndex], timeUnit, startTime, endTime);
                        if (dataSlice != null) {
                            dataSlices[dataIndex] = dataSlice;
                        }
                    } finally {
                        latch.countDown();
                    }

                }
            };
           
            readThreads.execute(r);
           
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            LOGGER.warn("Internal error during getLastData thread", e);
        }

        Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>();
        for (int i = 0; i < dataSlices.length; i++) {
            Map<String, SortedMap<Long, Map<String, String>>> dataSlice = dataSlices[i];
            if (dataSlice != null) {
                returnedData.putAll(dataSlice);
            }
        }

        timer.stopInterval();
        READ_PERF_LOGGER.debug("time to get 1 partition last Data for {} feeds: {}", feedIDs.size(), timer
                .getIntervalInMillis());

        return returnedData;

    }
View Full Code Here

        return returnedCachedData;
    }
   
    @Override
    public Map<String, SortedMap<Long, Map<String, String>>> getLastData(Set<String> feedIDs, TimeUnit timeUnit, long startTime, long endTime) {
        final ElapsedTimer timer = new ElapsedTimer();
        timer.startInterval();
       
        Map<String, TreeMap<Long, Map<String, String>>> cachedData = getCachedData();
       
        Map<String, SortedMap<Long, Map<String, String>>> returnedData = new HashMap<String, SortedMap<Long, Map<String, String>>>();

        for (String feedID : feedIDs) {
            synchronized (this) {
                TreeMap<Long, Map<String, String>> feedCachedData = cachedData.get(feedID);
                if (feedCachedData == null) {
                    continue;
                }

                long start = TimeUnit.NANOSECONDS.convert(startTime, timeUnit);
                long end = TimeUnit.NANOSECONDS.convert(endTime, timeUnit);
                Entry<Long, Map<String, String>> feedSearchedData = feedCachedData.subMap(start, true, end, true).lastEntry();
                if (feedSearchedData != null) {
                    SortedMap<Long, Map<String, String>> feedData = new TreeMap<Long, Map<String, String>>();
                    feedData.put(feedSearchedData.getKey(), feedSearchedData.getValue());
                    returnedData.put(feedID, feedData);
                }
            }
        }
       
        timer.stopInterval();
        READ_PERF_LOGGER.debug("Time to get {} feeds from memory: {} from partition {}", feedIDs.size(), timer.getIntervalInMillis(), this.env.getCurrentBufferPartition());

        return returnedData;
    }
View Full Code Here
