Package org.rhq.core.domain.cloud

Examples of org.rhq.core.domain.cloud.StorageNodeLoadComposite
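
The snippets below are from RHQ's server-side storage node manager bean. The recurring pattern: create an empty StorageNodeLoadComposite for a node and time window, then enrich it through its setters. A minimal usage sketch based only on the constructor and setters visible below (the aggregate values are made up for illustration; the MeasurementAggregate constructor order min/avg/max matches its use in the async snippet):

    // Illustrative only: construct a composite and attach one aggregate with units.
    StorageNodeLoadComposite load = new StorageNodeLoadComposite(node, beginTime, endTime);
    MeasurementAggregate heapUsed = new MeasurementAggregate(1.0e8, 2.5e8, 4.0e8); // min, avg, max
    StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedWithUnits =
        new StorageNodeLoadComposite.MeasurementAggregateWithUnits(heapUsed, MeasurementUnits.BYTES);
    load.setHeapUsed(heapUsedWithUnits);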


    @RequiredPermission(Permission.MANAGE_SETTINGS)
    public StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime) {
        Stopwatch stopwatch = stopwatchStart();
        try {
            if (!storageClientManager.isClusterAvailable()) {
                return new StorageNodeLoadComposite(node, beginTime, endTime);
            }
            int storageNodeResourceId;
            try {
                storageNodeResourceId = getResourceIdFromStorageNode(node);
            } catch (ResourceNotFoundException e) {
                log.warn(e.getMessage());
                return new StorageNodeLoadComposite(node, beginTime, endTime);
            }
            Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();

            for (Object[] tuple : getChildrenScheduleIds(storageNodeResourceId, false)) {
                String definitionName = (String) tuple[0];
                Integer scheduleId = (Integer) tuple[2];
                scheduleIdsMap.put(definitionName, scheduleId);
            }
            for (Object[] tuple : getGrandchildrenScheduleIds(storageNodeResourceId, false)) {
                String definitionName = (String) tuple[0];
                Integer scheduleId = (Integer) tuple[2];
                scheduleIdsMap.put(definitionName, scheduleId);
            }

            StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime);
            MeasurementAggregate totalDiskUsedAggregate = new MeasurementAggregate(0d, 0d, 0d);
            Integer scheduleId = null;

            // find the aggregates and enrich the result instance
            if (!scheduleIdsMap.isEmpty()) {
                try {
                    if ((scheduleId = scheduleIdsMap.get(METRIC_TOKENS)) != null) {
                        MeasurementAggregate tokensAggregate = measurementManager.getMeasurementAggregate(subject,
                            scheduleId, beginTime, endTime);
                        result.setTokens(tokensAggregate);
                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_OWNERSHIP)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits ownershipAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
                        result.setActuallyOwns(ownershipAggregateWithUnits);
                    }

                    //calculated disk space related metrics
                    if ((scheduleId = scheduleIdsMap.get(METRIC_DATA_DISK_USED_PERCENTAGE)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
                        result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits);
                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_DISK_USED_PERCENTAGE)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
                        result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits);
                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_FREE_DISK_TO_DATA_RATIO)) != null) {
                        MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getMeasurementAggregate(
                            subject, scheduleId, beginTime, endTime);
                        result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate);
                    }

                    if ((scheduleId = scheduleIdsMap.get(METRIC_LOAD)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits loadAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime);
                        result.setLoad(loadAggregateWithUnits);

                        updateAggregateTotal(totalDiskUsedAggregate, loadAggregateWithUnits.getAggregate());
                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_KEY_CACHE_SIZE)) != null) {
                        updateAggregateTotal(totalDiskUsedAggregate,
                            measurementManager.getMeasurementAggregate(subject, scheduleId, beginTime, endTime));

                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_ROW_CACHE_SIZE)) != null) {
                        updateAggregateTotal(totalDiskUsedAggregate,
                            measurementManager.getMeasurementAggregate(subject, scheduleId, beginTime, endTime));
                    }

                    if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_COMMIT_LOG_SIZE)) != null) {
                        updateAggregateTotal(totalDiskUsedAggregate,
                            measurementManager.getMeasurementAggregate(subject, scheduleId, beginTime, endTime));
                    }
                    if (totalDiskUsedAggregate.getMax() > 0) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedAggregateWithUnits = new StorageNodeLoadComposite.MeasurementAggregateWithUnits(
                            totalDiskUsedAggregate, MeasurementUnits.BYTES);
                        totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedAggregate,
                            MeasurementUnits.BYTES));
                        result.setDataDiskUsed(totalDiskUsedAggregateWithUnits);
                    }

                    if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_COMMITED)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits heapCommittedAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime);
                        result.setHeapCommitted(heapCommittedAggregateWithUnits);
                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime);
                        result.setHeapUsed(heapUsedAggregateWithUnits);
                    }
                    if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED_PERCENTAGE)) != null) {
                        StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
                            subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
                        result.setHeapPercentageUsed(heapUsedPercentageAggregateWithUnits);
                    }
                } catch (NoHostAvailableException nhae) {
                    // storage cluster went down while performing this method
                    return new StorageNodeLoadComposite(node, beginTime, endTime);
                }
            }

            return result;
        } finally {
            // stopwatch logging elided in the original listing
        }
    }

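The getLoad(...) snippet above relies on two private helpers that the listing omits. First, getMeasurementAggregateWithUnits(...): a hedged reconstruction from its call sites and from the MeasurementAggregateWithUnits/getSummaryString usage near the end of the method -- a sketch, not the confirmed implementation:

    // Hypothetical reconstruction; the real body is not shown in the original listing.
    private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(
        Subject subject, int scheduleId, MeasurementUnits units, long beginTime, long endTime) {
        // fetch the aggregate exactly as the calling code does for unit-less metrics
        MeasurementAggregate aggregate = measurementManager.getMeasurementAggregate(subject, scheduleId,
            beginTime, endTime);
        // pair it with units and a formatted summary, mirroring the dataDiskUsed block above
        StorageNodeLoadComposite.MeasurementAggregateWithUnits aggregateWithUnits =
            new StorageNodeLoadComposite.MeasurementAggregateWithUnits(aggregate, units);
        aggregateWithUnits.setFormattedValue(getSummaryString(aggregate, units));
        return aggregateWithUnits;
    }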

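Second, updateAggregateTotal(...): from its usage -- accumulating the load, key cache, row cache, and commit log aggregates into totalDiskUsedAggregate -- it plausibly sums min, avg, and max component-wise. A sketch under that assumption (the getter/setter names on MeasurementAggregate are assumed):

    // Hypothetical reconstruction; the real body is not shown in the original listing.
    private void updateAggregateTotal(MeasurementAggregate accumulator, MeasurementAggregate input) {
        // skip missing inputs so one absent metric does not poison the running total
        if (accumulator != null && input != null && input.getMin() != null && input.getAvg() != null
            && input.getMax() != null) {
            accumulator.setMin(accumulator.getMin() + input.getMin());
            accumulator.setAvg(accumulator.getAvg() + input.getAvg());
            accumulator.setMax(accumulator.getMax() + input.getMax());
        }
    }
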
    @Override
    @RequiredPermission(Permission.MANAGE_SETTINGS)
    public ListenableFuture<List<StorageNodeLoadComposite>> getLoadAsync(Subject subject, StorageNode node,
        long beginTime, long endTime) {
        Stopwatch stopwatch = stopwatchStart();
        final StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime);
        try {
            if (!storageClientManager.isClusterAvailable()) {
                return Futures.successfulAsList(Lists.newArrayList(Futures.immediateFuture(result)));
            }
            int storageNodeResourceId;
            try {
                storageNodeResourceId = getResourceIdFromStorageNode(node);
            } catch (ResourceNotFoundException e) {
                log.warn(e.getMessage());
                return Futures.successfulAsList(Lists.newArrayList(Futures.immediateFuture(result)));
            }
            try {
                final String host = InetAddress.getByName(node.getAddress()).getCanonicalHostName();
                if (!node.getAddress().equals(host)) {
                    result.setHostname(host + " (" + node.getAddress() + ")");
                }
            } catch (UnknownHostException e) {
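                // fall back to showing only the raw node address when reverse lookup fails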
            }
            MetricsServer metricsServer = storageClientManager.getMetricsServer();
            Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();

            for (Object[] tuple : getChildrenScheduleIds(storageNodeResourceId, true)) {
                String definitionName = (String) tuple[0];
                Integer scheduleId = (Integer) tuple[2];
                scheduleIdsMap.put(definitionName, scheduleId);
            }
            for (Object[] tuple : getGrandchildrenScheduleIds(storageNodeResourceId, true)) {
                String definitionName = (String) tuple[0];
                Integer scheduleId = (Integer) tuple[2];
                scheduleIdsMap.put(definitionName, scheduleId);
            }

            List<ListenableFuture<StorageNodeLoadComposite>> compositeFutures = new ArrayList<ListenableFuture<StorageNodeLoadComposite>>();
            final MeasurementAggregate totalDiskUsedAggregate = new MeasurementAggregate(0d, 0d, 0d);
            Integer scheduleId = null;

            // find the aggregates and enrich the result instance
            if (scheduleIdsMap.isEmpty()) {
                // no scheduled metrics yet
                return Futures.successfulAsList(Lists.newArrayList(Futures.immediateFuture(result)));
            }

            if ((scheduleId = scheduleIdsMap.get(METRIC_FREE_DISK_TO_DATA_RATIO)) != null) {
                ListenableFuture<AggregateNumericMetric> dataFuture = metricsServer.getSummaryAggregateAsync(
                    scheduleId, beginTime, endTime);
                ListenableFuture<StorageNodeLoadComposite> compositeFuture = Futures.transform(dataFuture,
                    new Function<AggregateNumericMetric, StorageNodeLoadComposite>() {
                        @Override
                        public StorageNodeLoadComposite apply(AggregateNumericMetric metric) {
                            result.setFreeDiskToDataSizeRatio(new MeasurementAggregate(metric.getMin(),
                                metric.getAvg(), metric.getMax()));
                            return result;
                        }
                    });
                compositeFutures.add(wrapFuture(compositeFuture, result, "Failed to retrieve metric ["
                    + METRIC_FREE_DISK_TO_DATA_RATIO + "] data for " + node));
            }
            if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED_PERCENTAGE)) != null) {
                ListenableFuture<StorageNodeLoadComposite.MeasurementAggregateWithUnits> dataFuture = getMeasurementAggregateWithUnitsAsync(
                    scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
                ListenableFuture<StorageNodeLoadComposite> compositeFuture = Futures.transform(dataFuture,
                    new Function<StorageNodeLoadComposite.MeasurementAggregateWithUnits, StorageNodeLoadComposite>() {
                        @Override
                        public StorageNodeLoadComposite apply(
                            StorageNodeLoadComposite.MeasurementAggregateWithUnits metric) {
                            result.setHeapPercentageUsed(metric);
                            return result;
                        }
                    });
                compositeFutures.add(wrapFuture(compositeFuture, result, "Failed to retrieve metric ["
                    + METRIC_HEAP_USED_PERCENTAGE + "] data for " + node));
            }
            // ... futures for the remaining metrics elided in the original listing;
            // presumably the method ends by combining them, e.g.:
            return Futures.successfulAsList(compositeFutures);
        } finally {
            // stopwatch logging elided in the original listing
        }
    }

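The async snippet relies on a wrapFuture(...) helper that the listing omits. Its call sites suggest it shields the combined result from individual metric failures: log the message, substitute the partially populated composite. A hedged sketch using Guava's Futures.withFallback (the helper's actual body is not shown in the listing):

    // Hypothetical reconstruction; the real body is not shown in the original listing.
    private ListenableFuture<StorageNodeLoadComposite> wrapFuture(
        ListenableFuture<StorageNodeLoadComposite> future, final StorageNodeLoadComposite fallbackValue,
        final String errorMsg) {
        return Futures.withFallback(future, new FutureFallback<StorageNodeLoadComposite>() {
            @Override
            public ListenableFuture<StorageNodeLoadComposite> create(Throwable t) {
                // one failed metric should not fail the whole load composite
                log.warn(errorMsg, t);
                return Futures.immediateFuture(fallbackValue);
            }
        });
    }
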
                            log.warn("An error occurred while fetching load data for " + theNode, t);
                            latch.countDown();
                        }
                    });
                } else { // newly installed node
                    result.add(new StorageNodeLoadComposite(node, beginTime, endTime));
                    latch.countDown();
                }

            }
            Map<Integer, Integer> alertCounts = findUnackedAlertCounts(nodes);
View Full Code Here

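For context, a hedged reconstruction of the loop the fragment above appears to come from: fan getLoadAsync(...) out over the nodes, fall back to an empty composite for nodes that have no load data yet, and count the latch down on every path (variable names are taken from the fragment; everything else is assumed):

    // Hypothetical reconstruction of the surrounding fan-out loop.
    final CountDownLatch latch = new CountDownLatch(nodes.size());
    final List<StorageNodeLoadComposite> result = Collections
        .synchronizedList(new ArrayList<StorageNodeLoadComposite>(nodes.size()));
    for (final StorageNode node : nodes) {
        if (node.getResource() != null) {
            final StorageNode theNode = node;
            Futures.addCallback(getLoadAsync(subject, node, beginTime, endTime),
                new FutureCallback<List<StorageNodeLoadComposite>>() {
                    @Override
                    public void onSuccess(List<StorageNodeLoadComposite> composites) {
                        result.addAll(composites);
                        latch.countDown();
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        log.warn("An error occurred while fetching load data for " + theNode, t);
                        latch.countDown();
                    }
                });
        } else { // newly installed node
            result.add(new StorageNodeLoadComposite(node, beginTime, endTime));
            latch.countDown();
        }
    }
    // the caller awaits the latch before reading 'result'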