Package: com.alibaba.otter.shared.etl.model

Examples of com.alibaba.otter.shared.etl.model.RowBatch


public class RpcPipeTest extends BaseOtterTest {

    @Test
    public void test_ok() {
        final DbBatch source = new DbBatch();
        RowBatch rowBatch = new RowBatch();
        Identity identity = new Identity();
        identity.setChannelId(100L);
        identity.setPipelineId(100L);
        identity.setProcessId(100L);
        rowBatch.setIdentity(identity);
        source.setRowBatch(rowBatch);

        final RowDataRpcPipe pipe = new RowDataRpcPipe();
        try {
            pipe.afterPropertiesSet();
View Full Code Here


    @Test
    public void test_timeout() {

        final DbBatch source = new DbBatch();
        RowBatch rowBatch = new RowBatch();
        Identity identity = new Identity();
        identity.setChannelId(100L);
        identity.setPipelineId(100L);
        identity.setProcessId(100L);
        rowBatch.setIdentity(identity);
        source.setRowBatch(rowBatch);

        final RowDataRpcPipe pipe = new RowDataRpcPipe();
        pipe.setTimeout(1 * 1000L);// 1s后超时
        try {
View Full Code Here

                returns(pipeline);
            }
        };

        // 构造数据
        RowBatch rowBatch = new RowBatch();
        rowBatch.setIdentity(identity);
        for (int tableId = start; tableId < start + count; tableId++) {
            for (int i = start; i < start + count; i++) {
                EventData eventData = getEventData(tableId, i);
                eventData.setSchemaName("srf");
                eventData.setTableName("columns");
                rowBatch.merge(eventData);
            }
        }

        databaseExtractor.extract(new DbBatch(rowBatch));
        want.number(rowBatch.getDatas().size()).isEqualTo(count);
    }
View Full Code Here

                returns(pipeline);
            }
        };

        // 构造数据
        RowBatch rowBatch = new RowBatch();
        rowBatch.setIdentity(identity);
        for (int tableId = start; tableId < start + count; tableId++) {
            for (int i = start; i < start + count; i++) {
                EventData eventData = getEventData(tableId, i);
                eventData.setSchemaName("srf");
                eventData.setTableName("columns");
                eventData.setSyncConsistency(SyncConsistency.MEDIA);
                rowBatch.merge(eventData);
            }
        }

        databaseExtractor.extract(new DbBatch(rowBatch));

        want.number(rowBatch.getDatas().size()).isEqualTo(count);
    }
View Full Code Here

                returns(pipeline);
            }
        };

        // 构造数据
        RowBatch rowBatch = new RowBatch();
        rowBatch.setIdentity(identity);
        for (int tableId = start; tableId < start + count; tableId++) {
            for (int i = start; i < start + count; i++) {
                EventData eventData = getEventData(tableId, i);
                eventData.setSchemaName("retl");
                eventData.setTableName("retl_buffer");
                rowBatch.merge(eventData);
            }
        }

        DbBatch dbBatch = new DbBatch(rowBatch);
        freedomExtractor.extract(dbBatch);
View Full Code Here

                returns(pipeline);
            }
        };

        // 构造数据
        RowBatch rowBatch = new RowBatch();
        rowBatch.setIdentity(identity);
        for (int tableId = start; tableId < start + count; tableId++) {
            for (int i = start; i < start + count; i++) {
                EventData eventData = getEventData(tableId, i);
                eventData.setSchemaName("retl");
                eventData.setTableName("retl_buffer");
                rowBatch.merge(eventData);
            }
        }

        DbBatch dbBatch = new DbBatch(rowBatch);
        freedomExtractor.extract(dbBatch);
View Full Code Here

        Identity identity = new Identity();
        identity.setChannelId(100L);
        identity.setPipelineId(100L);
        identity.setProcessId(100L);

        RowBatch rowBatch = new RowBatch();
        rowBatch.setIdentity(identity);

        FileBatch fileBatch = new FileBatch();
        fileBatch.setIdentity(identity);

        final DbBatch dbBatch = new DbBatch();
View Full Code Here

                            if (!CollectionUtils.isEmpty(eventData)) {
                                startTime = eventData.get(0).getExecuteTime();
                            }

                            Channel channel = configClientService.findChannelByPipelineId(pipelineId);
                            RowBatch rowBatch = new RowBatch();
                            // 构造唯一标识
                            Identity identity = new Identity();
                            identity.setChannelId(channel.getId());
                            identity.setPipelineId(pipelineId);
                            identity.setProcessId(etlEventData.getProcessId());
                            rowBatch.setIdentity(identity);
                            // 进行数据合并
                            for (EventData data : eventData) {
                                rowBatch.merge(data);
                            }

                            long nextNodeId = etlEventData.getNextNid();
                            List<PipeKey> pipeKeys = rowDataPipeDelegate.put(new DbBatch(rowBatch), nextNodeId);
                            etlEventData.setDesc(pipeKeys);
View Full Code Here

    private BeanFactory         beanFactory;
    private ConfigClientService configClientService;
    private LoadInterceptor     dbInterceptor;

    public List<LoadContext> load(DbBatch data) {
        final RowBatch rowBatch = data.getRowBatch();
        final FileBatch fileBatch = data.getFileBatch();
        boolean existFileBatch = (rowBatch != null && !CollectionUtils.isEmpty(fileBatch.getFiles()) && data.getRoot() != null);
        boolean existRowBatch = (rowBatch != null && !CollectionUtils.isEmpty(rowBatch.getDatas()));

        int count = 0;
        List<RowBatch> rowBatchs = null;
        if (existRowBatch) {
            rowBatchs = split(rowBatch); // 根据介质内容进行分类合并,每个介质一个载入通道
View Full Code Here

    private List<RowBatch> split(RowBatch rowBatch) {
        final Identity identity = rowBatch.getIdentity();
        Map<DataMediaSource, RowBatch> result = new MapMaker().makeComputingMap(new Function<DataMediaSource, RowBatch>() {

            public RowBatch apply(DataMediaSource input) {
                RowBatch rowBatch = new RowBatch();
                rowBatch.setIdentity(identity);
                return rowBatch;
            }
        });

        for (EventData eventData : rowBatch.getDatas()) {
            // 获取介质信息
            DataMedia media = ConfigHelper.findDataMedia(configClientService.findPipeline(identity.getPipelineId()),
                                                         eventData.getTableId());
            result.get(media.getSource()).merge(eventData); // 归类
        }
View Full Code Here

TOP

Related Classes of com.alibaba.otter.shared.etl.model.RowBatch

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact: coftware#gmail.com.