Package: com.alibaba.otter.shared.common.model.config.pipeline

Examples of com.alibaba.otter.shared.common.model.config.pipeline.Pipeline


     * @return
     */
    private List<FileData> doFileExtract(RowBatch rowBatch) {
        List<FileData> fileDatas = new ArrayList<FileData>();
        // 处理数据
        Pipeline pipeline = getPipeline(rowBatch.getIdentity().getPipelineId());
        List<EventData> eventDatas = rowBatch.getDatas();
        for (EventData eventData : eventDatas) {
            if (eventData.getEventType().isDdl()) {
                continue;
            }

            List<DataMediaPair> dataMediaPairs = ConfigHelper.findDataMediaPairByMediaId(pipeline,
                                                                                         eventData.getTableId());
            if (dataMediaPairs == null) {
                throw new ExtractException("ERROR ## the dataMediaId = " + eventData.getTableId()
                                           + " dataMediaPair is null,please check");
            }

            for (DataMediaPair dataMediaPair : dataMediaPairs) {
                if (dataMediaPair.getResolverData() == null
                    || dataMediaPair.getResolverData().getExtensionDataType() == null
                    || (dataMediaPair.getResolverData().getExtensionDataType().isClazz() && StringUtils.isBlank(dataMediaPair.getResolverData().getClazzPath()))
                    || (dataMediaPair.getResolverData().getExtensionDataType().isSource() && StringUtils.isBlank(dataMediaPair.getResolverData().getSourceText()))) {
                    continue;
                }

                FileResolver fileResolver = null;

                if (dataMediaPair.getResolverData() != null) {
                    fileResolver = extensionFactory.getExtension(FileResolver.class, dataMediaPair.getResolverData());
                } else {
                    continue;
                }

                if (fileResolver == null) {
                    throw new ExtractException("ERROR ## the dataMediaId = " + eventData.getTableId()
                                               + " the fileResolver className  = "
                                               + dataMediaPair.getResolverData().getClazzPath()
                                               + " is null ,please check the class");
                }

                if (fileResolver instanceof RemoteDirectoryFetcherAware) {
                    RemoteDirectoryFetcherAware remoteDirectoryFetcherAware = (RemoteDirectoryFetcherAware) fileResolver;
                    remoteDirectoryFetcherAware.setRemoteDirectoryFetcher(arandaRemoteDirectoryFetcher);
                }

                List<FileData> singleRowFileDatas = getSingleRowFileInfos(dataMediaPair.getId(), fileResolver,
                                                                          eventData);
                // 做一下去重处理
                for (FileData data : singleRowFileDatas) {
                    if (!fileDatas.contains(data)) {
                        fileDatas.add(data);
                    }
                }
            }
        }

        // 判断是否需要进行图片重复同步检查
        if (pipeline.getParameters().getFileDetect()) {
            doFileDetectCollector(pipeline, fileDatas);
        }
        return fileDatas;
    }
View Full Code Here


        return key;
    }

    // 处理对应的附件
    private File unpackFile(HttpPipeKey key) {
        Pipeline pipeline = configClientService.findPipeline(key.getIdentity().getPipelineId());
        DataRetriever dataRetriever = dataRetrieverFactory.createRetriever(pipeline.getParameters().getRetriever(),
                                                                           key.getUrl(), downloadDir);
        File archiveFile = null;
        try {
            dataRetriever.connect();
            dataRetriever.doRetrieve();
View Full Code Here

    private ExtensionFactory extensionFactory;

    public void extract(DbBatch param) throws ExtractException {
        RowBatch rowBatch = param.getRowBatch();
        Pipeline pipeline = getPipeline(rowBatch.getIdentity().getPipelineId());

        List<EventData> eventDatas = rowBatch.getDatas();
        Set<EventData> removeDatas = new HashSet<EventData>();// 使用set,提升remove时的查找速度
        for (EventData eventData : eventDatas) {
            List<DataMediaPair> dataMediaPairs = ConfigHelper.findDataMediaPairByMediaId(pipeline,
View Full Code Here

        result.put(EventData.class, initBatchObject(identity, EventData.class));

        for (EventData eventData : rowBatch.getDatas()) {
            // 处理eventData
            Long tableId = eventData.getTableId();
            Pipeline pipeline = configClientService.findPipeline(identity.getPipelineId());
            // 针对每个同步数据,可能会存在多路复制的情况
            List<DataMediaPair> dataMediaPairs = ConfigHelper.findDataMediaPairByMediaId(pipeline, tableId);
            for (DataMediaPair pair : dataMediaPairs) {
                if (!pair.getSource().getId().equals(tableId)) { // 过滤tableID不为源的同步
                    continue;
View Full Code Here

        result.put(FileData.class, initBatchObject(identity, FileData.class));

        for (FileData fileData : fileDatas) {
            // 进行转化
            Long tableId = fileData.getTableId();
            Pipeline pipeline = configClientService.findPipeline(identity.getPipelineId());
            // 针对每个同步数据,可能会存在多路复制的情况
            List<DataMediaPair> dataMediaPairs = ConfigHelper.findDataMediaPairByMediaId(pipeline, tableId);
            for (DataMediaPair pair : dataMediaPairs) {
                if (!pair.getSource().getId().equals(tableId)) { // 过滤tableID不为源的同步
                    continue;
View Full Code Here

        HttpPipeKey key = new HttpPipeKey();
        key.setUrl(remoteUrlBuilder.getUrl(rowBatch.getIdentity().getPipelineId(), filename));
        key.setDataType(PipeDataType.DB_BATCH);
        key.setIdentity(rowBatch.getIdentity());
        Pipeline pipeline = configClientService.findPipeline(rowBatch.getIdentity().getPipelineId());
        if (pipeline.getParameters().getUseFileEncrypt()) {
            // 加密处理
            EncryptedData encryptedData = encryptFile(file);
            key.setKey(encryptedData.getKey());
            key.setCrc(encryptedData.getCrc());
        }
View Full Code Here

    }

    // 处理对应的dbBatch
    private DbBatch getDbBatch(HttpPipeKey key) {
        String dataUrl = key.getUrl();
        Pipeline pipeline = configClientService.findPipeline(key.getIdentity().getPipelineId());
        DataRetriever dataRetriever = dataRetrieverFactory.createRetriever(pipeline.getParameters().getRetriever(),
                                                                           dataUrl, downloadDir);
        File archiveFile = null;
        try {
            dataRetriever.connect();
            dataRetriever.doRetrieve();
View Full Code Here

    public void extract(DbBatch dbBatch) throws ExtractException {
        Assert.notNull(dbBatch);

        // 读取配置
        Pipeline pipeline = getPipeline(dbBatch.getRowBatch().getIdentity().getPipelineId());

        boolean skipFreedom = pipeline.getParameters().getSkipFreedom();
        String bufferSchema = pipeline.getParameters().getSystemSchema();
        String bufferTable = pipeline.getParameters().getSystemBufferTable();

        List<EventData> eventDatas = dbBatch.getRowBatch().getDatas();
        Set<EventData> removeDatas = new HashSet<EventData>();// 使用set,提升remove时的查找速度
        for (EventData eventData : eventDatas) {
            if (StringUtils.equalsIgnoreCase(bufferSchema, eventData.getSchemaName())
                && StringUtils.equalsIgnoreCase(bufferTable, eventData.getTableName())) {
                if (eventData.getEventType().isDdl()) {
                    continue;
                }

                if (skipFreedom) {// 判断是否需要忽略
                    removeDatas.add(eventData);
                    continue;
                }

                // 只处理insert / update记录
                if (eventData.getEventType().isInsert() || eventData.getEventType().isUpdate()) {
                    // 重新改写一下EventData的数据,根据系统表的定义
                    EventColumn tableIdColumn = getMatchColumn(eventData.getColumns(), TABLE_ID);
                    // 获取到对应tableId的media信息
                    try {
                        DataMedia dataMedia = null;
                        Long tableId = Long.valueOf(tableIdColumn.getColumnValue());
                        eventData.setTableId(tableId);
                        if (tableId <= 0) { //直接按照full_name进行查找
                            //尝试直接根据schema+table name进行查找
                            EventColumn fullNameColumn = getMatchColumn(eventData.getColumns(), FULL_NAME);
                            if (fullNameColumn != null) {
                                String[] names = StringUtils.split(fullNameColumn.getColumnValue(), ".");
                                if (names.length >= 2) {
                                    dataMedia = ConfigHelper.findSourceDataMedia(pipeline, names[0], names[1]);
                                    eventData.setTableId(dataMedia.getId());
                                } else {
                                    throw new ConfigException("no such DataMedia " + names);
                                }
                            }
                        } else {
                            // 如果指定了tableId,需要按照tableId进行严格查找,如果没找到,那说明不需要进行同步
                            dataMedia = ConfigHelper.findDataMedia(pipeline,
                                                                   Long.valueOf(tableIdColumn.getColumnValue()));
                        }

                        DbDialect dbDialect = dbDialectFactory.getDbDialect(pipeline.getId(),
                                                                            (DbMediaSource) dataMedia.getSource());
                        // 考虑offer[1-128]的配置模式
                        if (!dataMedia.getNameMode().getMode().isSingle()
                            || !dataMedia.getNamespaceMode().getMode().isSingle()) {
                            boolean hasError = true;
View Full Code Here

    @Override
    public void extract(DbBatch dbBatch) throws ExtractException {
        Assert.notNull(dbBatch);
        Assert.notNull(dbBatch.getRowBatch());

        Pipeline pipeline = getPipeline(dbBatch.getRowBatch().getIdentity().getPipelineId());
        List<DataMediaPair> dataMediaPairs = pipeline.getPairs();

        /**
         * Key = TableId<br>
         * Value = a List of this tableId's column need to sync<br>
         */
 
View Full Code Here

     * 3. retl.xdual canal心跳表数据过滤
     * </pre>
     */
    public List<EventData> parse(Long pipelineId, List<Entry> datas) throws SelectException {
        List<EventData> eventDatas = new ArrayList<EventData>();
        Pipeline pipeline = configClientService.findPipeline(pipelineId);
        List<Entry> transactionDataBuffer = new ArrayList<Entry>();
        // hz为主站点,us->hz的数据,需要回环同步会us。并且需要开启回环补救算法
        PipelineParameter pipelineParameter = pipeline.getParameters();
        boolean enableLoopbackRemedy = pipelineParameter.isEnableRemedy() && pipelineParameter.isHome()
                                       && pipelineParameter.getRemedyAlgorithm().isLoopback();
        boolean isLoopback = false;
        boolean needLoopback = false; // 判断是否属于需要loopback处理的类型,只处理正常otter同步产生的回环数据,因为会有业务方手工屏蔽同步的接口,避免回环

        long now = new Date().getTime();
        try {
            for (Entry entry : datas) {
                switch (entry.getEntryType()) {
                    case TRANSACTIONBEGIN:
                        isLoopback = false;
                        break;
                    case ROWDATA:
                        String schemaName = entry.getHeader().getSchemaName();
                        String tableName = entry.getHeader().getTableName();
                        // 判断是否是回环表retl_mark
                        boolean isMarkTable = schemaName.equalsIgnoreCase(pipeline.getParameters().getSystemSchema())
                                              && tableName.equalsIgnoreCase(pipeline.getParameters()
                                                  .getSystemMarkTable());
                        if (isMarkTable) {
                            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                            if (!rowChange.getIsDdl()) {
                                int loopback = checkLoopback(pipeline, rowChange.getRowDatas(0));
                                if (loopback == 2) {
                                    needLoopback |= true; // 只处理正常同步产生的回环数据
                                }

                                isLoopback |= loopback > 0;
                            }
                        }

                        // 检查下otter3.0的回环表,对应的schmea会比较随意,所以不做比较
                        boolean isCompatibleLoopback = tableName.equalsIgnoreCase(compatibleMarkTable);

                        if (isCompatibleLoopback) {
                            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                            if (!rowChange.getIsDdl()) {
                                int loopback = checkCompatibleLoopback(pipeline, rowChange.getRowDatas(0));
                                if (loopback == 2) {
                                    needLoopback |= true; // 只处理正常同步产生的回环数据
                                }
                                isLoopback |= loopback > 0;
                            }
                        }

                        if ((!isLoopback || (enableLoopbackRemedy && needLoopback)) && !isMarkTable
                            && !isCompatibleLoopback) {
                            transactionDataBuffer.add(entry);
                        }
                        break;
                    case TRANSACTIONEND:
                        if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
                            // 添加数据解析
                            for (Entry bufferEntry : transactionDataBuffer) {
                                List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                                if (CollectionUtils.isEmpty(parseDatas)) {// 可能为空,针对ddl返回时就为null
                                    continue;
                                }

                                // 初步计算一下事件大小
                                long totalSize = bufferEntry.getHeader().getEventLength();
                                long eachSize = totalSize / parseDatas.size();
                                for (EventData eventData : parseDatas) {
                                    if (eventData == null) {
                                        continue;
                                    }

                                    eventData.setSize(eachSize);// 记录一下大小
                                    if (needLoopback) {// 针对需要回环同步的
                                        // 如果延迟超过指定的阀值,则设置为需要反查db
                                        if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters()
                                            .getRemedyDelayThresoldForMedia()) {
                                            eventData.setSyncConsistency(SyncConsistency.MEDIA);
                                        } else {
                                            eventData.setSyncConsistency(SyncConsistency.BASE);
                                        }
                                        eventData.setRemedy(true);
                                    }
                                    eventDatas.add(eventData);
                                }
                            }
                        }

                        isLoopback = false;
                        needLoopback = false;
                        transactionDataBuffer.clear();
                        break;
                    default:
                        break;
                }
            }

            // 添加最后一次的数据,可能没有TRANSACTIONEND
            if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
                // 添加数据解析
                for (Entry bufferEntry : transactionDataBuffer) {
                    List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                    if (CollectionUtils.isEmpty(parseDatas)) {// 可能为空,针对ddl返回时就为null
                        continue;
                    }

                    // 初步计算一下事件大小
                    long totalSize = bufferEntry.getHeader().getEventLength();
                    long eachSize = totalSize / parseDatas.size();
                    for (EventData eventData : parseDatas) {
                        if (eventData == null) {
                            continue;
                        }

                        eventData.setSize(eachSize);// 记录一下大小
                        if (needLoopback) {// 针对需要回环同步的
                            // 如果延迟超过指定的阀值,则设置为需要反查db
                            if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters()
                                .getRemedyDelayThresoldForMedia()) {
                                eventData.setSyncConsistency(SyncConsistency.MEDIA);
                            } else {
                                eventData.setSyncConsistency(SyncConsistency.BASE);
                            }
View Full Code Here

TOP

Related Classes of com.alibaba.otter.shared.common.model.config.pipeline.Pipeline

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware#gmail.com.