// Timestamp of job start; elapsed time is computed from it in the finally block.
Date now = new Date();
// Human-readable source/target descriptors ("plugin/ip/db/table"), recorded in the job DB.
String source = null;
String target = null;
MonitorManager monitorManager = null;
long time = 0;
// Job starts in RUNNING; statusCode tracks the raw code (may carry finer-grained
// failure codes than the enum) and is preferred in the final return when set.
JobStatus status = JobStatus.RUNNING;
int statusCode = status.getStatus();
WriterManager writerManager = null;
try {
    s_logger.info("Nebula wormhole Start");
    // create instance of StorageManager & MonitorManager
    List<StorageConf> storageConfList = createStorageConfs(jobConf);
    if (storageConfList == null || storageConfList.isEmpty()) {
        s_logger.error("No writer is defined in job configuration or there are some errors in writer configuration");
        return JobStatus.FAILED.getStatus();
    }
    int writerNum = jobConf.getWriterNum();
    StorageManager storageManager = new StorageManager(storageConfList);
    monitorManager = new MonitorManager(writerNum);
    monitorManager.setStorageManager(storageManager);
    // get job conf and start reader & writer
    JobPluginConf readerConf = jobConf.getReaderConf();
    List<JobPluginConf> writerConf = jobConf.getWriterConfs();
    AbstractPluginManager.regDataSourceProp(readerConf.getPluginParam());
    // get source and target info before prepare phase, due to it may
    // throw exception during prepare
    source = readerConf.getPluginName() + "/"
            + readerConf.getPluginParam().getValue(IP, "IP_UNKNOWN")
            + "/" + readerConf.getPluginParam().getValue(DB, "") + "/"
            + readerConf.getPluginParam().getValue(TABLE, "");
    // Build the comma-separated target descriptor, one entry per writer.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < writerConf.size(); i++) {
        JobPluginConf conf = writerConf.get(i);
        AbstractPluginManager.regDataSourceProp(conf.getPluginParam());
        sb.append(conf.getPluginName())
                .append("/")
                .append(conf.getPluginParam().getValue(IP, "IPUnknown"))
                .append("/")
                .append(conf.getPluginParam().getValue(DB, ""))
                .append("/")
                .append(conf.getPluginParam().getValue(TABLE, ""));
        if (i != writerConf.size() - 1) {
            sb.append(",");
        }
    }
    target = sb.toString();
    IParam readerPluginParam = pluginReg.get(readerConf.getPluginName());
    s_logger.info("Start Reader Threads");
    ReaderManager readerManager = ReaderManager.getInstance(
            storageManager, monitorManager);
    readerManager.run(readerConf, readerPluginParam);
    s_logger.info("Start Writer Threads");
    writerManager = WriterManager.getInstance(storageManager,
            monitorManager, writerNum);
    writerManager.run(writerConf, pluginReg);
    int intervalCount = 0;
    int statusCheckInterval = engineConf.getIntValue(
            EngineConfParamKey.STATUS_CHECK_INTERVAL,
            STATUS_CHECK_INTERVAL);
    int monitorInfoDisplayPeriod = engineConf.getIntValue(
            EngineConfParamKey.MONITOR_INFO_DISPLAY_PERIOD,
            INFO_SHOW_PERIOD);
    writerConsistency = engineConf.getBooleanValue(
            EngineConfParamKey.WRITER_CONSISTENCY, false);
    // Poll reader/writer status until the job reaches a terminal state.
    while (true) {
        intervalCount++;
        statusCode = checkStatus(readerManager, writerManager,
                monitorManager, storageManager);
        status = JobStatus.fromStatus(statusCode);
        if (status == null) {
            s_logger.error("status = " + statusCode
                    + ".This should never happen");
            return JobStatus.FAILED.getStatus();
        }
        // read failed rollback all
        if (status == JobStatus.FAILED
                || (status.getStatus() >= JobStatus.FAILED.getStatus()
                        && status.getStatus() < JobStatus.WRITE_FAILED.getStatus())) {
            s_logger.error("Nebula wormhole Job is Failed!");
            writerManager.rollbackAll();
            break;
        } else if (status == JobStatus.PARTIAL_FAILED
                || status.getStatus() >= JobStatus.WRITE_FAILED.getStatus()) {
            // Only some writers failed: roll back just those writers.
            Set<String> failedIDs = getFailedWriterIDs(writerManager,
                    monitorManager);
            s_logger.error("Some of the writer is Failed:"
                    + failedIDs.toString());
            writerManager.rollback(failedIDs);
            break;
        } else if (status == JobStatus.SUCCESS_WITH_ERROR) {
            s_logger.error("Nebula wormhole Job is Completed successfully with a few abnormal data");
            break;
        } else if (status == JobStatus.SUCCESS) {
            s_logger.info("Nebula wormhole Job is Completed successfully!");
            break;
        }
        // Running
        else if (status == JobStatus.RUNNING) {
            if (intervalCount % monitorInfoDisplayPeriod == 0) {
                s_logger.info(monitorManager.realtimeReport());
            }
            try {
                Thread.sleep(statusCheckInterval);
            } catch (InterruptedException e) {
                s_logger.error("Sleep of main thread is interrupted!", e);
                // Restore the interrupt flag so callers can still observe it.
                Thread.currentThread().interrupt();
            }
        }
    }
} catch (WormholeException e) {
    if (!status.isFailed()) {
        statusCode = e.getStatusCode();
        status = JobStatus.fromStatus(e.getStatusCode());
        if (status == null) {
            s_logger.error("status = " + statusCode
                    + ".This should never happen");
            return JobStatus.FAILED.getStatus();
        }
    }
    // Null-safe enum comparison: fromStatus may return null for unknown codes.
    if (JobStatus.ROLL_BACK_FAILED == JobStatus.fromStatus(e.getStatusCode())) {
        s_logger.error("Roll back failed: " + e.getPluginID(), e);
    } else {
        s_logger.error("Nebula wormhole Job is Failed!", e);
        try {
            // writerManager may still be null if the failure happened before
            // the writers were started (e.g. during reader prepare).
            if (writerManager != null) {
                writerManager.killAll();
                writerManager.rollbackAll();
            }
        } catch (Exception e1) {
            s_logger.error("Roll back all failed ", e1);
        }
    }
} catch (InterruptedException e) {
    status = JobStatus.FAILED;
    s_logger.error(
            "Nebula wormhole Job is Failed as it is interrupted when prepare to read or write",
            e);
    // Restore the interrupt flag so callers can still observe it.
    Thread.currentThread().interrupt();
} catch (ExecutionException e) {
    status = JobStatus.FAILED;
    s_logger.error(
            "Nebula wormhole Job is Failed as it is failed when prepare to read or write",
            e);
} catch (TimeoutException e) {
    status = JobStatus.FAILED;
    s_logger.error(
            "Nebula wormhole Job is Failed as it is timeout when prepare to read or write",
            e);
} catch (Exception e) {
    if (!status.isFailed()) {
        status = JobStatus.FAILED;
    }
    s_logger.error("Nebula wormhole Job is Failed!", e);
    s_logger.error("Unknown Exception occurs, will roll back all");
    try {
        if (writerManager != null) {
            writerManager.killAll();
            writerManager.rollbackAll();
        }
    } catch (Exception e1) {
        s_logger.error("Roll back all failed ", e1);
    }
} finally {
    // Persist the job record and emit the final report. Both depend on
    // monitorManager, which is null when the job bailed out before setup
    // (e.g. empty writer configuration) — guard every use of it.
    time = new Date().getTime() - now.getTime();
    if (monitorManager != null) {
        WormHoleJobInfo jobInfo = monitorManager.getJobInfo(source,
                target, time / 1000, status.getStatus(), now);
        JobDBUtil.insertOneJobInfo(jobInfo);
        s_logger.info(monitorManager.finalReport());
    }
}
// Prefer the raw code when it carries a terminal value (it may encode a
// finer-grained failure than the enum); otherwise fall back to the enum.
if (statusCode != JobStatus.RUNNING.getStatus()) {
    return statusCode;
} else {
    return status.getStatus();
}
}