Package io.druid.indexing.common.task

Examples of io.druid.indexing.common.task.Task
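
As a rough orientation before the snippets: the examples below come from Druid's indexing service and its tests, and they exercise a Task through a small surface of methods (getId(), getDataSource(), isReady(), run(), getQueryRunner()). The following is a minimal, hypothetical sketch of that shape only, not the real Task interface; in the real API isReady() takes a TaskActionClient and run() takes a TaskToolbox and returns a TaskStatus, as the snippets show.

// Simplified, hypothetical stand-in for the Task contract -- not the real interface.
public class TaskContractSketch
{
  enum Status { SUCCESS, FAILED }

  interface SketchTask
  {
    String getId();
    String getDataSource();
    boolean isReady() throws Exception;   // real API: isReady(TaskActionClient)
    Status run() throws Exception;        // real API: run(TaskToolbox) returning TaskStatus
  }

  public static void main(String[] args) throws Exception
  {
    final SketchTask noop = new SketchTask()
    {
      @Override public String getId() { return "noop_sketch"; }
      @Override public String getDataSource() { return "foo"; }
      @Override public boolean isReady() { return true; }
      @Override public Status run() { return Status.SUCCESS; }
    };

    if (noop.isReady()) {
      System.out.println("Task " + noop.getId() + " finished with status " + noop.run());
    }
  }
}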


  {
    final File taskFile = Preconditions.checkNotNull(config.getTaskFile(), "taskFile");
    final File statusFile = Preconditions.checkNotNull(config.getStatusFile(), "statusFile");
    final InputStream parentStream = Preconditions.checkNotNull(config.getParentStream(), "parentStream");

    final Task task;

    try {
      task = jsonMapper.readValue(taskFile, Task.class);

      log.info(
          "Running with task: %s",
          jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(task)
      );
    }
    catch (IOException e) {
      throw Throwables.propagate(e);
    }

    // Spawn monitor thread to keep a watch on parent's stdin
    // If stdin reaches eof, the parent is gone, and we should shut down
    parentMonitorExec.submit(
        new Runnable()
        {
          @Override
          public void run()
          {
            try {
              while (parentStream.read() != -1) {
                // Toss the byte
              }
            }
            catch (Exception e) {
              log.error(e, "Failed to read from stdin");
            }

            // Kind of gross, but best way to kill the JVM as far as I know
            log.info("Triggering JVM shutdown.");
            System.exit(2);
          }
        }
    );

    // Won't hurt in remote mode, and is required for setting up locks in local mode:
    try {
      if (!task.isReady(taskActionClientFactory.create(task))) {
        throw new ISE("Task is not ready to run yet!", task.getId());
      }
    } catch (Exception e) {
      throw new ISE(e, "Failed to run isReady", task.getId());
    }

    statusFuture = Futures.transform(
        taskRunner.run(task),
        new Function<TaskStatus, TaskStatus>()
View Full Code Here
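
The parent-monitor thread in the snippet above is a general pattern for a forked child process: the parent keeps the child's stdin open, so end-of-file on stdin means the parent has died and the child should shut itself down. Below is a minimal, self-contained sketch of just that watchdog using only the JDK; the class name, executor, and exit code are illustrative rather than the real peon wiring.

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParentWatchdogSketch
{
  public static void main(String[] args)
  {
    // In the forked-child setup, stdin is wired to the parent process,
    // so EOF here means the parent has gone away.
    final InputStream parentStream = System.in;
    final ExecutorService monitorExec = Executors.newSingleThreadExecutor();

    monitorExec.submit(
        new Runnable()
        {
          @Override
          public void run()
          {
            try {
              while (parentStream.read() != -1) {
                // Toss the byte
              }
            }
            catch (IOException e) {
              e.printStackTrace();
            }
            // Parent is gone: halt this JVM. The exit code is illustrative.
            System.exit(2);
          }
        }
    );

    // ... a real child would go on to deserialize and run its task here ...
  }
}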


      running.clear();
      // Bookkeeping for a log message at the end
      final Set<String> uniqueTaskIds = Sets.newHashSet();
      int taskLockCount = 0;
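      // Replay each stored lock in version order, trying to reacquire it at its saved version.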
      for (final Pair<Task, TaskLock> taskAndLock : byVersionOrdering.sortedCopy(storedLocks)) {
        final Task task = taskAndLock.lhs;
        final TaskLock savedTaskLock = taskAndLock.rhs;
        if (savedTaskLock.getInterval().toDurationMillis() <= 0) {
          // "Impossible", but you never know what crazy stuff can be restored from storage.
          log.warn("WTF?! Got lock with empty interval for task: %s", task.getId());
          continue;
        }
        uniqueTaskIds.add(task.getId());
        final Optional<TaskLock> acquiredTaskLock = tryLock(
            task,
            savedTaskLock.getInterval(),
            Optional.of(savedTaskLock.getVersion())
        );
        if (acquiredTaskLock.isPresent() && savedTaskLock.getVersion().equals(acquiredTaskLock.get().getVersion())) {
          taskLockCount++;
          log.info(
              "Reacquired lock on interval[%s] version[%s] for task: %s",
              savedTaskLock.getInterval(),
              savedTaskLock.getVersion(),
              task.getId()
          );
        } else if (acquiredTaskLock.isPresent()) {
          taskLockCount++;
          log.info(
              "Could not reacquire lock on interval[%s] version[%s] (got version[%s] instead) for task: %s",
              savedTaskLock.getInterval(),
              savedTaskLock.getVersion(),
              acquiredTaskLock.get().getVersion(),
              task.getId()
          );
        } else {
          log.info(
              "Could not reacquire lock on interval[%s] version[%s] for task: %s",
              savedTaskLock.getInterval(),
              savedTaskLock.getVersion(),
              task.getId()
          );
        }
      }
      log.info(
          "Synced %,d locks for %,d tasks from storage (%,d locks ignored).",
View Full Code Here
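
The sync loop above distinguishes three outcomes when replaying a stored lock: reacquired at the same version, reacquired at a different version, or not reacquired at all. Here is a stripped-down sketch of just that branching, using java.util.Optional in place of the Guava Optional from the snippet; the method and variable names are illustrative.

import java.util.Optional;

public class LockReplaySketch
{
  // savedVersion is the version recorded in storage; reacquiredVersion is the
  // version that came back from the new lock attempt, if any.
  static String classify(String savedVersion, Optional<String> reacquiredVersion)
  {
    if (reacquiredVersion.isPresent() && savedVersion.equals(reacquiredVersion.get())) {
      return "reacquired at the same version";
    } else if (reacquiredVersion.isPresent()) {
      return "reacquired, but version changed to " + reacquiredVersion.get();
    } else {
      return "could not reacquire";
    }
  }

  public static void main(String[] args)
  {
    System.out.println(classify("v1", Optional.of("v1")));
    System.out.println(classify("v1", Optional.of("v2")));
    System.out.println(classify("v1", Optional.empty()));
  }
}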

            @Override
            public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent)
                throws Exception
            {
              if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                final Task task = jsonMapper.readValue(
                    cf.getData().forPath(pathChildrenCacheEvent.getData().getPath()),
                    Task.class
                );

                if (isTaskRunning(task)) {
                  log.warn(
                      "I can't build it. There's something in the way. Got task %s that I am already running...",
                      task.getId()
                  );
                  workerCuratorCoordinator.unannounceTask(task.getId());
                  return;
                }

                log.info("Submitting runnable for task[%s]", task.getId());

                exec.submit(
                    new Runnable()
                    {
                      @Override
                      public void run()
                      {
                        final long startTime = System.currentTimeMillis();

                        log.info("Affirmative. Running task [%s]", task.getId());
                        running.add(task);

                        TaskStatus taskStatus;
                        try {
                          workerCuratorCoordinator.unannounceTask(task.getId());
                          workerCuratorCoordinator.announceTastAnnouncement(
                              TaskAnnouncement.create(
                                  task,
                                  TaskStatus.running(task.getId())
                              )
                          );
                          taskStatus = taskRunner.run(task).get();
                        }
                        catch (Exception e) {
                          log.makeAlert(e, "I can't build there. Failed to run task")
                             .addData("task", task.getId())
                             .emit();
                          taskStatus = TaskStatus.failure(task.getId());
                        }
                        finally {
                          running.remove(task);
                        }

                        taskStatus = taskStatus.withDuration(System.currentTimeMillis() - startTime);

                        try {
                          workerCuratorCoordinator.updateAnnouncement(TaskAnnouncement.create(task, taskStatus));
                          log.info(
                              "Job's finished. Completed [%s] with status [%s]",
                              task.getId(),
                              taskStatus.getStatusCode()
                          );
                        }
                        catch (Exception e) {
                          log.makeAlert(e, "Failed to update task status")
                             .addData("task", task.getId())
                             .emit();
                        }
                      }
                    }
                );
View Full Code Here
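
The worker runnable above follows a fixed lifecycle: announce the task as running, execute it, convert any exception into a failure status, stamp the elapsed duration, and finally publish the terminal status. Here is a compact, self-contained sketch of that ordering; the Coordinator interface is a hypothetical stand-in for the Curator-backed workerCuratorCoordinator in the snippet.

import java.util.concurrent.Callable;

public class RunAndAnnounceSketch
{
  enum Status { RUNNING, SUCCESS, FAILED }

  // Hypothetical stand-in for the announcement side of the coordinator.
  interface Coordinator
  {
    void announce(String taskId, Status status);
  }

  static Status runAndAnnounce(String taskId, Callable<Status> body, Coordinator coordinator)
  {
    final long startTime = System.currentTimeMillis();
    coordinator.announce(taskId, Status.RUNNING);

    Status status;
    try {
      status = body.call();
    }
    catch (Exception e) {
      // Any failure becomes a terminal FAILED status instead of propagating.
      status = Status.FAILED;
    }

    final long durationMillis = System.currentTimeMillis() - startTime;
    System.out.println("Task " + taskId + " took " + durationMillis + " ms");

    coordinator.announce(taskId, status);
    return status;
  }

  public static void main(String[] args)
  {
    final Coordinator logCoordinator = new Coordinator()
    {
      @Override
      public void announce(String taskId, Status status)
      {
        System.out.println("announce " + taskId + " -> " + status);
      }
    };

    runAndAnnounce(
        "example_task",
        new Callable<Status>()
        {
          @Override
          public Status call()
          {
            return Status.SUCCESS;
          }
        },
        logCoordinator
    );
  }
}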

  {
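    // Pick a query runner from whichever running task serves the query's data source.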
    QueryRunner<T> queryRunner = null;
    final String queryDataSource = Iterables.getOnlyElement(query.getDataSource().getNames());

    for (final ThreadPoolTaskRunnerWorkItem taskRunnerWorkItem : ImmutableList.copyOf(runningItems)) {
      final Task task = taskRunnerWorkItem.getTask();
      if (task.getDataSource().equals(queryDataSource)) {
        final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);

        if (taskQueryRunner != null) {
          if (queryRunner == null) {
            queryRunner = taskQueryRunner;
          } else {
View Full Code Here

  }

  @Test
  public void testIndexTask() throws Exception
  {
    final Task indexTask = new IndexTask(
        null,
        null,
        "foo",
        new UniformGranularitySpec(
            Granularity.DAY,
            null,
            ImmutableList.of(new Interval("2010-01-01/P2D")),
            Granularity.DAY
        ),
        new AggregatorFactory[]{new DoubleSumAggregatorFactory("met", "met")},
        QueryGranularity.NONE,
        10000,
        newMockFirehoseFactory(
            ImmutableList.of(
                IR("2010-01-01T01", "x", "y", 1),
                IR("2010-01-01T01", "x", "z", 1),
                IR("2010-01-02T01", "a", "b", 2),
                IR("2010-01-02T01", "a", "c", 1)
            )
        ),
        -1,
        TestUtils.MAPPER
    );

    final Optional<TaskStatus> preRunTaskStatus = tsqa.getStatus(indexTask.getId());
    Assert.assertTrue("pre run task status not present", !preRunTaskStatus.isPresent());

    final TaskStatus mergedStatus = runTask(indexTask);
    final TaskStatus status = ts.getStatus(indexTask.getId()).get();
    final List<DataSegment> publishedSegments = byIntervalOrdering.sortedCopy(mdc.getPublished());
    final List<DataSegment> loggedSegments = byIntervalOrdering.sortedCopy(tsqa.getInsertedSegments(indexTask.getId()));

    Assert.assertEquals("statusCode", TaskStatus.Status.SUCCESS, status.getStatusCode());
    Assert.assertEquals("merged statusCode", TaskStatus.Status.SUCCESS, mergedStatus.getStatusCode());
    Assert.assertEquals("segments logged vs published", loggedSegments, publishedSegments);
    Assert.assertEquals("num segments published", 2, mdc.getPublished().size());
View Full Code Here

  }

  @Test
  public void testIndexTaskFailure() throws Exception
  {
    final Task indexTask = new IndexTask(
        null,
        null,
        "foo",
        new UniformGranularitySpec(Granularity.DAY, null, ImmutableList.of(new Interval("2010-01-01/P1D")), Granularity.DAY),
        new AggregatorFactory[]{new DoubleSumAggregatorFactory("met", "met")},
View Full Code Here

  @Test
  public void testKillTask() throws Exception
  {
    // This test doesn't verify much yet. The mocked coordinator should be seeded with segments
    // so that the kill can actually be exercised and checked.
    final Task killTask = new KillTask(null, "foo", new Interval("2010-01-02/P2D"));

    final TaskStatus status = runTask(killTask);
    Assert.assertEquals("merged statusCode", TaskStatus.Status.SUCCESS, status.getStatusCode());
    Assert.assertEquals("num segments published", 0, mdc.getPublished().size());
    Assert.assertEquals("num segments nuked", 0, mdc.getNuked().size());
View Full Code Here

  }

  @Test
  public void testRealtimeishTask() throws Exception
  {
    final Task rtishTask = new RealtimeishTask();
    final TaskStatus status = runTask(rtishTask);

    Assert.assertEquals("statusCode", TaskStatus.Status.SUCCESS, status.getStatusCode());
    Assert.assertEquals("num segments published", 2, mdc.getPublished().size());
    Assert.assertEquals("num segments nuked", 0, mdc.getNuked().size());
View Full Code Here

  }

  @Test
  public void testNoopTask() throws Exception
  {
    final Task noopTask = new DefaultObjectMapper().readValue(
        "{\"type\":\"noop\", \"runTime\":\"100\"}\"",
        Task.class
    );
    final TaskStatus status = runTask(noopTask);
View Full Code Here
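
The noop task above is deserialized directly from JSON through the configured Jackson mapper, which resolves the "type" field to a concrete task class. As a minimal, self-contained illustration of the same round-trip with a plain ObjectMapper, here is a sketch that uses a hypothetical stand-in POJO (NoopSpec is not a real Druid class):

import com.fasterxml.jackson.databind.ObjectMapper;

public class NoopJsonSketch
{
  // Hypothetical stand-in mirroring the fields used in the test JSON.
  public static class NoopSpec
  {
    public String type;
    public String runTime;
  }

  public static void main(String[] args) throws Exception
  {
    final ObjectMapper mapper = new ObjectMapper();
    final NoopSpec spec = mapper.readValue(
        "{\"type\":\"noop\", \"runTime\":\"100\"}",
        NoopSpec.class
    );
    System.out.println(spec.type + " task would run for " + spec.runTime + " ms");
  }
}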

  }

  @Test
  public void testNeverReadyTask() throws Exception
  {
    final Task neverReadyTask = new DefaultObjectMapper().readValue(
        "{\"type\":\"noop\", \"isReadyResult\":\"exception\"}\"",
        Task.class
    );
    final TaskStatus status = runTask(neverReadyTask);
View Full Code Here
