Package org.apache.spark

Examples of org.apache.spark.SparkConf
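SparkConf holds a Spark application's settings as key-value pairs (keys are prefixed with "spark."); by default the no-arg constructor also picks up any spark.* JVM system properties, such as those set by spark-submit. As a baseline for the excerpts below, a minimal sketch of the common lifecycle (the app name and "local[2]" master are placeholder values):

  // Minimal SparkConf -> JavaSparkContext lifecycle; values are placeholders.
  SparkConf conf = new SparkConf()
      .setAppName("example-app")
      .setMaster("local[2]");
  JavaSparkContext sc = new JavaSparkContext(conf);
  try {
    // ... submit jobs against sc ...
  } finally {
    sc.stop(); // always release the context when done
  }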



    Configuration conf = getConfiguration();
    if (sparkContext == null) {
      SparkConf sparkConf = new SparkConf();
      // Forward every Hadoop Configuration entry with the "spark." prefix into the SparkConf
      for (Map.Entry<String, String> e : conf) {
        if (e.getKey().startsWith("spark.")) {
          sparkConf.set(e.getKey(), e.getValue());
        }
      }
      // sparkConnect holds the master URL (e.g. "local" or "spark://host:port")
      this.sparkContext = new JavaSparkContext(sparkConnect, getName(), sparkConf);
      if (jarClass != null) {
        // Locate the jar that contains jarClass so it can be shipped to the executors
        String[] jars = JavaSparkContext.jarOfClass(jarClass);
        for (String jar : jars) {
          sparkContext.addJar(jar); // plausible completion: the excerpt is truncated here
        }
      }
    }



  // Lazily create a process-wide SparkConf for local test runs
  public synchronized static SparkConf getSparkConf() {
    if (sparkConf == null) {
      sparkConf = new SparkConf()
          .setAppName("test")
          .setMaster("local");
    }
    return sparkConf;
  }
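
A hedged sketch of how a shared conf like this might be consumed from a test (the parallelize/count job is illustrative; requires java.util.Arrays):

  JavaSparkContext sc = new JavaSparkContext(getSparkConf());
  try {
    long n = sc.parallelize(Arrays.asList(1, 2, 3)).count();
    System.out.println("count = " + n); // 3
  } finally {
    sc.stop(); // stop the local context so later tests can create their own
  }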

     
      // MR1: ask the JobTracker how many reduce slots the cluster has
      int reducers = new JobClient(pipeline.getConfiguration()).getClusterStatus().getMaxReduceTasks();
      // On YARN only: reducers = job.getCluster().getClusterStatus().getReduceSlotCapacity();
      LOG.info("Cluster reports {} reduce slots", reducers);
    } else if (opts.pipelineType == PipelineType.spark) {
      SparkConf sconf = new SparkConf();
      // Prefer a short, readable app name unless one was set explicitly (e.g. spark-submit --name)
      if (!sconf.contains("spark.app.name") || sconf.get("spark.app.name").equals(getClass().getName())) {
        sconf.setAppName(Utils.getShortClassName(getClass()));
      }
      JavaSparkContext sparkContext = new JavaSparkContext(sconf);
      pipeline = new SparkPipeline(sparkContext, sparkContext.appName());
      pipeline.setConfiguration(getConf());
    } else {
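
Why the contains() check above works: the no-arg SparkConf constructor loads any JVM system properties whose keys start with "spark.", so a name supplied externally is already present in the conf. A minimal sketch demonstrating that behavior (setting the property by hand here simulates spark-submit --name):

  // Simulate an externally supplied app name, e.g. via spark-submit --name
  System.setProperty("spark.app.name", "ExternalName");
  SparkConf probe = new SparkConf(); // loads spark.* system properties by default
  System.out.println(probe.contains("spark.app.name")); // true
  System.out.println(probe.get("spark.app.name"));      // ExternalName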

  /**
   * Initializes the {@link SparkConf} with proper settings.
   *
   * @return the initialized {@link SparkConf}
   */
  private SparkConf initializeSparkConf() {
    SparkConf sparkConf = new SparkConf();
    sparkConf.setAppName(basicSparkContext.getProgramName());
    return sparkConf;
  }

    Configuration conf = new Configuration();
    DatasetKeyInputFormat.configure(conf).readFrom(eventsUri).withType(StandardEvent.class);
    DatasetKeyOutputFormat.configure(conf).writeTo(correlatedEventsUri).withType(CorrelatedEvents.class);

    // Create our Spark configuration and get a Java context
    SparkConf sparkConf = new SparkConf()
        .setAppName("Correlate Events")
        // Configure the use of Kryo serialization including our Avro registrator
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .set("spark.kryo.registrator", "org.kitesdk.examples.spark.AvroKyroRegistrator");
    JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);
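
For context, a registrator such as the AvroKyroRegistrator named above implements org.apache.spark.serializer.KryoRegistrator. A minimal illustrative sketch, not the Kite SDK implementation:

  import com.esotericsoftware.kryo.Kryo;
  import org.apache.spark.serializer.KryoRegistrator;

  // Illustrative registrator: tells Kryo about the application's record types.
  public class ExampleRegistrator implements KryoRegistrator {
    @Override
    public void registerClasses(Kryo kryo) {
      kryo.register(StandardEvent.class);    // assumption: the Avro types used above
      kryo.register(CorrelatedEvents.class);
    }
  }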

        /* In this example, we're going to create 1 Kinesis Worker/Receiver/DStream for each shard */
        int numStreams = numShards;

        /* Setup the Spark config. */
        SparkConf sparkConfig = new SparkConf().setAppName("KinesisWordCount");

        /* Kinesis checkpoint interval.  Same as batchInterval for this example. */
        Duration checkpointInterval = batchInterval;

        /* Setup the StreamingContext */
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval);
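
The full example then creates one receiver DStream per shard and unions them into a single stream. A hedged sketch of that pattern; createShardStream is a placeholder for the actual KinesisUtils.createStream(...) call:

  // One receiver per shard, merged into a single DStream for downstream processing.
  List<JavaDStream<byte[]>> streams = new ArrayList<>(numStreams);
  for (int i = 0; i < numStreams; i++) {
    streams.add(createShardStream(jssc)); // placeholder for KinesisUtils.createStream(...)
  }
  JavaDStream<byte[]> unionStream =
      jssc.union(streams.get(0), streams.subList(1, streams.size()));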


    StreamingExamples.setStreamingLogLevels();

    // Create the context with a 1 second batch size
    SparkConf sparkConf = new SparkConf().setAppName("JavaCustomReceiver");
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));

    // Create an input stream with the custom receiver on target ip:port and count the
    // words in the input stream of \n delimited text (e.g. generated by 'nc')
    JavaReceiverInputDStream<String> lines = ssc.receiverStream(
        new JavaCustomReceiver(args[0], Integer.parseInt(args[1])));
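
A custom receiver like the one referenced above extends org.apache.spark.streaming.receiver.Receiver and pushes records to Spark via store(). An illustrative skeleton, not the full JavaCustomReceiver (the actual socket-reading loop is elided):

  // Skeleton of a custom receiver; StorageLevels is org.apache.spark.api.java.StorageLevels.
  public class SimpleReceiver extends Receiver<String> {
    public SimpleReceiver() {
      super(StorageLevels.MEMORY_AND_DISK_2);
    }
    @Override
    public void onStart() {
      // Receive on a separate thread so onStart() returns immediately
      new Thread(this::receive).start();
    }
    @Override
    public void onStop() {
      // Nothing to do: receive() exits once isStopped() returns true
    }
    private void receive() {
      while (!isStopped()) {
        store("line of input"); // in a real receiver, read from the source here
      }
    }
  }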

    if (args.length < 4) {
      System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
      System.exit(1);
    }

    StreamingExamples.setStreamingLogLevels();
    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
    // Create the context with a 2 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
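
The example continues by mapping each topic to numThreads receiver threads and subscribing through the old receiver-based Kafka API, roughly:

  // Subscribe each comma-separated topic with numThreads consumer threads.
  for (String topic : args[2].split(",")) {
    topicMap.put(topic, numThreads);
  }
  JavaPairReceiverInputDStream<String, String> messages =
      KafkaUtils.createStream(jssc, args[0], args[1], topicMap);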

    String host = args[0];
    int port = Integer.parseInt(args[1]);

    Duration batchInterval = new Duration(2000);
    SparkConf sparkConf = new SparkConf().setAppName("JavaFlumeEventCount");
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, batchInterval);
    JavaReceiverInputDStream<SparkFlumeEvent> flumeStream = FlumeUtils.createStream(ssc, host, port);

    flumeStream.count();
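
The full example then prints a per-batch event count and starts the streaming context; roughly (the message format is illustrative):

  // Report how many Flume events arrived in each batch, then run until terminated.
  flumeStream.count()
      .map(cnt -> "Received " + cnt + " flume events.")
      .print();
  ssc.start();
  ssc.awaitTermination();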


    StreamingExamples.setStreamingLogLevels();

    // Create the context with a 1 second batch size
    SparkConf sparkConf = new SparkConf().setAppName("JavaNetworkWordCount");
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));

    // Create a JavaReceiverInputDStream on target ip:port and count the
    // words in the input stream of \n delimited text (e.g. generated by 'nc').
    // Note: a storage level without replication is only appropriate when running
    // locally; in a distributed setup, replication is needed for fault tolerance.
    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
        args[0], Integer.parseInt(args[1]), StorageLevels.MEMORY_AND_DISK_SER);
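
From there the classic word count follows; a sketch against the Spark 1.x Java API (where FlatMapFunction returns an Iterable), requiring java.util.Arrays and scala.Tuple2:

  // Split lines into words, pair each word with 1, and sum counts per batch.
  JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(x.split(" ")));
  JavaPairDStream<String, Integer> wordCounts = words
      .mapToPair(w -> new Tuple2<>(w, 1))
      .reduceByKey((a, b) -> a + b);
  wordCounts.print();
  ssc.start();
  ssc.awaitTermination();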
