Integrating Spark Streaming with Kafka
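Spark Streaming's 0.8 Kafka connector (used in both listings below) offers two ways to consume from Kafka. The Receiver-based approach uses Kafka's high-level consumer API: a long-running receiver pulls records, offsets are tracked in ZooKeeper, and the data is stored in Spark before being processed. The Direct approach uses no receiver: each batch asks Kafka for the offset range to read, so Kafka partitions map one-to-one onto RDD partitions and the application can manage offsets itself.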


// Direct approach to Kafka integration

package spark.com.test.day04

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object SparkStreamingWithDirectOps {

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.spark_project").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setAppName("SparkStreamingWithDirectOps")
      .setMaster("local[*]")
    // Create the StreamingContext; the first argument is the SparkConf, the second the batch interval
    val ssc = new StreamingContext(conf, Seconds(2))

    val kafkaParams = Map[String, String](
      "bootstrap.servers" -> "hadoop01:9092,hadoop02:9092,hadoop03:9092",
      "auto.offset.reset" -> "largest", // start consuming from the latest offset
      "group.id" -> "bd-1901-group-3"
    )
    val topics = "spark".split(",").toSet // set of topic names to subscribe to

    val message: InputDStream[(String, String)] = KafkaUtils
      .createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)

    message.print()

    ssc.start()
    ssc.awaitTermination()
    /*
     * awaitTermination() blocks the current thread until the StreamingContext
     * is stopped, either by stop() or by an error. The variant
     * awaitTerminationOrTimeout(timeout) additionally returns once the given
     * timeout elapses, returning true if the context was stopped and false
     * if it timed out.
     */
  }
}
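The main operational advantage of the Direct approach is that each batch's Kafka offsets are exposed to the application through the connector's HasOffsetRanges trait. Below is a minimal sketch, not part of the original listing: it assumes the message stream defined above and belongs in main() before ssc.start(), and it only prints the ranges where a real job would persist them.

import org.apache.spark.streaming.kafka.HasOffsetRanges

// This must be the first operation applied to the direct stream: only the
// RDDs it produces directly carry the OffsetRange information.
message.foreachRDD { rdd =>
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  offsetRanges.foreach { o =>
    // In a real job these would be stored, e.g. in ZooKeeper or a database
    println(s"topic=${o.topic} partition=${o.partition} from=${o.fromOffset} until=${o.untilOffset}")
  }
}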




// Receiver-based approach to Kafka integration
package spark.com.test.day04

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object SparkStreamingWithReceiver2KafkaOps {

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.spark_project").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setAppName("SparkStreamingWithReceiver2KafkaOps")
      .setMaster("local[*]")
    // Create the StreamingContext; the first argument is the SparkConf, the second the batch interval
    val ssc = new StreamingContext(conf, Seconds(2))

    // Kafka connection parameters; the Receiver approach connects through ZooKeeper
    val kafkaParams = Map[String, String](
      "zookeeper.connect" -> "hadoop01:2181,hadoop02:2181,hadoop03:2181/kafka", // cluster entry point
      "group.id" -> "bd-1901-group-2", // consumer group
      "auto.offset.reset" -> "smallest" // start consuming from the earliest offset
    )

    // Map of topic name -> number of consumer threads for that topic
    val topics = Map[String, Int]("spark" -> 3)

    val message: ReceiverInputDStream[(String, String)] = KafkaUtils
      .createStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics, StorageLevel.MEMORY_ONLY)

    message.print()

    ssc.start()
    ssc.awaitTermination()
  }

  // Alternative: the simplest createStream overload. Not called from main; kept for reference.
  private def readFromKafka(ssc: StreamingContext) = {
    val zkQuorum = "hadoop01:2181,hadoop02:2181,hadoop03:2181/kafka"
    val groupId = "bd-1901-group-2"
    val topics = Map[String, Int]("spark" -> 3)

    /**
     * In the returned stream, the key of each tuple is the key of the
     * corresponding Kafka record and the value is that record's value.
     * This overload accepts no kafkaParams, so consumer properties such as
     * auto.offset.reset cannot be set here. KafkaUtils is the entry point
     * for the whole Kafka integration.
     */
    val inputStream: ReceiverInputDStream[(String, String)] = KafkaUtils
      .createStream(ssc, zkQuorum, groupId, topics)
    inputStream.print()
  }
}
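Both listings compile against the old spark-streaming-kafka (Kafka 0.8) connector; the packages org.apache.spark.streaming.kafka and kafka.serializer.StringDecoder exist only there. A plausible sbt dependency follows; the artifact names are the connector's real coordinates, but the version numbers are assumptions and must match your Spark build:

// Spark 1.x (the 1.6.3 version shown here is an assumption)
libraryDependencies += "org.apache.spark" %% "spark-streaming-kafka" % "1.6.3"

// Spark 2.x ships the same connector under a different artifact:
// libraryDependencies += "org.apache.spark" %% "spark-streaming-kafka-0-8" % "2.4.8"

One caveat on the Receiver example: with StorageLevel.MEMORY_ONLY, records buffered in the receiver can be lost if an executor fails. The usual mitigation is a replicated storage level together with the write-ahead log (spark.streaming.receiver.writeAheadLog.enable set to true).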
