Accumulated Notes on Scripts and Utility Classes

1. Notes on writing scripts in a .txt file
I like drafting scripts in a .txt file and editing them with Notepad++. One thing to watch: in the status bar at the bottom right, the line-ending format must be Unix (LF) and the encoding must be UTF-8. Windows-style CRLF endings will make the shell reject the script with errors such as "/bin/bash^M: bad interpreter".
After uploading the file to Linux, remove the .txt suffix and grant execute permission to the script (the rename and chmod commands appear right after the script below).

2. Kafka cluster control script

#!/bin/bash

# Control script for a three-node Kafka cluster (hadoop102/103/104).
usage() {
    echo "Usage: kf.sh {start|stop|kc [topic]|kp [topic]|list|delete [topic]|describe [topic]}"
}

if [ $# -lt 1 ]; then
    usage
    exit 1
fi

BROKERS=hadoop102:9092,hadoop103:9092,hadoop104:9092

case $1 in
start)
    for i in hadoop102 hadoop103 hadoop104; do
        echo "====================> START $i KF <===================="
        # -daemon runs the broker in the background on each node
        ssh "$i" "kafka-server-start.sh -daemon /opt/module/kafka/config/server.properties"
    done
;;
stop)
    for i in hadoop102 hadoop103 hadoop104; do
        echo "====================> STOP $i KF <===================="
        ssh "$i" "kafka-server-stop.sh"
    done
;;
kc) # console consumer
    if [ -n "$2" ]; then
        kafka-console-consumer.sh --bootstrap-server "$BROKERS" --topic "$2"
    else
        usage
    fi
;;
kp) # console producer
    if [ -n "$2" ]; then
        kafka-console-producer.sh --broker-list "$BROKERS" --topic "$2"
    else
        usage
    fi
;;
list)
    kafka-topics.sh --list --bootstrap-server "$BROKERS"
;;
describe)
    if [ -n "$2" ]; then
        kafka-topics.sh --describe --bootstrap-server "$BROKERS" --topic "$2"
    else
        usage
    fi
;;
delete)
    if [ -n "$2" ]; then
        kafka-topics.sh --delete --bootstrap-server "$BROKERS" --topic "$2"
    else
        usage
    fi
;;
*)
    usage
    exit 1
;;
esac

After uploading, rename the file to drop the .txt suffix and grant execute permission:

mv xxxx.sh.txt xxxx.sh
chmod u+x xxxx.sh
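
Once the script is renamed, executable, and on the PATH, one command drives the whole cluster: kf.sh start and kf.sh stop bring the three brokers up and down, kf.sh list shows the topics, and kf.sh kc topic_log attaches a console consumer (topic_log here is just an example topic name).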

3. Kafka utility class (Scala)

The Scala object below wraps Kafka for Spark Streaming: two overloads for creating a direct DStream (default offsets vs. explicitly specified offsets), plus a shared producer for writing back to Kafka.

import java.util.Properties

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

import scala.collection.mutable


/**
 * Kafka utility class for producing and consuming messages.
 */
object MyKafkaUtils {

  // Kafka consumer configuration.
  // PropertiesUtils (defined elsewhere in the project) resolves keys from an external properties file.
  private val consumerConfig: mutable.Map[String, String] = mutable.Map(
    ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> PropertiesUtils("kafka.bootstrap.servers"),
    ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
    ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
    ConsumerConfig.GROUP_ID_CONFIG -> "gmall",
    ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest",
    ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "true"
  )



  /**
   * Consume starting from the default (committed/latest) offsets.
   */
  def getKafkaDStream(topic: String, ssc: StreamingContext, groupId: String): InputDStream[ConsumerRecord[String, String]] = {
    consumerConfig(ConsumerConfig.GROUP_ID_CONFIG) = groupId
    val dStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), consumerConfig)
    )
    dStream
  }



  /**
   * Consume starting from explicitly specified offsets.
   */
  def getKafkaDStream(topic: String, ssc: StreamingContext, offsets: Map[TopicPartition, Long], groupId: String): InputDStream[ConsumerRecord[String, String]] = {
    consumerConfig(ConsumerConfig.GROUP_ID_CONFIG) = groupId
    val dStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), consumerConfig, offsets)
    )
    dStream
  }
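
  // A minimal sketch of calling the explicit-offsets overload. The topic name,
  // partition numbers, and offset values below are hypothetical, and ssc is
  // assumed to be an existing StreamingContext; in a real job the map would be
  // rebuilt from wherever the previous batch's offsets were saved:
  //
  //   val offsets: Map[TopicPartition, Long] = Map(
  //     new TopicPartition("topic_log", 0) -> 1000L,
  //     new TopicPartition("topic_log", 1) -> 1024L
  //   )
  //   val ds = MyKafkaUtils.getKafkaDStream("topic_log", ssc, offsets, "gmall")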


  /**
   * Create a Kafka producer.
   */
  def createKafkaProducer(): KafkaProducer[String, String] = {
    // Kafka producer configuration.
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, PropertiesUtils("kafka.bootstrap.servers"))
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    // Idempotence deduplicates broker-side retries, so a record is written at most once per partition.
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")

    new KafkaProducer[String, String](props)
  }

  // A single shared producer instance, created once per JVM.
  private var producer: KafkaProducer[String, String] = createKafkaProducer()

  /**
   * Send a message (no key).
   */
  def send(topic: String, msg: String): Unit = {
    producer.send(new ProducerRecord[String, String](topic, msg))
  }

  /**
   * Send a message with an explicit key.
   */
  def send(topic: String, key: String, msg: String): Unit = {
    producer.send(new ProducerRecord[String, String](topic, key, msg))
  }
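
  // The keyed overload matters for ordering: Kafka hashes the key to choose a
  // partition, so records sharing a key land in one partition and keep their
  // relative order. Example call (topic, key, and payload are hypothetical):
  //
  //   MyKafkaUtils.send("topic_log", "mid_001", logJson)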

  /**
   * Flush the producer's buffer.
   */
  def flush(): Unit = {
    if (producer != null) producer.flush()
  }

  /**
   * Close the producer.
   */
  def close(): Unit = {
    if (producer != null) producer.close()
  }

}
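
To tie the pieces together, here is a minimal driver sketch that consumes one topic and echoes every record's value to another. The master URL, batch interval, topic names, and group id are placeholder choices for illustration, not part of the original utility:

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object MyKafkaUtilsDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[4]").setAppName("MyKafkaUtilsDemo")
    val ssc = new StreamingContext(conf, Seconds(5))

    // Consume topic_log with the default-offset overload.
    val kafkaDStream = MyKafkaUtils.getKafkaDStream("topic_log", ssc, "demo_group")

    kafkaDStream.foreachRDD { rdd =>
      rdd.foreachPartition { records =>
        // send() runs on the executors; the object's producer is created once per executor JVM.
        records.foreach(record => MyKafkaUtils.send("topic_echo", record.value()))
        // Flush once per partition so buffered messages are not lost.
        MyKafkaUtils.flush()
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}

Because MyKafkaUtils is a Scala object, each executor JVM lazily builds its own producer the first time send() runs there, which sidesteps shipping a non-serializable KafkaProducer inside the closure.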
