Kafka script

Read data from Kafka: the script below consumes log messages from a Kafka topic, buffers them in an in-memory queue, batch-inserts them into MySQL, and can send an alert email when a record matches a configured condition.

import time
import json
import queue
import threading
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from kafka import KafkaConsumer, TopicPartition
import pymysql
import re
import logging
import os
from logging.handlers import TimedRotatingFileHandler
# Logging configuration
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # record INFO and above
# Make sure the logs directory exists
log_dir = './logs'
os.makedirs(log_dir, exist_ok=True)  # no error if the directory already exists
# Rotate the log file at midnight, keeping 7 days of backups
handler = TimedRotatingFileHandler(os.path.join(log_dir, 'app.log'), when='midnight', interval=1, backupCount=7)
handler.setLevel(logging.INFO)  # the handler also only handles INFO and above
# Log format
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(handler)




# Kafka configuration
KAFKA_SERVER = "xxxx:9092"
KAFKA_TOPIC = "topic_log"

# Database configuration
DB_HOST = "xxxx"
DB_USER = "root"
DB_PASSWORD = "000000"
DB_NAME = "maxwall"

# Email configuration
SMTP_SERVER = "smtp.gmail.com"
SMTP_PORT = 587
SENDER_EMAIL = "xxx"
SENDER_PASSWORD = "xxx"
RECEIVER_EMAIL = "xxx"

# Queue and batch-size settings
BATCH_SIZE = 1000
TIMEOUT = 300  # 5 minutes in seconds

# Queue holding records consumed from Kafka
data_queue = queue.Queue()

# Database connection
def get_db_connection():
    return pymysql.connect(
        host=DB_HOST,
        user=DB_USER,
        password=DB_PASSWORD,
        database=DB_NAME,
        charset='utf8mb4'
    )

# Send an alert email via SMTP
def send_email(subject, body):
    msg = MIMEMultipart()
    msg['From'] = SENDER_EMAIL
    msg['To'] = RECEIVER_EMAIL
    msg['Subject'] = subject

    msg.attach(MIMEText(body, 'plain'))

    try:
        server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
        server.starttls()  # upgrade the connection to TLS
        server.login(SENDER_EMAIL, SENDER_PASSWORD)
        text = msg.as_string()
        server.sendmail(SENDER_EMAIL, RECEIVER_EMAIL, text)
        server.quit()
        print(f"Email sent to {RECEIVER_EMAIL}")
    except Exception as e:
        print(f"Error sending email: {e}")

# Kafka consumer: puts each message onto the in-memory queue and logs consumption progress
def consume_from_kafka():
    consumer = KafkaConsumer(
        KAFKA_TOPIC,
        bootstrap_servers=[KAFKA_SERVER],
        group_id="log-consumer-group",
        value_deserializer=lambda x: json.loads(x.decode('utf-8'))
    )
    for message in consumer:
        data = message.value
        data_queue.put(data)
        # print(f"Consumed and added to queue: {data}")
        
        # Wrap the topic/partition pair in a TopicPartition
        tp = TopicPartition(message.topic, message.partition)
        
        # Current consumer offset for this partition
        current_offset = consumer.position(tp)
        
        # Latest (end) offset of the partition
        end_offsets = consumer.end_offsets([tp])
        latest_offset = end_offsets[tp]
        
        # Log consumption progress
        progress = (current_offset / latest_offset) * 100 if latest_offset > 0 else 0
        # print(f"Partition: {message.partition}, offset: {current_offset}/{latest_offset}, progress: {progress:.2f}%")
        logger.info(f"Partition: {message.partition}, offset: {current_offset}/{latest_offset}, progress: {progress:.2f}%")
        # if validate_data(data):
        #     data_queue.put(data)
        #     print(f"Consumed and added to queue: {data}")
        # else:
        #     print(f"Data failed validation and was skipped: {data}")

# Data validation
def validate_data(data):
    # Validate a record here; return True if the data is valid, False otherwise,
    # e.g. check that a field exists or that its value meets a condition.
    if 'field1' in data and isinstance(data['field1'], (int, float)) and data['field1'] > 0:
        return True
    # Add more validation logic here as needed
    return False

# Batch-insert into the database and decide whether an alert email needs to be sent
def batch_insert_to_db():
    while True:
        if data_queue.qsize() >= BATCH_SIZE:
            process_batch()
        else:
            # Wait 5 minutes, then flush whatever has accumulated even if the batch is not full
            time.sleep(TIMEOUT)
            if data_queue.qsize() > 0:
                process_batch()

# Drain a batch from the queue and insert it into the database
def process_batch():
    print("Processing batch...")
    data_batch = []

    # Pull up to BATCH_SIZE (1000) records from the queue
    while not data_queue.empty() and len(data_batch) < BATCH_SIZE:
        data_batch.append(data_queue.get())

    # Insert the records into the database
    connection = None
    cursor = None
    try:
        connection = get_db_connection()
        cursor = connection.cursor()

        # Adjust the INSERT statement to match the actual record structure
        for data in data_batch:
            sql = """
                INSERT INTO log_table (field1, field2, field3)
                VALUES (%s, %s, %s)
            """
            cursor.execute(sql, (data['field1'], data['field2'], data['field3']))

            # Check whether the record should trigger an alert email
            # if should_send_email(data):
            #     send_email("Alert: Special Condition Met", f"Data: {data}")

        connection.commit()
        print(f"Inserted {len(data_batch)} records into the database.")
    except Exception as e:
        print(f"Error inserting data: {e}")
    finally:
        # Guard against get_db_connection() failing before cursor/connection exist
        if cursor:
            cursor.close()
        if connection:
            connection.close()
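
# Hedged alternative (not in the original post): pymysql's cursor.executemany()
# sends the whole batch in far fewer round-trips than one execute() per row,
# assuming every record carries the same field1/field2/field3 keys.
def process_batch_executemany(data_batch):
    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            sql = """
                INSERT INTO log_table (field1, field2, field3)
                VALUES (%s, %s, %s)
            """
            cursor.executemany(
                sql,
                [(d['field1'], d['field2'], d['field3']) for d in data_batch]
            )
        connection.commit()
    finally:
        connection.close()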

# Decide whether a record should trigger an alert email
def should_send_email(data):
    # For example, send an email when a field exceeds a threshold;
    # here 'field1' is assumed to be numeric and 100 is the threshold
    if 'field1' in data and data['field1'] > 100:
        return True
    return False

# Start the Kafka consumer thread (it feeds records into the queue)
def start_kafka_consumer():
    consumer_thread = threading.Thread(target=consume_from_kafka)
    consumer_thread.daemon = True
    consumer_thread.start()

# Start the batch-processing thread (it drains records from the queue)
def start_batch_processor():
    processor_thread = threading.Thread(target=batch_insert_to_db)
    processor_thread.daemon = True
    processor_thread.start()








def is_error_log(log: str) -> bool:
    '''Detect error entries in the nginx error.log'''
    # Match the error level in the line; an nginx error entry looks like:
    # YYYY/MM/DD HH:MM:SS [error] message
    pattern = r'\[\s*(error|warn|crit|alert)\s*\]'

    # Treat the line as an error log if it contains one of these levels
    if re.search(pattern, log, re.IGNORECASE):
        return True
    return False

def is_laravel_error_log(log: str) -> bool:
    '''Detect whether a Laravel log entry is an error'''
    # Laravel error entries contain the level marker ".ERROR:"
    pattern = r'\.ERROR:'

    # Treat the line as an error log if it contains .ERROR:
    if re.search(pattern, log, re.IGNORECASE):
        return True
    return False
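
# Illustrative check of the two detectors above (the sample lines are made up,
# not taken from real logs):
#   is_error_log('2024/01/01 12:00:00 [error] 1234#0: *1 connect() failed')         -> True
#   is_error_log('2024/01/01 12:00:00 [notice] 1234#0: signal process started')     -> False
#   is_laravel_error_log('[2024-01-01 12:00:00] production.ERROR: Undefined index') -> True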

'''
Filebeat multiline rule for joining Django log entries:
filebeat.inputs:
  - type: log
    paths:
      - /path/to/your/django/logs/*.log  # change to your Django log path

    multiline.pattern: '^Internal Server Error:'  # regex that matches the start of a log entry
    multiline.negate: true  # negate the match: lines that do NOT match are joined to the previous entry
    multiline.match: after  # non-matching lines are appended after the matching line
'''
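
# Hedged sketch (assumed detector, not in the original post): with the Filebeat
# multiline rule above, a Django error arrives as a single message that starts
# with "Internal Server Error:" followed by the joined traceback lines.
def is_django_error_log(log: str) -> bool:
    '''Detect Django error entries produced by the multiline rule above.'''
    return bool(re.match(r'^Internal Server Error:', log))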



if __name__ == "__main__":
    # Start the consumer thread (the batch-insert thread is currently commented out,
    # so records only accumulate in the queue until it is enabled)
    start_kafka_consumer()
    # start_batch_processor()
    # Keep the main thread alive
    while True:
        time.sleep(5*60)

