Notes on the different resource consumption of IO-bound and CPU-bound tasks in a high-concurrency Python service

Scenario: the service must handle at least 5,000 HTTP requests per minute (roughly 84 requests per second) without going down, CPU usage on the 4-core machine must stay within 30%, and memory consumption must stay within 2 GB of the 4 GB available.

First attempt: to keep task processing timely, and assuming the workload was IO-bound, the following technical approach was adopted:

  1. FastAPI for the HTTP service, with uvicorn as the ASGI server
  2. uvloop in place of the default event loop, to improve async performance
  3. Blocking tasks executed in a ThreadPoolExecutor so they do not block the event loop
  4. uvicorn started with 4 worker processes

The code is as follows:

1.httpservice.py

#!/usr/bin/python3
# -*- coding: utf-8 -*-
import asyncio
import json
from concurrent.futures import ThreadPoolExecutor
from configparser import ConfigParser
# HTTPException must come from FastAPI; http.client.HTTPException does not accept status_code/detail
from fastapi import FastAPI, Header, HTTPException, Request
import uvloop

import huazhida_take

# Replace the default event loop with uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = FastAPI()

cfg = ConfigParser()
file = cfg.read('application.ini')
node = cfg.sections()

app_host = cfg.get('app', 'host')
app_port = int(cfg.get('app', 'port'))

filePath = cfg.get('netconfig', 'filepath')

# Custom thread pool (adjust the worker count to the hardware)
thread_pool = ThreadPoolExecutor(max_workers=50)

def redCommon():
    headers = {}
    headers["Token"] = "xxxxxxxxxxxxxxxxxxxxxxx"
    headers["Nonce"] = "xxxxxxxxxxx"
    return headers

# Fetch port traffic
@app.get('/port')
async def port(hostIp, customerId, username, password, to_util, speed):
    loop = asyncio.get_running_loop()
    headers = redCommon()
    try:
        # Run the blocking task in the thread pool
        res = await loop.run_in_executor(thread_pool, huazhida_take.port, headers, int(to_util), hostIp, speed)
        return res
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get('/portquality')
async def portquality(hostIp, customerId, username, password, to_util):
    loop = asyncio.get_running_loop()
    headers = redCommon()
    try:
        # Run the blocking task in the thread pool
        res = await loop.run_in_executor(thread_pool, huazhida_take.portquality, headers, int(to_util), hostIp)
        return json.loads(res)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get('/networkTunnel')
async def networkTunnel(hostIp, customerId, username, password):
    loop = asyncio.get_running_loop()
    headers = redCommon()
    try:
        # Run the blocking task in the thread pool
        res = await loop.run_in_executor(thread_pool, huazhida_take.networkTunnel, headers, hostIp)
        return json.loads(res)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# ...... remaining endpoints omitted

if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app="httpservice:app", host=app_host, port=app_port, workers=4, reload=True)
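
For reference, the service reads its settings from application.ini via ConfigParser. A minimal file matching the keys used above might look like the following (all values are placeholders, not the original configuration):

[app]
host = 0.0.0.0
port = 8000

[netconfig]
filepath = /data/netconfig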

2.huazhida_take.py

import requests
import urllib3
import json
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_v1_5
import base64
import urllib.parse
import time
import calendar
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

path = ''
def port(headers, to_util, hostIp,speed):
    try:
        # Build the time range (from/to, in UTC seconds)
        from_utc_time = str(calendar.timegm(time.gmtime()) - to_util)
        to_utc_time = str(calendar.timegm(time.gmtime()))
        tims = '&from=' + from_utc_time + '&to=' + to_utc_time

        # Fetch port traffic
        urll = path + 'https://'+hostIp+'/api/xxxx/port?offset=0&limit=100' + tims
        encoded_urll = urllib.parse.quote(urll, safe=':/?&=')
        responsel = requests.get(encoded_urll, headers=headers, verify=False, timeout=2)

        portData = json.loads(responsel.text)
        if portData.get('value').get('value') is not None and len(portData.get('value').get('value')) > 0:
            # Extract the port status list
            portStatusData = json.loads(ifPhysical(headers,hostIp))
            for val in portData.get('value').get('value'):
                portName = val.get('name')
                portValueData = val.get('value')[0]
                if speed is not None and speed != "":
                    portValueData['speed'] = speed
                elif portValueData.get('speed') is None:
                    portValueData['speed'] = 0
                for status in portStatusData.get('value').get('interfaceList'):
                    if status.get('name') == portName:
                        portValueData['type'] = status.get('type')
                        portValueData['enabled'] = status.get('enabled')
                        portValueData['mtu'] = status.get('mtu')
                        portValueData['rateLimit'] = status.get('rateLimit')
                        portValueData['burstSize'] = status.get('burstSize')
                        portValueData['phyState'] = status.get('phyState')
                        portValueData['outputRateLimit'] = status.get('outputRateLimit')
                        portValueData['outputBurstSize'] = status.get('outputBurstSize')
                        portValueData['reverse'] = status.get('reverse')
                        portValueData['mac'] = status.get('mac')

        return portData
    except Exception as err:
        return '-9999'

def portquality(headers, to_util,hostIp):
    try:
        # Build the time range (from/to, in UTC seconds)
        from_utc_time = str(calendar.timegm(time.gmtime()) - to_util)
        to_utc_time = str(calendar.timegm(time.gmtime()))
        tims = '&from=' + from_utc_time + '&to=' + to_utc_time

        # Fetch historical port quality data
        urll = path + 'https://'+hostIp+'/api/xxxxxx/historixxx/port?offset=0&limit=100' + tims
        encoded_urll = urllib.parse.quote(urll, safe=':/?&=')
        responsel = requests.get(encoded_urll, headers=headers, verify=False, timeout=2)

        return responsel.text
    except Exception as err:
        return '-9999'
       
def networkTunnel(headers,hostIp):
    try:
        # Fetch tunnel attributes
        urls = path + 'https://'+hostIp+'/api/xxxxxx/tunnel?offset=0&limit=100'
        encoded_urls = urllib.parse.quote(urls, safe=':/?&=')
        responses = requests.get(encoded_urls, headers=headers, verify=False, timeout=2)

        return responses.text
    except Exception as err:
        return '-9999'

# ............ remaining endpoints omitted

After two weeks of stress testing, the service withstood the load, but CPU usage ran at 55%~75% (on the high side; if the load keeps increasing, the CPU may not cope), while memory usage stayed around 25% (acceptable overall).
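
The exact load-testing harness is not shown in the original write-up. As a rough sketch, the same requests + ThreadPoolExecutor combination already used in this project can approximate the 5,000-requests-per-minute target; the URL and query parameters below are placeholders:

# loadgen.py -- rough load-generator sketch (placeholder URL and parameters, not the original test harness)
import time
from concurrent.futures import ThreadPoolExecutor

import requests

URL = 'http://127.0.0.1:8000/port'  # placeholder service address
PARAMS = {'hostIp': '10.0.0.1', 'customerId': '1', 'username': 'u',
          'password': 'p', 'to_util': '60', 'speed': ''}  # placeholder values

def one_request():
    try:
        return requests.get(URL, params=PARAMS, timeout=5).status_code
    except requests.RequestException:
        return None

def run(duration_s=60, rate_per_s=84):  # ~5,000 requests per minute
    with ThreadPoolExecutor(max_workers=100) as pool:
        for _ in range(duration_s):
            for _ in range(rate_per_s):
                pool.submit(one_request)
            time.sleep(1)  # crude pacing: one batch of requests per second

if __name__ == '__main__':
    run()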

Repeated profiling showed that the blocking tasks were being invoked very frequently, so the workload was reconsidered as CPU-bound. The main change was to httpservice.py: replace the thread pool with a process pool so that tasks are spread across multiple processes and make full use of the multi-core CPU.

1. Use ProcessPoolExecutor instead of ThreadPoolExecutor
2. Turn off reload=True (reload is a development convenience and does not combine with multiple workers)
3. Tune the number of workers to the actual load; a common starting point is twice the number of CPU cores (see the sketch below)
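
As a small sketch for point 3, the worker count can be derived from the core count instead of being hard-coded (os.cpu_count() is the standard way to query it; 4 cores gives 8 workers):

import os

workers = (os.cpu_count() or 1) * 2  # rule of thumb: roughly twice the CPU core count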

The modified code is as follows:
httpservice.py:

#!/usr/bin/python3
# -*- coding: utf-8 -*-
import asyncio
import json
from concurrent.futures import ProcessPoolExecutor
from configparser import ConfigParser
# HTTPException must come from FastAPI; http.client.HTTPException does not accept status_code/detail
from fastapi import FastAPI, Header, HTTPException, Request
import uvloop

import huazhida_take

# Replace the default event loop with uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = FastAPI()

cfg = ConfigParser()
file = cfg.read('application.ini')
node = cfg.sections()

app_host = cfg.get('app', 'host')
app_port = int(cfg.get('app', 'port'))

filePath = cfg.get('netconfig', 'filepath')

# Custom process pool (one worker process per CPU core)
process_pool = ProcessPoolExecutor(max_workers=4)

def redCommon():
    headers = {}
    headers["Token"] = "xxxxxxxxxxxxxxxxxxxxxxx"
    headers["Nonce"] = "xxxxxxxxxxx"
    return headers

# Fetch port traffic
@app.get('/port')
async def port(hostIp, customerId, username, password, to_util, speed):
    loop = asyncio.get_running_loop()
    headers = redCommon()
    try:
        # Run the blocking task in the process pool
        res = await loop.run_in_executor(process_pool, huazhida_take.port, headers, int(to_util), hostIp, speed)
        return res
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get('/portquality')
async def portquality(hostIp, customerId, username, password, to_util):
    loop = asyncio.get_running_loop()
    headers = redCommon()
    try:
        # Run the blocking task in the process pool
        res = await loop.run_in_executor(process_pool, huazhida_take.portquality, headers, int(to_util), hostIp)
        return json.loads(res)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# ...... remaining endpoints omitted

if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app="httpservice:app", host=app_host, port=app_port, workers=8)

Compared with the thread-pool approach of solution 1, the process pool consumed less CPU, HTTP calls returned faster, and memory usage was about 27%.

Summary:
For high-frequency HTTP services like this one that wrap calls to a third-party application, it is worth treating the workload as CPU-bound and adopting solution 2. The above is only a reference, though; the final choice should be based on the specifics of each case.
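
To see why the process pool wins once the work is CPU-bound (the GIL keeps pure-Python computation in threads on a single core at a time), here is a minimal, self-contained comparison; busy() is a stand-in for the real parsing and merging work, not code from the service above:

# pool_compare.py -- toy CPU-bound comparison (illustrative only)
import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor

def busy(n=2_000_000):
    # Pure-Python loop: CPU-bound, so threads are serialized by the GIL
    total = 0
    for i in range(n):
        total += i * i
    return total

def timed(pool_cls, jobs=8):
    start = time.perf_counter()
    with pool_cls(max_workers=4) as pool:
        list(pool.map(busy, [2_000_000] * jobs))
    return time.perf_counter() - start

if __name__ == '__main__':
    print('threads  :', timed(ThreadPoolExecutor))
    print('processes:', timed(ProcessPoolExecutor))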
