# Original version (plain single-node redis connection).
# -*- coding:utf-8 -*-
# author: cyz
# time: 2021/2/3 9:11
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
# os.chdir(os.path.dirname(os.path.abspath(__file__)))
import pickle
# localhost = "1.1.1.1"
# port = 6379
# pool = redis.ConnectionPool(host=localhost, port=port, db=0, health_check_interval=30)
# r = redis.Redis(connection_pool=pool)
# # 自动commit
# r.set('bing', 'baz')
# r.set('a', 'b')
# r.get('bing')
# r.get('a')
# # 手动commit
# pipe = r.pipeline()
# pipe.get('bing')
# pipe.get('a')
# pipe.set('a', "c")
# pipe.delete('a')
# pipe.execute()
# pipe.reset() # 清洗管道缓存
# redis
# https://github.com/andymccurdy/redis-py
import redis
class ConnectRedis(object):
    """Thin wrapper around a single-node redis connection.

    Values are pickled before SET and unpickled after GET, so arbitrary
    Python objects can be stored.  NOTE: pickle must only be used with
    trusted data.
    # https://github.com/andymccurdy/redis-py/blob/master/redis/client.py
    """

    def __init__(self, **kwargs):
        """
        :param kwargs:
            host: redis host (default "1.1.1.1")
            port: redis port (default 6379)
            db: database index (default 0)
            health_check_interval: connection heartbeat, in seconds (default 30)
        """
        host = kwargs.get("host", "1.1.1.1")
        port = kwargs.get("port", 6379)
        db = kwargs.get("db", 0)
        health_check_interval = kwargs.get("health_check_interval", 30)
        pool = redis.ConnectionPool(host=host, port=port,
                                    db=db,
                                    health_check_interval=health_check_interval)
        self.conn = redis.Redis(connection_pool=pool)

    def set(self, key: str, value):
        """Pickle `value` and store it under `key`."""
        return self.conn.set(key, pickle.dumps(value))

    def get(self, key: str):
        """Return the unpickled value of `key`, or None when the key is absent.

        BUG FIX: the original called pickle.loads() unconditionally, which
        raised TypeError for a missing key (GET returns None).
        """
        raw = self.conn.get(key)
        return None if raw is None else pickle.loads(raw)

    def delete(self, key: str):
        """Delete `key`; returns the number of keys removed."""
        return self.conn.delete(key)
# rediscluster
# https://github.com/grokzen/redis-py-cluster
from rediscluster import RedisCluster, exceptions
class ConnectRedis(object):
    """Redis *cluster* client wrapper built on redis-py-cluster.

    Every value is serialized with pickle before writing and deserialized
    after reading, so arbitrary Python objects can be stored.
    NOTE: pickle must only be used with trusted data.

    # https://github.com/Grokzen/redis-py-cluster/blob/570d7f23faa9bde8031f5e2622e4c01aef3c4f7a/rediscluster/client.py#L89
    """

    def __init__(self, **kwargs):
        """
        :param kwargs:
            nodes: startup nodes, e.g. [{"host": "1.1.1.1", "port": "6379"}]
            health_check_interval: connection heartbeat, in seconds
        """
        # BUG FIX: the original bound these to local variables only, so
        # connection() crashed with AttributeError when reading self.nodes.
        self.nodes = kwargs.get("nodes", [{"host": "1.1.1.1", "port": "6379"}])
        self.health_check_interval = kwargs.get("health_check_interval", 30)  # heartbeat

    def _listens_for(num_retries, retry_interval):
        """Retry decorator (used at class-definition time, hence no `self`).

        :param num_retries: max number of retries; 0 means retry forever
        :param retry_interval: seconds to sleep between attempts
        # based on https://blog.51cto.com/yishi/2354752
        """
        def decorate(func):
            def wrapper(*args, **kw):
                # BUG FIX: `time` is never imported at module level; import it
                # locally so the retry path does not die with NameError.
                import time
                retry = 0
                while 1:
                    try:
                        return func(*args, **kw)
                    except Exception:
                        print("disconnection error, retrying operation. retry times: {}".format(retry))
                        # num_retries == 0 -> unlimited retries
                        if num_retries != 0 and retry > num_retries:
                            raise
                        time.sleep(retry_interval)
                        retry += 1
            return wrapper
        return decorate

    @_listens_for(0, 5)
    def connection(self):
        """Build a RedisCluster client; retried forever on failure (decorator)."""
        return RedisCluster(startup_nodes=self.nodes,
                            decode_responses=False,
                            health_check_interval=self.health_check_interval)

    def ping(self):
        """PING the cluster; True when reachable."""
        with self.connection() as conn:
            return conn.ping()

    def exceptions(self):
        """Expose the rediscluster exceptions module to callers."""
        return exceptions

    def expire(self, key: str, time):
        """Set a TTL on `key`.

        :param key: key name
        :param time: TTL in seconds (parameter name shadows the stdlib
            `time` module; kept for backward compatibility)
        """
        conn = self.connection()
        return conn.expire(key, time)

    def set(self, key: str, value):
        """Pickle `value` and store it under `key`."""
        conn = self.connection()
        return conn.set(key, pickle.dumps(value))

    def setnx(self, key: str, value):
        """Set `key` to the pickled `value` only if the key does not exist.

        :return: True when the key was absent and has been set, else False
        """
        conn = self.connection()
        return conn.setnx(key, pickle.dumps(value))

    def get(self, key: str):
        """Return the unpickled value of `key`, or None when absent."""
        conn = self.connection()
        raw = conn.get(key)
        return None if raw is None else pickle.loads(raw)

    def hdel(self, name: str, key: str):
        """Delete field `key` from hash `name`."""
        conn = self.connection()
        return conn.hdel(name, key)

    def hset(self, name: str, key: str, value):
        """Set hash field `key` of `name` to the pickled `value`."""
        conn = self.connection()
        return conn.hset(name, key, pickle.dumps(value))

    def hget(self, name: str, key: str):
        """Return the unpickled hash field, or None when absent."""
        conn = self.connection()
        # Fetch once — the original issued the HGET round-trip twice.
        raw = conn.hget(name, key)
        return None if raw is None else pickle.loads(raw)

    def hmset(self, name: str, value: dict):
        """Set multiple hash fields at once; every value is pickled."""
        conn = self.connection()
        pickled = {field: pickle.dumps(val) for field, val in value.items()}
        return conn.hmset(name, pickled)

    def hmget(self, name: str, key: list):
        """Return unpickled values for the given hash fields (None preserved)."""
        conn = self.connection()
        raw = conn.hmget(name, key)
        return [pickle.loads(item) if item is not None else None for item in raw]

    def hgetall(self, name: str):
        """Return the whole hash `name` as {field: unpickled value}."""
        conn = self.connection()
        raw = conn.hgetall(name)
        return {field: pickle.loads(val) for field, val in raw.items()}

    def hvals(self, name: str):  # https://redis.io/commands/hvals
        """Return all (unpickled) values of hash `name`."""
        conn = self.connection()
        return [pickle.loads(item) for item in conn.hvals(name)]

    def delete(self, key: str):
        """Delete `key`; returns the number of keys removed."""
        conn = self.connection()
        return conn.delete(key)

    def xgroup(self, name: str, groupname: str, xgroup_tyoe: str, **kwargs):
        """Dispatch XGROUP sub-commands (redis streams, requires redis >= 5.0).

        :param name: stream name
        :param groupname: consumer-group name
        :param xgroup_tyoe: sub-command, one of [create, destroy, setid,
            delconsumer] (parameter name is a typo, kept for backward
            compatibility with existing callers)
            create: create a new consumer group bound to the stream
            destroy: destroy the consumer group
            setid: set the group's last-delivered id to another id
            delconsumer: remove a specific consumer from the group
        :param kwargs: extras —
            id: last-delivered id to consider (default "$")
            mkstream: also create an empty stream, True/False (default False)
            consumername: consumer to remove (for delconsumer)
        :return: 0 = failure / no result, 1 = success; None for an
            unrecognized sub-command
        """
        conn = self.connection()
        id = kwargs.get("id", "$")
        mkstream = kwargs.get("mkstream", False)
        consumername = kwargs.get("consumername", None)
        if xgroup_tyoe == "create":  # https://redis.io/commands/xgroup
            return conn.xgroup_create(name, groupname, id, mkstream)
        if xgroup_tyoe == "destroy":
            return conn.xgroup_destroy(name, groupname)
        if xgroup_tyoe == "setid":
            return conn.xgroup_setid(name, groupname, id)
        if xgroup_tyoe == "delconsumer":
            return conn.xgroup_delconsumer(name, groupname, consumername)

    def xinfo(self, name: str, groupname: str = None, info_type: str = "stream"):
        """Return stream introspection data.

        :param name: stream name
        :param groupname: consumer-group name (only for info_type="consumers")
        :param info_type: one of [stream, groups, consumers]
            stream: general information about the stream
            groups: general information about its consumer groups
            consumers: general information about the group's consumers
        """
        conn = self.connection()
        if info_type == "stream":
            return conn.xinfo_stream(name)
        if info_type == "groups":
            return conn.xinfo_groups(name)
        if info_type == "consumers":
            return conn.xinfo_consumers(name, groupname)

    def xadd(self, name: str, value: dict, id='*', maxlen=None, approximate=True):
        """Append an entry to the stream; every field value is pickled.

        :param name: stream name
        :param value: dict of field/value pairs to insert
        :param id: insertion id; '*' appends (name shadows builtin `id`,
            kept for backward compatibility)
        :param maxlen: trim stream members beyond this size
        :param approximate: actual length may slightly exceed maxlen
        """
        conn = self.connection()
        pickled = {field: pickle.dumps(val) for field, val in value.items()}
        return conn.xadd(name, pickled, id, maxlen, approximate)

    def _xreadValueRecover(self, value: list):
        """Unpickle a multi-stream reply.

        :param value: [[stream, [(id, {field: pickled}), ...]], ...] as
            returned by xread/xreadgroup
        :return: [{stream: [(id, {field: obj}), ...]}, ...]
        """
        if value == []:
            return []
        return [
            {stream: [(msg_id, {f: pickle.loads(fields[f]) for f in fields.keys()})
                      for msg_id, fields in entries]}
            for stream, entries in value
        ]

    def _xreadValueRecover1(self, value: list):
        """Unpickle a flat entry list.

        :param value: [(id, {field: pickled}), ...] as returned by
            xrange/xclaim
        :return: [(id, {field: obj}), ...]
        """
        if (value == []) or (value == [(None, None)]):
            return []
        return [(msg_id, {f: pickle.loads(fields[f]) for f in fields.keys()})
                for msg_id, fields in value]

    def xread(self, streams: dict, count=None, block=None):
        """Read new entries from several streams (optionally blocking).

        :param streams: {stream: last_id, ...}; use 0 to read from the
            first stored entry; returns [] when nothing follows last_id
        :param count: return at most this many entries, oldest first
        :param block: milliseconds to block; 0 blocks until data arrives
        :return: see _xreadValueRecover
        """
        conn = self.connection()
        return self._xreadValueRecover(conn.xread(streams, count, block))

    def xreadgroup(self, groupname, consumername, streams, count=None, block=None, noack=False):
        """Read from streams through a consumer group.

        :param groupname: consumer-group name
        :param consumername: consumer name
        :param streams: {stream: last_id, ...}; use 0 for the first entry
        :param count: return at most this many entries, oldest first
        :param block: milliseconds to wait; 0 blocks until data arrives
        :param noack: do not add messages to the PEL
        """
        conn = self.connection()
        value = conn.xreadgroup(groupname, consumername, streams,
                                count=count, noack=noack, block=block)
        return self._xreadValueRecover(value)

    def xdel(self, name: str, *ids):
        """Delete entries by id from stream `name`."""
        conn = self.connection()
        return conn.xdel(name, *ids)

    def xack(self, name, groupname, *ids):
        """Acknowledge processed entries for a consumer group."""
        conn = self.connection()
        return conn.xack(name, groupname, *ids)

    def xclaim(self, name, groupname, consumername, min_idle_time, message_ids: list,
               idle=None, time=None, retrycount=None, force=False,
               justid=False):
        """Change ownership of pending entries.

        :param name: stream name
        :param groupname: consumer-group name
        :param consumername: consumer claiming the messages
        :param min_idle_time: only claim messages idle at least this many ms
        :param message_ids: non-empty list/tuple of message ids to claim
        :param idle: optional; set the message idle time (last delivery), ms
        :param time: optional; like idle but an absolute Unix time in ms
            (name shadows the stdlib `time` module; kept for compatibility)
        :param retrycount: optional; set the retry counter to this value
        :param force: optional; create PEL entries even for ids not yet in
            any client's PEL
        :param justid: optional; return only the claimed ids, not the data
        """
        conn = self.connection()
        value = conn.xclaim(name, groupname, consumername, min_idle_time, message_ids,
                            idle=idle, time=time, retrycount=retrycount, force=force,
                            justid=justid)
        # justid=True yields bare ids: nothing to unpickle.
        if justid is True:
            return value
        return self._xreadValueRecover1(value)

    def xpending(self, name, groupname):
        """Summary of the group's pending entries list (PEL)."""
        conn = self.connection()
        return conn.xpending(name, groupname)

    def xpending_range(self, name, groupname, min, max, count, consumername=None):
        """Detailed page of pending entries with ids in [min, max]."""
        conn = self.connection()
        return conn.xpending_range(name, groupname, min, max, count,
                                   consumername=consumername)

    def xrange(self, name, min='-', max='+', count=None):
        """Return unpickled entries of stream `name` with ids in [min, max]."""
        conn = self.connection()
        return self._xreadValueRecover1(conn.xrange(name, min=min, max=max, count=count))
if __name__ == "__main__":
    # Demo / smoke test: load a reference table from the warehouse and
    # round-trip it through redis.  Needs project-internal modules plus a
    # live database and redis service, so it only runs as a script.
    from internal.pkg.login.sql_login_sp_social_security import engine
    import pandas as pd
    with engine.connect() as conn:
        # NOTE(review): the f-string has no placeholders; a plain string would do.
        source = pd.read_sql(f'''
        select
        *
        from qhdata_standard.dim_admin_area
        -- where cur_level = 5
        ''', conn)
    cr = ConnectRedis()
    # Basic key round-trip.
    cr.set("a", "1")
    cr.get("a")  # 1
    cr.delete('a')
    # Store a whole DataFrame (pickled) and time how long retrieval takes.
    cr.set("dim_admin_area", source)
    from datetime import datetime
    s = datetime.now()
    a = cr.get("dim_admin_area")
    # r.get("dim_admin_area")
    print(datetime.now() - s)
# 2023-08-04: redis-py can now connect to a cluster directly.
# Reference: https://redis-py.readthedocs.io/en/stable/clustering.html
# Verified with redis-py version 4.6.0.
from redis.cluster import RedisCluster, ClusterNode
# Three equivalent ways to build a cluster client; each rebinds `rc`,
# so only the last assignment is the one actually used below.
rc = RedisCluster(host='192.168.1.1', port=6379)
rc = RedisCluster.from_url("redis://192.168.1.1:6379/0")
nodes = [ClusterNode('192.168.1.1', 6379)]
rc = RedisCluster(startup_nodes=nodes)
# Round-trip check against the cluster.
rc.ping()
# 2023-08-28: version-compatible connection approach (works with both
# redis-py >= 5 and the legacy redis-py-cluster package).
import redis
# Pick the cluster client matching the installed redis-py version.
# BUG FIX: the original compared version *strings* ('10.0.0' >= '5.0.0' is
# False lexicographically); compare the major version numerically instead.
try:
    _redis_major = int(redis.__version__.split('.')[0])
except ValueError:
    _redis_major = 0  # unparseable version -> fall back to the legacy client
if _redis_major >= 5:
    from redis.cluster import RedisCluster, ClusterNode  # redis-py >= 5 ships its own cluster client
else:
    from rediscluster import RedisCluster, exceptions  # legacy redis-py-cluster package
class ConnectRedis(object):
# https://github.com/Grokzen/redis-py-cluster/blob/570d7f23faa9bde8031f5e2622e4c01aef3c4f7a/rediscluster/client.py#L89
# https://redis-py-cluster.readthedocs.io/en/master/
def __init__(self, **kwargs):
self.nodes = kwargs.get("nodes", [{"host": "192.168.1.1", "port": "6379"}])
self.health_check_interval = kwargs.get("health_check_interval", 30) # 连接心跳
self.conn = None
def _listens_for(num_retries, retry_interval):
# https://blog.51cto.com/yishi/2354752
def decorate(func):
def wrapper(*args, **kw):
retry = 0
while 1:
try:
func_result = func(*args, **kw)
return func_result
except Exception as ex:
print("disconnection error, retrying operation. retry times: {}".format(retry))
if num_retries == 0:
pass
else:
if retry > num_retries:
raise
time.sleep(retry_interval)
retry += 1
return wrapper
return decorate
@_listens_for(0, 5)
def connection(self):
if self.conn is None:
# 以非集群的方式连接
conn = redis.StrictRedis(host=self.nodes[0].get('host', 'localhost'), port=self.nodes[0].get('port', 6379))
redis_mode = conn.info()['redis_mode']
if redis_mode == 'cluster': # 切换成cluster的连接
conn.close() # 关闭之前的连接
if redis.__version__ >= '5.0.0':
nodes = [ClusterNode(i.get('host'), i.get('port')) for i in self.nodes]
else:
nodes = self.nodes
conn = RedisCluster(startup_nodes=nodes,
decode_responses=False,
health_check_interval=self.health_check_interval)
self.conn = conn
else:
conn = self.conn
try:
conn.ping()
return conn
except Exception as ex:
self.conn = None
print(ex)
raise
def ping(self):
with self.connection() as conn:
return conn.ping()