Definition and Usage of LOG_ARCHIVE_FORMAT

This article takes a close look at configuring and using the log_archive_format parameter when the database runs in ARCHIVELOG mode. It explains how the parameter defines the naming rule for archived redo log files, describes the fallback behavior when an archive destination becomes invalid, and stresses the use of substitution variables to keep file names unique and consistent across operating systems.
log_archive_format is a non-basic initialization parameter that cannot be changed with ALTER SYSTEM at runtime: it is static, so a change must be written with SCOPE=SPFILE and takes effect only after an instance restart. It defines the naming rule for archived redo log files when the database runs in ARCHIVELOG mode.
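
A minimal SQL*Plus sketch showing the static nature of the parameter; the format string ARC_%t_%s_%r.arc is only an illustrative value, not a recommended setting:

    SQL> show parameter log_archive_format

    -- An in-memory change is rejected because the parameter is static:
    SQL> alter system set log_archive_format='ARC_%t_%s_%r.arc';
    -- fails with ORA-02095: specified initialization parameter cannot be modified

    -- Write the change to the SPFILE and restart the instance instead:
    SQL> alter system set log_archive_format='ARC_%t_%s_%r.arc' scope=spfile;
    SQL> shutdown immediate
    SQL> startup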

The format string is appended under a valid archive destination, and there is an interesting wrinkle here: if an archive destination is invalid, archiving falls back to another valid location, in the following order of priority (see the query sketch after the list):

  1. {log_archive_dest | log_archive_dest_n}
  2. log_archive_duplex_dest
  3. db_recovery_file_dest
  4. %ORACLE_HOME%\database on Windows, or $ORACLE_HOME/dbs on Unix/Linux
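
A quick way to see which destinations are currently in effect is to query V$ARCHIVE_DEST; a sketch (output naturally varies per instance):

    SQL> select dest_name, destination, status
      2    from v$archive_dest
      3   where destination is not null;

    SQL> show parameter db_recovery_file_dest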

 


The default log_archive_format is operating-system dependent: ARC%S_%R.%T on Windows, and typically %t_%s_%r.dbf on Unix/Linux.

Any character the OS file system allows may appear in the format string, but substitution variables should be used to guarantee that each archived file name is unique.

Substitution variables come in uppercase and lowercase forms. The difference: an uppercase variable pads its value with leading zeros to a fixed width, so with log sequence 123, %s yields 123 while %S yields a zero-padded value such as 00123 (the exact width is platform-dependent).


The log_archive_format substitution variables:

  1. %s  -- log sequence number of the archived redo file
  2. %t  -- archive thread number (each RAC instance archives its own thread)
  3. %d  -- database ID (DBID); note this is unlike %d in RMAN's FORMAT, which is the database name; it corresponds to RMAN's %I
  4. %r  -- resetlogs ID, which keeps file names unique across database incarnations
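
Putting the variables together, a sketch of forcing an archive and inspecting the resulting name; the directory /u01/arch and the numbers in the sample output are hypothetical. Note that recent Oracle releases reject a format that omits %s, %t, or %r (ORA-19905):

    SQL> alter system archive log current;
    SQL> select name from v$archived_log where rownum <= 3;

    NAME
    ---------------------------------------
    /u01/arch/ARC_1_42_1100000000.arc
    -- thread 1, sequence 42, resetlogs ID 1100000000 (illustrative values)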