Error_code: 2003

本文记录了一次MySQL主从同步出现故障的情况,主要表现为从库频繁出现Error_code:2003错误,并最终定位为网络问题的过程。文章详细描述了故障现象、排查步骤及解决方法。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

DB:5.6.16

配置:主从

MySQL主从断掉,从库警告日志出现大量的Error_code: 2003
Slave I/O error connecting to master .......retry-time 60 retries 1,Error_code: 2003

1、网络问题
2、密码不对
3、POS不对

在正常运行的时候,突然出问题....

验证密码,POS后,问题依旧,主从依旧连接不上,断定网络调整问题(能ping通,也能用 mysql -h 连接到主库)

因为是测试环境,第二天问网络,尼玛,网络问题 
<div class="submit "><form action="signup.php" method="POST" class="Form1" id="Form1" onsubmit="return check()"> <span>账号&nbsp:&nbsp</span><input class="input1" type="username" onblur="signupusername()" required placeholder="请输入账号名"><p class="text1">账号格式有误</p> <p class="text2"></p> <span>密码&nbsp:&nbsp</span><input class="input2" type="password" name="signup_passWord" onblur="signuppassword()" required placeholder="请输入密码"><p class="text">密码必须由6-16位数字加字母组成</p> <span>确认&nbsp:&nbsp</span><input class="input3" type="password" name="signup_passWord2" onblur="signuppassword2()" required placeholder="请确认密码"> <p class="text3">前后密码不一致</p> <span>手机&nbsp:&nbsp</span><input class="input4" type="phone" name="signup_phone" onblur="signupphone()" required placeholder="请输入手机"><p class="text4">手机号码格式有误</p> <span>邮箱&nbsp:&nbsp</span><input class="input5" type="email" name="signup_email" onblur="signupemail()" required placeholder="请输入邮箱"><p class="text5">邮箱格式有误</p> <p class="yz"> <span>验证码&nbsp:</span> <input type="text" id="text" class="yz_text" required placeholder="请输入验证码"><input type="button" id="code" onclick="Code()" title='点击更换验证码'> </p><input class="sub1" type="submit" value="立即注册"></form> </div> &nbsp;<div class="submit action"><form action="login.php" method="POST"> <span>账号&nbsp:&nbsp</span><input class="input6" type="username" name="login_userName" required placeholder="请输入手机号"> <span>密码&nbsp:&nbsp</span><input class="input7" type="password" name="login_passWord" required placeholder="请输入密码"><input class="sub2" type="submit" value="立即登录"></form></div></div> </div> </div>根据代码写一个jQuery交互数据,使用到ajax
05-29
import requests
from bs4 import BeautifulSoup
import re
import time
import os
import random
from fake_useragent import UserAgent
from urllib.parse import urlparse, urljoin
import logging

# Configure logging: mirror every message to a log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("novel_downloader.log"),
        logging.StreamHandler()
    ]
)

# Shared user-agent generator; each request picks a fresh random UA string.
ua = UserAgent()


def extract_content_and_next_link(html, base_url):
    """Extract one chapter's text from *html* and locate the next-page link.

    Returns a ``(content, next_link)`` tuple. ``content`` is the chapter
    title plus cleaned body text, or ``None`` when the expected content
    container is missing; ``next_link`` is an absolute URL or ``None``
    when no "下一页"/"下一章" anchor is found.
    """
    soup = BeautifulSoup(html, 'html.parser')

    # 1. Locate the chapter-text container (two known class variants).
    content_div = soup.find('div', class_='size16 color5 pt-read-text')
    if not content_div:
        content_div = soup.find('div', class_='pt-read-text')
    if not content_div:
        logging.error("错误: 未找到内容容器")
        return None, None

    # Normalize entities and <br/> line breaks before extracting the text.
    raw_html = str(content_div)
    processed_html = raw_html.replace('&nbsp;', ' ')
    processed_html = processed_html.replace('<br/>', '\n')
    content_soup = BeautifulSoup(processed_html, 'html.parser')
    raw_text = content_soup.get_text()

    # Strip the 4-space paragraph indent and collapse runs of blank lines.
    cleaned_text = re.sub(r'^ {4}', '', raw_text, flags=re.MULTILINE)
    cleaned_text = re.sub(r'\n{3,}', '\n\n', cleaned_text)

    # 2. Chapter title: the part of <title> before the first '-'.
    title_tag = soup.find('title')
    chapter_title = title_tag.text.split('-')[0] if title_tag else "未知章节"

    # 3. Find the next-page / next-chapter anchor and absolutize its href.
    next_link = None
    next_page_tag = soup.find('a', string='下一页')
    if not next_page_tag:
        next_page_tag = soup.find('a', string='下一章')
    if next_page_tag and 'href' in next_page_tag.attrs:
        next_link = next_page_tag['href']
        if not next_link.startswith('http'):
            # Relative href — resolve against the site root.
            next_link = urljoin(base_url, next_link)
        logging.info(f"找到下一页链接: {next_link}")

    return f"{chapter_title}\n\n{cleaned_text}\n\n", next_link


def get_random_headers():
    """Build browser-like request headers with a random User-Agent."""
    return {
        'User-Agent': ua.random,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cache-Control': 'max-age=0',
        'DNT': '1',  # Do Not Track
        'Referer': 'https://www.doupoxs.com/'
    }


def download_novel(start_url, output_file="full_novel.txt", max_retries=3):
    """Download a whole novel by following next-page links from *start_url*.

    Writes each chapter to *output_file* as it is fetched and returns the
    number of chapters successfully written. Stops after *max_retries*
    consecutive failures.
    """
    # Derive the site root used for resolving relative links and Referer.
    parsed_url = urlparse(start_url)
    base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"

    current_url = start_url
    chapter_count = 0
    retry_count = 0

    # Create the output directory if the path includes one.
    output_dir = os.path.dirname(output_file)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logging.info(f"开始下载小说,起始URL: {start_url}")
    logging.info(f"基础URL: {base_url}")

    # One session for connection reuse (keep-alive) across chapters.
    session = requests.Session()

    with open(output_file, 'w', encoding='utf-8') as f:
        while current_url and retry_count < max_retries:
            # BUGFIX: count a chapter only after it is successfully written
            # (the original incremented before fetching, so every retry of
            # the same URL inflated the chapter number and final total).
            logging.info(f"正在下载第 {chapter_count + 1} 章: {current_url}")
            try:
                headers = get_random_headers()
                headers['Referer'] = base_url
                response = session.get(current_url, headers=headers, timeout=15)

                if response.status_code != 200:
                    logging.error(f"错误: 无法获取页面,状态码: {response.status_code}")
                    retry_count += 1
                    # BUGFIX: back off before retrying — the original
                    # `continue` skipped the delay and hammered the server.
                    time.sleep(5)
                    continue

                # Force the expected encoding before parsing.
                response.encoding = 'utf-8'

                content, next_link = extract_content_and_next_link(response.text, base_url)

                if content:
                    f.write(content)
                    f.flush()  # persist immediately in case of a later crash
                    chapter_count += 1
                    logging.info(f"成功写入第 {chapter_count} 章内容")
                    retry_count = 0  # reset the consecutive-failure counter
                else:
                    logging.warning(f"未提取到内容,可能页面结构变化")

                # Advance to the next page, or stop at the last chapter.
                if next_link:
                    current_url = next_link
                else:
                    current_url = None
                    logging.info("已到达最后一章")

                # Random politeness delay to avoid being rate-limited/banned.
                delay = random.uniform(1.5, 4.0)
                logging.info(f"等待 {delay:.2f} 秒后继续...")
                time.sleep(delay)

            except requests.exceptions.RequestException as e:
                logging.error(f"网络请求出错: {str(e)}")
                retry_count += 1
                time.sleep(5)
            except Exception as e:
                # Broad catch is deliberate: one bad page must not kill the
                # whole multi-hour download run.
                logging.error(f"处理章节时出错: {str(e)}")
                retry_count += 1
                time.sleep(5)

    file_size = os.path.getsize(output_file)
    logging.info(f"已保存到: {output_file}")
    logging.info(f"总字数: {file_size} 字节")
    logging.info(f"小说下载完成! 共 {chapter_count} 章")
    return chapter_count


def main():
    """Interactive entry point: prompt for a start URL and output file."""
    print("=" * 50)
    print("小说下载器 - 专注下一页链接抓取")
    print("=" * 50)

    start_url = input("请输入小说起始URL: ").strip()
    if not start_url:
        start_url = "https://www.doupoxs.com/nkl/1426/1426458/65738602.html"
        print(f"使用默认URL: {start_url}")

    output_file = input("请输入输出文件名(默认: novel.txt): ").strip()
    if not output_file:
        output_file = "novel.txt"

    chapter_count = download_novel(start_url, output_file)

    if chapter_count > 0:
        print("\n下载完成! 请查看输出文件")
    else:
        print("\n下载失败,请检查日志文件")


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n用户中断,程序退出")
    except Exception as e:
        print(f"程序发生错误: {str(e)}")
最新发布
08-02
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值