Analyzing the None values in Flask pagination's iter_pages

Flask pagination explained: how to avoid None
When paginating with Flask, some developers are puzzled by None values showing up while iterating over the page numbers. A look at the source shows that None is yielded whenever `last + 1 != num`. To iterate over every page number without gaps, the key is to adjust the default parameters so that `num <= left_edge`, or one of the other two conditions, always holds. Doing so removes the None values from Flask pagination.

Yesterday I looked through the Q&A section and noticed that many people were asking why Flask pagination keeps yielding None while iterating. What is going on? Let's start with a small program:
Database contents: (screenshot omitted)
The code:

if __name__ == "__main__":
    user = User.query.paginate(1, 2)   # page 1, two records per page
    for i in user.iter_pages():
        print(i, end="   ")
        
Output: 1   2   3   4   5   None   8   9

Today I took a look at the source code myself; let's analyze it:

def iter_pages(self, left_edge=2, left_current=2,
               right_current=5, right_edge=2):
    last = 0
    # Python 2-era source: xrange is range in Python 3.
    for num in xrange(1, self.pages + 1):
        # Yield num only if it falls in the left edge, in the window
        # around the current page, or in the right edge.
        if num <= left_edge or \
           (num > self.page - left_current - 1 and \
            num < self.page + right_current) or \
           num > self.pages - right_edge:
            if last + 1 != num:
                # Page numbers were skipped: yield None to mark the gap.
                yield None
            yield num
            last = num  # remember the last page number that was yielded
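With the example above (current page 1, 9 pages in total) and the default arguments, the three conditions select pages 1-5 (the left edge plus the window around the current page) and pages 8-9 (the right edge). Pages 6 and 7 match none of them, so when the loop reaches 8 we have `last + 1 != num` and a None is yielded to mark the gap. Below is a minimal sketch, reusing the User query from the example above, of two ways to deal with that: render the None as an ellipsis, or pass arguments wide enough that every page number satisfies the first condition, so no gap (and therefore no None) can appear.

if __name__ == "__main__":
    user = User.query.paginate(1, 2)

    # Option 1: keep the defaults and print "..." in place of None.
    print("   ".join("..." if p is None else str(p) for p in user.iter_pages()))
    # -> 1   2   3   4   5   ...   8   9

    # Option 2: make left_edge cover every page so that num <= left_edge
    # always holds; no page is skipped, so no None is yielded.
    print(list(user.iter_pages(left_edge=user.pages)))
    # -> [1, 2, 3, 4, 5, 6, 7, 8, 9]

Option 1 follows the common pattern of rendering None as an ellipsis in the page bar; option 2 matches the fix described at the top of the post, at the cost of always listing every page number.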
      