Initializer Lists

This article covers how std::initializer_list is used in C++ and how it works under the hood. It explains how initializer_list simplifies constructor calls, and explores its internal mechanism: the array-based implementation, and why copying an initializer_list is only a shallow copy.

1. Introduction

C++11 extends brace initialization {} to essentially every initialization context and adds std::initializer_list<> so that user-defined types can, like the standard containers, accept a braced list holding any number of values.

2. initializer_list<>

1. Usage



(1) complex<int> com{1, 2}; behaves like a call to a two-parameter constructor, i.e. something of the shape P(int a, int b).

(2) Even without an initializer_list<int> constructor, P q{77, 5}; and P s = {77, 5}; still compile: the braced values are unpacked into separate arguments and dispatched to P(int a, int b);. When an initializer_list<int> overload does exist, it is preferred, as the sketch below shows.
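A minimal sketch of this overload-resolution behavior; the class name P follows the notes above, and the printed messages are illustrative:

```cpp
#include <initializer_list>
#include <iostream>

class P {
public:
    P(int a, int b) {
        std::cout << "P(int, int): " << a << ", " << b << '\n';
    }
    P(std::initializer_list<int> il) {
        std::cout << "P(initializer_list<int>), size = " << il.size() << '\n';
    }
};

int main() {
    P p(77, 5);      // parentheses: always P(int, int)
    P q{77, 5};      // braces prefer P(initializer_list<int>) when it exists
    P r{77, 5, 42};  // only viable through P(initializer_list<int>)
    P s = {77, 5};   // copy-list-initialization also picks the list overload
}
```

If the initializer_list<int> constructor is removed, q and s fall back to P(int, int), while r no longer compiles because no constructor takes three ints.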

2. Implementation of initializer_list<>


(1) initializer_list is implemented on top of an array

(2) The compiler materializes a backing array for the braced values and hands the array's begin pointer and its length to initializer_list. The constructor that accepts them is private, so only the compiler can build a non-empty initializer_list; user code cannot.
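A simplified sketch of the class, modeled on the GNU libstdc++ <initializer_list> header (member names follow that header; details vary across library versions):

```cpp
#include <cstddef>

template <class _E>
class initializer_list {
public:
    typedef _E          value_type;
    typedef const _E*   iterator;   // read-only: elements cannot be modified
    typedef std::size_t size_type;

private:
    iterator  _M_array;  // points into the compiler-generated backing array
    size_type _M_len;    // number of elements in that array

    // Private: only the compiler calls this, passing the backing array's
    // begin pointer and length.
    constexpr initializer_list(iterator __a, size_type __l)
        : _M_array(__a), _M_len(__l) {}

public:
    // User code can only construct an empty list.
    constexpr initializer_list() noexcept : _M_array(nullptr), _M_len(0) {}

    constexpr size_type size()  const noexcept { return _M_len; }
    constexpr iterator  begin() const noexcept { return _M_array; }
    constexpr iterator  end()   const noexcept { return begin() + size(); }
};
```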

3. The G2.9 array implementation
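
As an illustrative sketch (not the verbatim G2.9 header): such an early array is essentially a raw C array wrapped in a struct, with no constructor and no destructor of its own:

```cpp
#include <cstddef>

template <typename T, std::size_t N>
struct array {
    T elems[N];  // the only data member: a plain fixed-size C array

    T*          begin() { return &elems[0]; }
    T*          end()   { return begin() + N; }
    T&          operator[](std::size_t i) { return elems[i]; }
    std::size_t size() const { return N; }
};
```

Because there is no user-declared constructor, array<int, 3> a = {1, 2, 3}; fills elems by aggregate initialization, the same bare "pointer plus length" shape that initializer_list is built on.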


4. initializer_list makes only a shallow copy

Copying an initializer_list copies just the internal pointer and length; the underlying array is not duplicated. The original and the copy therefore view the same backing array, whose lifetime is tied to the context that created the braced list.
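A minimal demonstration (the function name show is illustrative):

```cpp
#include <initializer_list>
#include <iostream>

// Copying an initializer_list duplicates only the {pointer, length} pair,
// so both lists end up viewing the same backing array.
void show(std::initializer_list<int> il) {
    std::initializer_list<int> copy = il;  // shallow copy
    std::cout << std::boolalpha
              << (copy.begin() == il.begin())  // true: same backing array
              << '\n';
}

int main() {
    show({1, 2, 3});
}
```

The flip side is dangling: an initializer_list that is returned from a function or stored for later use can outlive its backing array, leaving the internal pointer invalid.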



3. Usage in the standard library

initializer_list overloads appear throughout the standard library: the containers gain initializer_list constructors, assignment, insert, and assign, and algorithms such as std::max and std::min accept braced lists directly.
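A short sketch of these uses:

```cpp
#include <algorithm>
#include <initializer_list>
#include <iostream>
#include <vector>

int main() {
    std::vector<int> v{2, 5, 7};      // vector(initializer_list<int>)
    v.insert(v.begin(), {0, 1});      // insert(pos, initializer_list<int>)
    v = {9, 8};                       // operator=(initializer_list<int>)

    std::cout << std::max({1, 5, 3}) << '\n';  // max(initializer_list): 5
    std::cout << std::min({1, 5, 3}) << '\n';  // min(initializer_list): 1

    for (int x : {10, 20, 30})        // range-for over a braced list
        std::cout << x << ' ';
    std::cout << '\n';
}
```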

