from urllib.request import Request, urlopen
import bs4

titles = []
texts = []
# A browser-like User-Agent so the site serves the pages normally.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
for i in range(2, 7):
    # Fetch listing pages 2-6 of the news channel.
    url = 'http://www.cfbond.com/in/cfkxlb/index_{}.shtml'.format(i)
    req = Request(url=url, headers=header)
    html = urlopen(req)
    bs = bs4.BeautifulSoup(html, 'html.parser')
    info = bs.find_all('li')
    for item in info:
        # Each listing entry is an <li>; pull the headline from its <h2>.
        tit = item.find('h2', class_='pubList_tit')
        if tit is None:
            continue  # skip <li> elements that are not article entries
        titles.append(tit.get_text().replace('\r', '').replace(' ', '').replace('\n', ''))
        # Follow the article link and scrape the body text.
        article_url = item.find('a')['href']
        req = Request(url=article_url, headers=header)
        html = urlopen(req)
        article_bs = bs4.BeautifulSoup(html, 'html.parser')
        try:
            texts.append(article_bs.find('div', class_='s_xlLContCRC').get_text()
                         .replace('\r', '').replace(' ', '').replace('\n', '').replace('\t', ''))
        except AttributeError:
            # Article pages without the expected content div get a placeholder.
            texts.append('None')
# Append the numbered titles and article bodies to a text file.
# The with statement closes the file automatically, so no explicit close() is needed.
with open('work.txt', 'a', encoding='utf-8') as f:
    for n in range(1, len(titles) + 1):
        f.write('{0} {1}\n{2}\n'.format(n, titles[n - 1], texts[n - 1]))