Scrape the pan-cancer article titles from the collection page (24 papers at the time of writing)
get_title <- function(url){
  web <- xml2::read_html(url, encoding = "utf-8")               # download and parse the page with xml2
  urlpage <- XML::htmlParse(as.character(web), asText = TRUE)   # re-parse with the XML package so xpathSApply can be used
  title <- XML::xpathSApply(urlpage, '//*[@id="content"]/div[3]/section/article/div[1]/h3/a/articletitle') # nodes holding the article titles
  title <- lapply(title, function(name){name[[1]]})             # keep the text node inside each matched element
  title
}
title <- get_title(url = 'https://www.nature.com/collections/afdejfafdb/')
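A quick sanity check (assuming the XPath above still matches the current layout of the collection page): the collection had 24 papers, so the list should have 24 elements.
length(title)   # expect 24 if the XPath still matches the page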
sink("title.txt")
for (data in title) {
print(data)
#print() writes the non-breaking space as the literal text <U+00A0>; the character itself can be written as "\u00A0"
}
sink()
Word frequency statistics
file <- scan('title.txt',sep='\n',what='',encoding="UTF-8")
file <- gsub('<U\\+00A0>', ' ', file) #strip every literal <U+00A0> marker written by print()
txtList <- lapply(file, strsplit, "\\s+") #split each line on whitespace
txtChar <- tolower(unlist(txtList)) #flatten to a vector and lowercase every word
txtChar <- txtChar[nchar(txtChar) > 2] #keep only words longer than 2 characters
txtChar <- table(txtChar) #tabulate word frequencies
#grepl works like grep but returns a logical vector: does each element match the pattern?
txtChar <- txtChar[!grepl('^[0-9-,]+$', names(txtChar), perl = TRUE)] #drop purely numeric tokens
txtChar <- txtChar[!grepl('^(and|the|of)$', names(txtChar), perl = TRUE)] #drop the stopwords "and", "the", "of"
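If the stopword list grows beyond a few words, matching against an explicit character vector is easier to maintain than extending the regular expression. A minimal alternative sketch (the word list below is only an example):
stop_words <- c("and", "the", "of", "for", "with", "from")   #example list, extend as needed
txtChar <- txtChar[!names(txtChar) %in% stop_words]          #keep only words not in the stopword vector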
data <- data.frame(txtChar) #two columns: txtChar (word) and Freq (count)
wordcloud2::wordcloud2(data, size = 1, shape = 'star')
wordcloud::wordcloud(words = data$txtChar, freq = data$Freq, min.freq = 1,
max.words=200, random.order=FALSE, rot.per=0.35,
colors=RColorBrewer::brewer.pal(8, "Dark2"))
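wordcloud2() returns an htmlwidget that only renders in the viewer pane or a browser; if you want to keep the interactive cloud, it can be written to a standalone HTML file with htmlwidgets (the file name below is just a placeholder):
wc <- wordcloud2::wordcloud2(data, size = 1, shape = 'star')
htmlwidgets::saveWidget(wc, "pan_cancer_wordcloud.html", selfcontained = TRUE)  #save the interactive widget as HTML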
PS
I wasn't sure how to work with the objects inside title, so I simply wrote them to a file and read them back in.
If anyone knows a cleaner way, please leave a comment. Thanks!
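One possible answer to the PS (a hedged sketch, not tested against the live page): xpathSApply can apply XML::xmlValue to each matched node, which yields a plain character vector of titles, so the sink()/scan() round trip through title.txt is not needed. The helper name get_title_chr is hypothetical.
get_title_chr <- function(url){
  web <- xml2::read_html(url, encoding = "utf-8")
  urlpage <- XML::htmlParse(as.character(web), asText = TRUE)
  titles <- XML::xpathSApply(urlpage,
                             '//*[@id="content"]/div[3]/section/article/div[1]/h3/a/articletitle',
                             XML::xmlValue)   #xmlValue extracts the text of each node as a character string
  gsub("\u00A0", " ", titles)                 #replace non-breaking spaces while still in memory
}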