using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Xml;
using System.Xml.XPath;
/// <summary>
/// Parses an RSS 2.0 ("item") or Atom ("entry") feed document into a list of
/// Hashtables. After parsing, every Hashtable is guaranteed to carry non-null
/// values for the keys "link", "title", "category", "pubdate" and "description".
/// </summary>
public class RssDoc
{
    XmlDocument doc;        // feed document supplied by the caller
    List<Hashtable> list;   // parsed items, exposed through Items

    /// <summary>
    /// Creates an empty parser. Items stays null until a document is parsed.
    /// </summary>
    public RssDoc()
    {
    }

    /// <summary>
    /// Creates a parser for the given feed document and parses it immediately.
    /// </summary>
    /// <param name="doc">The RSS/Atom XML document; must not be null.</param>
    public RssDoc(XmlDocument doc)
    {
        this.doc = doc;
        Load();
    }

    /// <summary>
    /// Downloads an XML feed from the network.
    /// </summary>
    /// <param name="url">Feed URL.</param>
    /// <param name="timeOut">Request timeout in seconds.</param>
    /// <param name="useProxy">Whether to route the request through the built-in HTTP proxy.</param>
    /// <returns>The loaded document, or null when the download or XML parse fails.</returns>
    public static XmlDocument GetDoc(string url, int timeOut, bool useProxy)
    {
        XmlDocument result = new XmlDocument();
        try
        {
            WebRequest req = WebRequest.Create(url);
            if (useProxy)
            {
                // NOTE(review): hard-coded proxy address inherited from the
                // original code — consider making this configurable.
                req.Proxy = new WebProxy("http://63.149.98.16:80/", true);
            }
            req.Timeout = timeOut * 1000;   // WebRequest.Timeout is in milliseconds

            // using-blocks guarantee the response and stream are released
            // even when XmlDocument.Load throws.
            using (WebResponse res = req.GetResponse())
            using (Stream rssStream = res.GetResponseStream())
            {
                result.Load(rssStream);
            }
        }
        catch
        {
            // Best-effort download: any network or XML failure yields null,
            // which callers must check before constructing an RssDoc.
            result = null;
        }
        return result;
    }

    /// <summary>
    /// Walks the document's "item" (RSS) or "entry" (Atom) nodes and fills
    /// the list with one Hashtable per feed entry.
    /// </summary>
    private void Load()
    {
        list = new List<Hashtable>();

        XmlNodeList nodes = doc.GetElementsByTagName("item");
        if (nodes == null || nodes.Count == 0)
            nodes = doc.GetElementsByTagName("entry");   // Atom fallback
        if (nodes == null || nodes.Count == 0)
            return;

        foreach (XmlNode node in nodes)
        {
            Hashtable ht = new Hashtable();
            try
            {
                foreach (XmlNode n in node.ChildNodes)
                {
                    string name = n.Name.ToLower();

                    if (name.Contains("link"))
                    {
                        // Atom links carry the URL in an href attribute;
                        // RSS links carry it as element text.
                        if (n.Attributes["href"] != null)
                            ht["link"] = n.Attributes["href"].Value.Trim();
                        else
                            ht["link"] = n.InnerText.Trim();
                        continue;
                    }
                    if (name.Contains("title"))
                    {
                        ht["title"] = n.InnerText.Trim();
                        continue;
                    }
                    if (name.Contains("category"))
                    {
                        // Multiple categories are joined into one comma-separated string.
                        if (ht["category"] == null)
                            ht["category"] = n.InnerText.Trim();
                        else
                            ht["category"] = ht["category"].ToString() + "," + n.InnerText.Trim();
                        continue;
                    }
                    if (name.Contains("date"))
                    {
                        ht["pubdate"] = NormalizeDate(n.InnerText);
                        continue;
                    }
                    if (name.Contains("description"))
                    {
                        ht["description"] = n.InnerText.Trim();
                        continue;
                    }
                    if (name.Contains("content"))
                    {
                        // content:encoded (full body) overrides a plain description.
                        ht["description"] = n.InnerText.Trim();
                        continue;
                    }
                    if (name.Contains("summary"))
                    {
                        // Atom summary is only used when nothing better was found.
                        if (ht["description"] == null)
                            ht["description"] = n.InnerText.Trim();
                    }
                }
            }
            catch
            {
                // A malformed entry is skipped rather than aborting the whole feed.
                continue;
            }

            // Guarantee every key is present and non-null for consumers.
            if (ht["link"] == null) ht["link"] = "";
            if (ht["title"] == null || ht["title"].ToString() == "") ht["title"] = ht["link"].ToString();
            if (ht["category"] == null) ht["category"] = "";
            if (ht["pubdate"] == null) ht["pubdate"] = DateTime.Now.ToString();
            if (ht["description"] == null) ht["description"] = "";

            list.Add(ht);
        }
    }

    /// <summary>
    /// Normalizes the many date formats seen in feeds (RFC 822 "Tue, 01 Jan 2008
    /// 12:00:00 GMT", ISO 8601 "2008-01-01T12:00:00.000") into a parseable form,
    /// falling back to the current time when the value is empty or unparseable.
    /// </summary>
    /// <param name="date">Raw date text from the feed node.</param>
    /// <returns>The parsed date rendered via DateTime.ToString().</returns>
    private static string NormalizeDate(string date)
    {
        if (date == "")
            return DateTime.Now.ToString();

        // Drop the RFC 822 weekday prefix ("Tue, ...").
        if (date.Contains(","))
            date = date.Substring(date.IndexOf(",") + 1);
        date = date.Trim();

        // Drop a trailing time-zone token ("... GMT" / "... +0800").
        if (date.Split(' ').Length > 4)
            date = date.Replace(date.Split(' ')[4], "");

        // Drop fractional seconds ("12:00:00.123").
        if (date.Contains("."))
            date = date.Split('.')[0].Trim();

        // ISO 8601 separator → space so DateTime.Parse accepts it.
        date = date.Replace("T", " ");

        // Trim anything after two-digit seconds (e.g. a fused zone suffix).
        if (date.Substring(date.LastIndexOf(":") + 1).Length > 2)
            date = date.Substring(0, date.LastIndexOf(":") + 3);

        try
        {
            return DateTime.Parse(date.Trim()).ToString();
        }
        catch
        {
            return DateTime.Now.ToString();
        }
    }

    /// <summary>The parsed feed items; null if no document has been loaded.</summary>
    public List<Hashtable> Items
    {
        get
        {
            return list;
        }
    }
}
Usage:

XmlDocument doc = RssDoc.GetDoc(url, 3, false);
RssDoc rssdoc = new RssDoc(doc);
string title = "";
string link = "";
string description = "";
string cate = "";
string time = "";
foreach (Hashtable hs in rssdoc.Items)
{
    description = hs["description"].ToString();
    title = hs["title"].ToString();
    link = hs["link"].ToString();
    time = hs["pubdate"].ToString();
    cate = hs["category"].ToString();
}
本文介绍了一个RSS解析器的实现细节,包括如何从网络获取RSS源并解析为一系列条目。该解析器支持多种RSS标签,并能正确处理日期格式。
&spm=1001.2101.3001.5002&articleId=81578074&d=1&t=3&u=fe256ba4e8d241a5a90e1d4a0b5dc6e9)
1926

被折叠的 条评论
为什么被折叠?



