一个简单的机器人例子(Java)
import java.awt.*;
import java.net.*;
import java.io.*;
import java.lang.*;
import java.util.*;
class node{ private Object data; private node next; private node prev; public node(Object o){
data = o; prev = next = null;
} public String toString(){
if(next!=null)return data.toString() + "/n"+ next.toString(); return data.toString();
} public node getNext(){return next;}
public void setNext(node n){next = n;}
public node getPrev(){return prev;}
public void setPrev(node n){prev = n;}
public Object getData(){return data;}
}
/** A minimal singly-linked FIFO list used as a hash-table bucket. */
class linkedlist{
    node head;   // first element, null when the list is empty
    node tail;   // last element, null when the list is empty

    /** Builds an empty list. */
    public linkedlist(){
        head = null;
        tail = null;
    }

    /** Delegates to the head node, which renders the whole chain. */
    public String toString(){
        return (head == null) ? "Empty list" : head.toString();
    }

    /** Appends o after the current tail. */
    public void insert(Object o){
        node fresh = new node(o);
        if(tail == null){
            head = fresh;
            tail = fresh;
        }else{
            tail.setNext(fresh);
            tail = fresh;
        }
    }

    /** Linear scan using equals(). */
    public boolean contains(Object o){
        node cursor = head;
        while(cursor != null){
            if(o.equals(cursor.getData())){
                return true;
            }
            cursor = cursor.getNext();
        }
        return false;
    }

    /** Removes and returns the head element, or null when empty. */
    public Object pop(){
        if(head == null){
            return null;
        }
        Object front = head.getData();
        head = head.getNext();
        if(head == null){
            tail = null;   // popped the final element
        }
        return front;
    }

    /** True when the list holds no elements. */
    public boolean isEmpty(){
        return head == null;
    }
}
/**
 * A circular doubly-linked list with a one-shot enumeration cursor.
 * tail.getNext() is always the head; subclasses choose the insertion end
 * to act as a stack or a queue.
 */
class list{
    protected node tail;     // last node; tail.getNext() is the head; null when empty
    protected node ptr;      // enumeration cursor used by get()
    private boolean stop;    // set once get() has handed out the head a second time

    public list(){
        ptr=tail=null;
        stop=false;
    }

    public boolean isEmpty(){return tail==null;}

    /** Restarts the enumeration performed by get(). */
    public void reset(){
        stop=false;
        ptr=tail;
    }

    /** One element per line, head first, tail last. */
    public String toString(){
        if(tail==null)return "Empty list";
        String ret="";
        // BUG FIX: the original appended the literal "/n" instead of "\n".
        for(node n = tail.getNext();n!=tail;n=n.getNext())
            ret+=n.getData().toString()+"\n";
        ret+=tail.getData().toString();
        return ret;
    }

    /**
     * Enumerates the list: each call returns the next element starting at the
     * head, then null once every element has been returned. Call reset() to
     * start over.
     */
    public Object get(){
        if(ptr==null)return null;
        ptr = ptr.getNext();
        if(ptr==tail.getNext()){    // cursor is back at the head
            if(stop)return null;    // second time around: enumeration done
            stop=true;
            return tail.getNext().getData();
        }
        return ptr.getData();
    }

    /**
     * Inserts o either just before the head (attail=false, stack behaviour)
     * or after the current tail (attail=true, queue behaviour).
     */
    public void insert(Object o, boolean attail){
        node nn = new node(o);
        if(tail==null){        // first element links to itself
            nn.setNext(nn);
            nn.setPrev(nn);
            ptr=tail=nn;
            return;
        }
        if(attail){            // splice between old tail and head, then advance tail
            tail.getNext().setPrev(nn);
            nn.setNext(tail.getNext());
            tail.setNext(nn);
            nn.setPrev(tail);
            tail=nn;
        }else{                 // splice between tail and old head: nn becomes the new head
            nn.setNext(tail.getNext());
            nn.setPrev(tail);
            tail.setNext(nn);
            nn.getNext().setPrev(nn);
        }
    }

    /** No-op hook; subclasses decide which end insertion targets. */
    public void insert(Object o){}
}
/** LIFO adapter over list: every element is inserted before the head. */
class stack extends list{
    public stack(){super();}

    /** Pushes o at the head so the newest element is enumerated first. */
    public void insert(Object o){insert(o, false);}
}

/** FIFO adapter over list: inserts at the tail, pops from the head. */
class queue extends list{
    public queue(){super();}

    /** Enqueues o at the tail. */
    public void insert(Object o){insert(o, true);}

    /** String form of the head element, or "" when empty. */
    public String peek(){
        if(tail==null)return "";
        return tail.getNext().getData().toString();
    }

    /** Removes and returns the head element, or null when empty. */
    public Object pop(){
        if(tail==null)return null;
        Object ret = tail.getNext().getData();
        if(tail.getNext()==tail){    // removing the last element empties the list
            tail=ptr=null;
        }else{
            // keep the enumeration cursor off the removed node
            if(tail.getNext()==ptr)ptr=ptr.getNext();
            tail.setNext(tail.getNext().getNext());
            // BUG FIX: repair the new head's prev pointer; the original left
            // it dangling on the removed node.
            tail.getNext().setPrev(tail);
        }
        return ret;
    }
}
/** Fixed-size chained hash table used to remember every URL seen so far. */
class hashtable{
    private Vector table;   // one linkedlist bucket per slot
    private int size;       // number of buckets (a prime)

    public hashtable(){
        size = 991;
        table = new Vector();
        for(int i=0;i<size;i++){
            table.add(new linkedlist());
        }
    }

    /** Maps o's hashCode onto a bucket index in [0, size). */
    private int indexFor(Object o){
        int index = o.hashCode() % size;
        if(index<0)index+=size;   // Java's % can be negative for negative hashes
        return index;
    }

    /** Adds o to its bucket; duplicates are not checked here. */
    public void insert(Object o){
        linkedlist ol = (linkedlist)table.get(indexFor(o));
        ol.insert(o);
    }

    /** True if some bucket already holds an object equal to o. */
    public boolean contains(Object o){
        return ((linkedlist)(table.get(indexFor(o)))).contains(o);
    }

    /** Dumps every non-empty bucket, each preceded by a newline. */
    public String toString(){
        String ret ="";
        for(int i=0;i<size;i++){
            if(!((linkedlist)(table.get(i))).isEmpty()){
                // BUG FIX: the original appended the literal "/n" instead of "\n".
                ret+="\n";
                ret+=table.get(i).toString();
            }
        }
        return ret;
    }
}
class spider implements Runnable{
public queue todo; public stack done; public stack errors; public stack omittions; private hashtable allsites; private String last=""; int maxsites; int visitedsites; int TIMEOUT; String base; String []badEndings2 = {"ps", "gz"}; String []badEndings3 = {"pdf", "txt", "zip", "jpg", "mpg", "gif", "mov", "tut", "req", "abs", "swf", "tex", "dvi", "bin", "exe", "rpm"}; String []badEndings4 = {"jpeg", "mpeg"};
public spider(String starturl, int max, String b){ TIMEOUT = 5000;
base = b;
allsites = new hashtable();
todo = new queue();
done = new stack();
errors = new stack();
omittions = new stack();
try{ URL u = new URL(starturl);
todo.insert(u);
}catch(Exception e){ System.out.println(e);
errors.insert("bad starting url "+starturl+", "+e.toString()); } maxsites = max;
visitedsites = 0;
}
/* * how many millisec to wait for each page */ public void setTimer(int amount){ TIMEOUT = amount; }
/* * strips the '#' anchor off a url */ private URL stripRef(URL u){
try{
return new URL(u.getProtocol(), u.getHost(), u.getPort(), u.getFile());
}catch(Exception e){return u;}
}
/* * adds a url for future processing */ public void addSite(URL toadd){
if(null!=toadd.getRef())toadd = stripRef(toadd);
if(!allsites.contains(toadd)){
allsites.insert(toadd);
if(!toadd.toString().startsWith(base)){
omittions.insert("foreign URL: "+toadd.toString());
return;
} if(!toadd.toString().startsWith("http") && !toadd.toString().startsWith("HTTP")){
omittions.insert("ignoring URL: "+toadd.toString());
return;
}
String s = toadd.getFile();
String last="";
String []comp={};
if(s.charAt(s.length()-3)=='.'){
last = s.substring(s.length()-2);
comp = badEndings2;
}else if(s.charAt(s.length()-4)=='.'){
last = s.substring(s.length()-3);
comp = badEndings3;
}else if(s.charAt(s.length()-5)=='.'){
last = s.substring(s.length()-4);
comp = badEndings4;
} for(int i=0;i<comp.length;i++){
if(last.equalsIgnoreCase(comp[i])){//loop through all bad extensions omittions.insert("ignoring URL: "+toadd.toString());
return;
} } todo.insert(toadd);
} }
/* * true if there are pending urls and the maximum hasn't been reached */ public boolean hasMore(){
return !todo.isEmpty() && visitedsites<maxsites;
}
/* * returns the next site, works like enumeration, will return new values each time */ private URL getNextSite(){
last = todo.peek();
visitedsites++;
return (URL)todo.pop();
}
/* * Just to see what we are doing now... */ public String getCurrent(){
return last;
}
/* * process the next site */ public void doNextSite(){
URL current = getNextSite();
if(current==null)return;
try{ //System.err.println("Processing #"+visitedsites+": "+current); parse(current);
done.insert(current);
} catch(Exception e){
errors.insert("Bad site: "+current.toString()+", "+e.toString()); } }
public void run(){
while(hasMore())doNextSite();
}
/* * to print out the internal data structures */ public String toString(){return getCompleted()+getErrors();} private String getErrors(){
if(errors.isEmpty())return "No errors/n";
else return "Errors:/n"+errors.toString()+"/nEnd of errors/n";
}
private String getCompleted(){
return "Completed Sites:/n"+done.toString()+"/nEnd of completed sites/n"; }
/* * Parses a web page at (site) and adds all the urls it sees */ private void parse(URL site) throws Exception{ String source=getText(site);
String title=getTitle(source);
if(title.indexOf("404")!=-1 || title.indexOf("Error")!=-1 || title.indexOf("Not Found")!=-1){ throw new Exception (("404, Not Found: "+site)); } int loc, beg;
boolean hasLT=false;
boolean hasSp=false;
boolean hasF=false;
boolean hasR=false; boolean hasA=false;
boolean hasM=false; boolean hasE=false;
for(loc=0;loc<source.length();loc++){
char c = source.charAt(loc);
if(!hasLT){
hasLT = (c=='<');
}
//search for "<a "
else if(hasLT && !hasA && !hasF){
if(c=='a' || c=='A')hasA=true;
else if(c=='f' || c=='F')hasF=true;
else hasLT=false;
}else if(hasLT && hasA && !hasF && !hasSp){
if(c==' ' || c=='/t' || c=='/n')hasSp=true;
else hasLT = hasA = false;
}
//search for "<frame " else if(hasLT && hasF && !hasA && !hasR){
if(c=='r' || c=='R')hasR=true;
else hasLT = hasF = false;
}else if(hasLT && hasF && hasR && !hasA){
if(c=='a' || c=='A')hasA=true;
else hasLT = hasF = hasR = false;
}else if(hasLT && hasF && hasR && hasA && !hasM){ if(c=='m' || c=='M')hasM=true;
else hasLT = hasF = hasR = hasA = false;
}else if(hasLT && hasF && hasR && hasA && hasM && !hasE){ if(c=='e' || c=='E')hasE=true;
else hasLT = hasF = hasR = hasA = hasM = false;
}else if(hasLT && hasF && hasR && hasA && hasM && hasE && !hasSp){ if(c==' ' || c=='/t' || c=='/n')hasSp=true;
else hasLT = hasF = hasR = hasA = hasM = hasE = false;
} //found "<frame " else if(hasLT && hasF && hasR && hasA && hasM && hasE && hasSp){
hasLT = hasF = hasR = hasA = hasM = hasE = hasSp = false;
beg = loc;
loc = source.indexOf(">", loc);
if(loc==-1){
errors.insert("malformed frame at "+site.toString()); loc = beg;
} else{
try{
parseFrame(site, source.substring(beg, loc));
} catch(Exception e){
errors.insert("while parsing "+site.toString()+", error parsing frame: "+e.toString());
} } }
//found "<a " else if(hasLT && hasA && hasSp && !hasF){
hasLT = hasA = hasSp = false;
beg = loc;
loc = source.indexOf(">", loc);
if(loc==-1){ errors.insert("malformed linked at "+site.toString());
loc = beg;
} else{ try{
parseLink(site, source.substring(beg, loc));
} catch(Exception e){
errors.insert("while parsing "+site.toString()+", error parsing link: "+e.toString());
} } } } } /* * parses a frame */ private void parseFrame(URL at_page, String s) throws Exception{ int beg=s.indexOf("src");
if(beg==-1)beg=s.indexOf("SRC");
if(beg==-1)return;//doesn't have a src, ignore
beg = s.indexOf("=", beg);
if(beg==-1)throw new Exception("while parsing "+at_page.toString()+", bad frame, missing /'=/' after src: "+s);
int start = beg;
for(;beg<s.length();beg++){
if(s.charAt(beg)=='/'')break;
if(s.charAt(beg)=='/"')break; } int end=beg+1;
for(;end<s.length();end++){
if(s.charAt(beg)==s.charAt(end))break; } beg++;
if(beg>=end){//missing quotes... just take the first token after "src=" for(beg=start+1;beg<s.length() && (s.charAt(beg)==' ');beg++){} for(end=beg+1;end<s.length() && (s.charAt(beg)!=' ') && (s.charAt(beg)!='>');end++){}
}
if(beg>=end){
errors.insert("while parsing "+at_page.toString()+", bad frame: "+s); return;
}
String linkto=s.substring(beg,end);
if(linkto.startsWith("mailto:")||linkto.startsWith("Mailto:"))return;
if(linkto.startsWith("javascript:")||linkto.startsWith("Javascript:"))
return;
if(linkto.startsWith("news:")||linkto.startsWith("Javascript:"))return;
try{
addSite(new URL(at_page, linkto));
return;
}catch(Exception e1){} try{
addSite(new URL(linkto));
return;
}catch(Exception e2){} try{
URL cp = new URL(at_page.toString()+"/index.html");
System.out.println("attemping to use "+cp);
addSite(new URL(cp, linkto));
return;
}catch(Exception e3){}
errors.insert("while parsing "+at_page.toString()+", bad frame:
"+linkto+", formed from: "+s);
}
/* * given a link at a URL, will parse it and add it to the list of sites to do */ private void parseLink(URL at_page, String s) throws Exception{
//System.out.println("parsing link "+s);
int beg=s.indexOf("href");
if(beg==-1)beg=s.indexOf("HREF");
if(beg==-1)return;//doesn't have a href, must be an anchor
beg = s.indexOf("=", beg);
if(beg==-1)throw new Exception("while parsing "+at_page.toString()+", bad
link, missing /'=/' after href: "+s);
int start = beg;
for(;beg<s.length();beg++){
if(s.charAt(beg)=='/'')break;
if(s.charAt(beg)=='/"')break; } int end=beg+1;
for(;end<s.length();end++){
if(s.charAt(beg)==s.charAt(end))break; }
beg++;
if(beg>=end){//missing quotes... just take the first token after "href=" for(beg=start+1;beg<s.length() && (s.charAt(beg)==' ');beg++){} for(end=beg+1;end<s.length() && (s.charAt(beg)!=' ') && (s.charAt(beg)!='>');end++){}
}
if(beg>=end){
errors.insert("while parsing "+at_page.toString()+", bad href: "+s); return; }
String linkto=s.substring(beg,end);
if(linkto.startsWith("mailto:")||linkto.startsWith("Mailto:"))return;
if(linkto.startsWith("javascript:")||linkto.startsWith("Javascript:"))return;
if(linkto.startsWith("news:")||linkto.startsWith("Javascript:"))return;
try{
addSite(new URL(at_page, linkto)); return; }catch(Exception e1){}
try{
addSite(new URL(linkto));
return;
}catch(Exception e2){} try{
addSite(new URL(new URL(at_page.toString()+"/index.html"), linkto)); return;
}catch(Exception e3){}
errors.insert("while parsing "+at_page.toString()+", bad link: "+linkto+", formed from: "+s);
}
/* * gets the title of a web page with content s */ private String getTitle(String s){ try{ int beg=s.indexOf("<title>");
if(beg==-1)beg=s.indexOf("<TITLE>");
int end=s.indexOf("</title>");
if(end==-1)end=s.indexOf("</TITLE>");
return s.substring(beg,end);
} catch(Exception e){return "";} }
/* * gets the text of a web page, times out after 10s */ private String getText(URL site) throws Exception { urlReader u = new urlReader(site);
Thread t = new Thread(u);
t.setDaemon(true);
t.start();
t.join(TIMEOUT);
String ret = u.poll();
if(ret==null){
throw new Exception("connection timed out");
}else if(ret.equals("Not html")){
throw new Exception("Not an HTML document");
} return ret; }
/* * returns how many sites have been visited so far */ public int Visited(){return visitedsites;} }
/**
 * Fetches the body of one URL on its own thread so the caller can enforce a
 * timeout with Thread.join().  poll() returns:
 *   - the page text once the fetch finished,
 *   - the sentinel "Not html" when the content type is not text-like,
 *   - null while still running or after any failure.
 */
class urlReader implements Runnable{
    URL site;   // page to download
    String s;   // result; stays null until the fetch completes

    public urlReader(URL u){
        site = u;
        s=null;
    }

    public void run(){
        try{
            URLConnection u = site.openConnection();
            String type = u.getContentType();
            // ROBUSTNESS FIX: getContentType() may return null
            if(type==null ||
               (type.indexOf("text")==-1 &&
                type.indexOf("txt")==-1 &&
                type.indexOf("HTM")==-1 &&
                type.indexOf("htm")==-1)){
                System.out.println("bad content type "+type+" at site "+site);
                // BUG FIX: the original assigned the sentinel to the local
                // variable ("ret = \"Not html\"") and returned, so poll()
                // stayed null and callers misreported a timeout instead of
                // "Not an HTML document".
                s = "Not html";
                return;
            }
            InputStream in = u.getInputStream();
            BufferedInputStream bufIn = new BufferedInputStream(in);
            // PERFORMANCE FIX: accumulate in a StringBuilder instead of
            // repeated String concatenation (was O(n^2)).
            StringBuilder ret = new StringBuilder();
            int data;
            while(true){
                data = bufIn.read();
                if (data == -1) break;   // EOF
                ret.append((char) data);
            }
            bufIn.close();   // release the connection's stream
            s = ret.toString();
        }catch(Exception e){s=null;}   // best-effort: caller treats null as timeout/failure
    }

    /** Result of the fetch, or null if not finished / failed. */
    public String poll(){return s;}
}
public class spidergui extends Frame{
private spider s;
private Color txtColor;
private Color errColor;
private Color topColor;
private Color numColor;
private Color curColor;
public spidergui(spider spi, String title){
super(title);
curColor = new Color(40, 40, 200);
txtColor = new Color(0, 0, 0);
errColor = new Color(255, 0, 0);
topColor = new Color(40, 40, 100);
numColor = new Color(50, 150, 50);
s=spi;
setBounds(0, 0, 800, 600);
show();
toFront();
repaint();
} public void endShow(){
System.out.println(s);
hide();
dispose();
} public void paint(Graphics g){
super.paint(g);
s.todo.reset();
s.done.reset();
s.errors.reset();
s.omittions.reset();
String txt;
Object o;
g.setColor(curColor);
g.setFont(new Font("arial", Font.PLAIN, 18));
String cur = s.getCurrent();
if(cur.length()>80)g.drawString(
cur.substring(0, 40)+
" . . . "+
cur.substring(cur.length()-30, cur.length()),
50, 50);
else g.drawString(cur, 50, 50);
g.setColor(numColor);
g.setFont(new Font("arial", Font.BOLD, 24));
g.drawString(Integer.toString(s.Visited()), 350, 80);
g.setFont(new Font("arial", Font.PLAIN, 14));
g.setColor(topColor);
g.drawString("To Do:", 100, 80);
g.drawString("Completed:", 500, 80);
g.drawString("Ignored:", 500, 250);
g.drawString("Errors:", 100, 420);
g.setColor(txtColor);
g.setFont(new Font("arial", Font.PLAIN, 12));
for(int i=0;i<23 && (o=s.todo.get())!=null;i++){
txt = Integer.toString(i+1) + ": "+o.toString();
if(txt.length()>65)g.drawString(
txt.substring(0, 38) +
" . . . " +
txt.substring(txt.length()-18, txt.length()),
20, 100+13*i);
else g.drawString(txt, 20, 100+13*i);
} for(int i=0;i<10 && (o=s.done.get())!=null;i++){
txt = Integer.toString(i+1) + ": "+o.toString();
if(txt.length()>60)g.drawString(txt.substring(0, 57)+"...", 400,
100+13*i);
else g.drawString(txt, 400, 100+13*i);
} for(int i=0;i<10 && (o=s.omittions.get())!=null;i++){
txt = Integer.toString(i+1) + ": "+o.toString();
if(txt.length()>60)g.drawString(txt.substring(0, 57)+"...", 400,
270+13*i);
else g.drawString(txt, 400, 270+13*i);
} g.setColor(errColor);
for(int i=0;i<10 && (o=s.errors.get())!=null;i++){
txt = Integer.toString(i+1) + ": "+o.toString();
g.drawString(txt, 20, 440+13*i);
}
} public void run(){
repaint();
while(s.hasMore()){
repaint();
s.doNextSite();
}
repaint();
}
public static void main(String []args){
int max = 5;
String site="";
String base="";
int time=0;
for(int i=0;i<args.length;i++){
if(args[i].startsWith("-max=")){
max=Integer.parseInt(args[i].substring(5,args[i].length()));
}
else if(args[i].startsWith("-time=")){
time=Integer.parseInt(args[i].substring(6,args[i].length()));
}
else if(args[i].startsWith("-init=")){
site=args[i].substring(6,args[i].length());
}
else if(args[i].startsWith("-base=")){
base=args[i].substring(6,args[i].length());
} else if(args[i].startsWith("-help")||args[i].startsWith("-?")){
System.out.println("additional command line switches:");
System.out.println("-max=N : to limit to N sites, default 5");
System.out.println("-init=URL : to set the initial site, REQUIRED");
System.out.println("-base=URL : only follow url's that start with
this");
System.out.println(" default /"/" (matches all URLs)");
System.out.println("-time=N : how many millisec to wait for each
page");
System.out.println(" default 5000 (5 seconds)");
System.exit(0);
} else System.err.println("unrecognized switch: "+args[i]+",
continuing");
}
if(site==""){
System.err.println("No initial site parameter!");
System.err.println("Use -init=<site> switch to set, or -help for more info.");
System.exit(1);
}
spider spi=new spider(site, max, base);
if(time>0)spi.setTimer(time);
spidergui s = new spidergui(spi, "Spider: "+site);
s.run();
System.out.println(spi);
}
}
互联网是一个庞大的非结构化的数据库,将数据有效的检索并组织呈现出来有着巨大的应用前景,尤其是类似RSS的以XML为基础的结构化的数据越来越多,内容的组织方式越来越灵活,检索组织并呈现会有着越来越广泛的应用范围,同时在时效性和可读性上也会有越来越高的要求。这一切的基础是爬虫,信息的来源入口。一个高效,灵活可扩展的爬虫对以上应用都有着无可替代的重要意义。
要设计一个爬虫,首先需要考虑的是效率。对于网络而言,基于TCP/IP的通信编程有几种方法。
第一种是单线程阻塞,这是最简单也最容易实现的一种,一个例子:在Shell中通过curl,pcregrep等一系统命令可以直接实现一个简单的爬虫,但同时它的效率问题也显而易见:由于是阻塞方式读取,dns解析,建立连接,写入请求,读取结果这些步骤上都会产生时间的延迟,从而无法有效的利用服务器的全部资源。
第二种是多线程阻塞。建立多个阻塞的线程,分别请求不同的url。相对于第一种方法,它可以更有效的利用机器的资源,特别是网络资源,因为无数线程在同时工作,所以网络会比较充分的利用,但同时对机器CPU资源的消耗也是比较大,在用户级多线程间的频繁切换对于性能的影响已经值得我们考虑。
第三种是单线程非阻塞。这是目前使用的比较多的一种做法,无论在client还是server都有着广泛的应用。在一个线程内打开多个非阻塞的连接,通过poll/epoll/select对连接状态进行判断,在第一时间响应请求,不但充分利用了网络资源,同时也将本机CPU资源的消耗降至最低。这种方法需要对dns请求,连接,读写操作都采用异步非阻塞操作,其中第一种比较复杂,可以采用adns作为解决方案,后面三个操作相对简单可以直接在程序内实现。
效率问题解决后就需要考虑具体的设计问题了。
url肯定需要一个单独的类进行处理,包括显示,分析url,得到主机,端口,文件数据。
然后需要对url进行排重,需要一个比较大的url Hash表。
如果还要对网页内容进行排重,则还需要一个Document Hash表。
爬过的url需要记录下来,由于量比较大,我们将它写到磁盘上,所以还需要一个FIFO的类(记作urlsDisk)。
现在需要爬的url同样需要一个FIFO类来处理,重新开始时,url会定时从爬过的url FIFO里取出来,写到这个FIFO里。正在运行的爬虫需要从这个FIFO里读数据出来,加入到主机类的url列表里。当然,也会从前一个FIFO里直接读url出来,不过优先级应该比这个里面出来的url低,毕竟是已经爬过的。
爬虫一般是对多个网站进行爬取,但在同时站点内dns的请求可以只做一次,这就需要将主机名独立于url,单独有一个类进行处理。
主机名解析完成后需要有一个解析完成的IP类与之对应,用于connect的时候使用。
HTML文档的解析类也要有一个,用来分析网页,取出里面的url,加入到urlsDisk。
再加上一些字符串,调度类,一个简单的爬虫基本上就完成了。
以上基本上是Larbin的设计思路,Larbin在具体实现上还有一些特殊的处理,例如带了一个webserver,以及对特殊文件的处理。Larbin有一点设计得不太好,就是慢的访问会越来越多,占用大量的连接,需要改进,另外如果对于大规模的爬虫,这仅仅实现了抓取的部分,要分布式的扩展还需要增加url的集中管理与调度以及前台spider的分布式算法。