import java.io.IOException;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
public class Exec {
    public static double computeAngle(String[] doc1, String[] doc2) {
        // Treat each document as a binary term vector; assume each word
        // occurs at most once per document.
        int dotProduct = 0;
        double denominator;
        for (int i = 0; i < doc1.length; i++) {
            String word = doc1[i];
            for (int j = 0; j < doc2.length; j++) {
                if (word.equals(doc2[j])) {
                    dotProduct += 1;
                    break;
                }
            }
        }
        denominator = Math.sqrt(doc1.length) * Math.sqrt(doc2.length);
        double ratio = dotProduct / denominator;
        return Math.acos(ratio);
    }
    // For simplicity, return only the single best-matching document.
    public static Document docsLike(IndexReader reader, IndexSearcher searcher,
            int id) throws IOException {
        Document doc = reader.document(id);
        String[] s1 = (doc.get("subjects")).split(" ");
        int matchid = 0;
        double bestAngle = Double.MAX_VALUE;
        for (int i = 0; i < reader.maxDoc(); i++) {
            if (i != id) {
                Document tempdoc = reader.document(i);
                String[] s2 = (tempdoc.get("subjects")).split(" ");
                double angle = computeAngle(s1, s2);
                // System.out.print("***" + angle);
                if (angle < bestAngle) {
                    // A smaller angle means a higher cosine similarity.
                    bestAngle = angle;
                    matchid = i;
                }
            }
        }
        // System.out.println("\n***********" + bestAngle + "***********" + matchid);
        return reader.document(matchid);
    }
    public static void main(String[] args) throws IOException {
        IndexReader reader;
        IndexSearcher searcher;
        // Writer
        Directory directory = new RAMDirectory();
        IndexWriter writer = new IndexWriter(directory,
                new WhitespaceAnalyzer(), true);
        writer.setUseCompoundFile(true);
        String[] isbns = new String[] { "1", "2", "3", "4", "5", "6", "7", "8",
                "9", "10", "11" };
        String[] titles = new String[] { "Modern Art of Education",
                "Imperial Secrets of Health", "Tao Te Ching",
                "Gödel, Escher, Bach", "Mindstorms",
                "Java Development with Ant", "JUnit in Action",
                "Lucene in Action", "Tapestry in Action",
                "Extreme Programming Explained", "The Pragmatic Programmer" };
        String[] pubmonths = new String[] { "198106", "199401", "198810",
                "197903", "198001", "200208", "200310", "200406", "199910",
                "200403", "199910" };
        String[] categories = new String[] { "/education/pedagogy",
                "/health/alternative/chinese", "/philosophy/eastern",
                "/technology/computers/ai",
                "/technology/computers/programming/education",
                "/technology/computers/programming",
                "/technology/computers/programming",
                "/technology/computers/programming",
                "/technology/computers/programming",
                "/technology/computers/programming/methodology",
                "/technology/computers/programming" };
        String[] subjects = new String[] {
                "education philosophy psychology practice Waldorf",
                "diet chinese medicine qi gong health herbs",
                "taoism chinese ideas",
                "artificial intelligence number theory mathematics music",
                "children computers powerful ideas LOGO education",
                "apache jakarta ant build tool junit java development",
                "junit unit testing mock objects",
                "lucene search programming",
                "tapestry web user interface components",
                "extreme programming agile test driven development methodology",
                "pragmatic agile methodology developer tools" };
        for (int i = 0; i < titles.length; i++) {
            Document doc = new Document();
            doc.add(Field.Keyword("isbns", isbns[i]));
            doc.add(Field.Text("titles", titles[i]));
            doc.add(Field.Text("pubmonths", pubmonths[i]));
            doc.add(Field.Text("categories", categories[i]));
            doc.add(Field.Text("subjects", subjects[i], true));
            writer.addDocument(doc);
        }
        writer.optimize();
        writer.close();
        // Reader
        reader = IndexReader.open(directory);
        searcher = new IndexSearcher(reader);
        int numDocs = reader.maxDoc();
        for (int i = 0; i < numDocs; i++) {
            System.out.println();
            Document doc = reader.document(i);
            System.out.println(doc.get("titles"));
            Document docs = docsLike(reader, searcher, i);
            System.out.println(" -> " + docs.get("titles"));
        }
    }
}
This post walks through an example of indexing and searching documents with Lucene, and of finding the document most similar to a given one by computing cosine similarity. It covers creating an index, adding documents, and measuring similarity between documents from the contents of a field.
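As a quick way to see what computeAngle returns, here is a hypothetical standalone check (the AngleCheck class and the example strings are not part of the original code): two subject lists that share exactly one word, with 3 and 7 words respectively, give a dot product of 1 and an angle of acos(1 / (sqrt(3) * sqrt(7))), roughly 1.35 radians.

// Hypothetical standalone check of computeAngle; assumes the Exec class above is on the classpath.
public class AngleCheck {
    public static void main(String[] args) {
        String[] a = "lucene search programming".split(" ");
        String[] b = "extreme programming agile test driven development methodology".split(" ");
        double angle = Exec.computeAngle(a, b);  // acos(1 / (sqrt(3) * sqrt(7))) ≈ 1.35 radians
        double cosine = Math.cos(angle);         // ≈ 0.218; a larger cosine means more similar subjects
        System.out.println("angle=" + angle + " cosine=" + cosine);
    }
}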