最近由于工作原因,接触到了 Java 的中文分词,现在来讲讲具体操作.
java的分词器都是基于 lucene 核心的.
我选择的是
IKAnalyzer 2.0.2 的最新版本
具体调用过程如下:
import java.io.Reader; import java.io.StringReader; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.cjk.CJKAnalyzer; import org.apache.lucene.analysis.cn.ChineseAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.mira.lucene.analysis.MIK_CAnalyzer; public class JeAnalyzer { //private static String testString1 = "\u4E2D\u56FD\u4EBA\u52A0\u6CB9"; private static String testString1 = "中国人加油"; public static void testStandard(String testString) { try { Analyzer analyzer = new StandardAnalyzer(); Reader r = new StringReader(testString); StopFilter sf = (StopFilter) analyzer.tokenStream("", r); System.err.println("=====standard analyzer===="); Token t; while ((t = sf.next()) != null) { System.out.println(t.termText()); } } catch(Exception e) { e.printStackTrace(); } } public static void testCJK(String testString) { try { Analyzer analyzer = new CJKAnalyzer(); Reader r = new StringReader(testString); StopFilter sf = (StopFilter) analyzer.tokenStream("", r); System.err.println("=====cjk analyzer===="); Token t; while ((t = sf.next()) != null) { System.out.println(t.termText()); } } catch(Exception e) { e.printStackTrace(); } } public static void testChiniese(String testString) { try { Analyzer analyzer = new ChineseAnalyzer(); Reader r = new StringReader(testString); TokenFilter tf = (TokenFilter) analyzer.tokenStream("", r); System.err.println("=====chinese analyzer===="); Token t; while ((t = tf.next()) != null) { System.out.println(t.termText()); } } catch(Exception e) { e.printStackTrace(); } } public static String transJe(String testString,String c1,String c2) { String result = ""; try { Analyzer analyzer = new MIK_CAnalyzer(); Reader r = new StringReader(testString); TokenStream ts = (TokenStream)analyzer.tokenStream("", r); 
//System.out.println("=====je analyzer===="); Token t; while ((t = ts.next()) != null) { result += t.termText()+","; } } catch(Exception e) { e.printStackTrace(); } return result; } public static void main(String[] args) { try { //String testString = testString1; String testString = testString1; System.out.println(testString); //testStandard(testString); //testCJK(testString); //testPaoding(testString); //testChiniese(testString); //testString = new TestT().convert(testString); //testString = "\u636E\u8DEF\u900F\u793E\u62A5\u9053\uff0C\u5370\u5EA6\u5C3C\u897F\u4E9A\u793E\u4f1A\u4E8B\u52A1\u90E8\u4E00\u5B98\u5458\u661F\u671F\u4E8C"; String sResult[] = transJe(testString,"gb2312","utf-8").split(","); for(int i = 0 ; i< sResult.length ; i++) { System.out.println(sResult[i]); } } catch(Exception e) { e.printStackTrace(); } } }
需要的 JAR 如下:
lucene-analyzers-2.4.1.jar
lucene-core-2.4.1.jar
当然少不了
IKAnalyzer2.0.2OBF.jar
在UNIX下使用的话,注意对中文的编码问题.