Chinese Word Segmentation in Java

I recently had to do Chinese word segmentation in Java for work, so here is a walkthrough of how it's done in practice.

The common Java Chinese segmenters are built on top of the Lucene core (its Analyzer/TokenStream abstractions).

I went with IKAnalyzer 2.0.2, the latest version at the time.

The invocation looks like this:

import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.cn.ChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.mira.lucene.analysis.MIK_CAnalyzer;

public class JeAnalyzer {

    // Same string written as Unicode escapes, which avoids source-file encoding issues:
    //private static String testString1 = "\u4E2D\u56FD\u4EBA\u52A0\u6CB9";
    private static String testString1 = "中国人加油";

    // Lucene's StandardAnalyzer: splits CJK text into single characters.
    public static void testStandard(String testString) {
        try {
            Analyzer analyzer = new StandardAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====standard analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // CJKAnalyzer: produces overlapping bigrams for CJK text.
    public static void testCJK(String testString) {
        try {
            Analyzer analyzer = new CJKAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====cjk analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // ChineseAnalyzer: single-character tokens.
    public static void testChinese(String testString) {
        try {
            Analyzer analyzer = new ChineseAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====chinese analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // IK's MIK_CAnalyzer: dictionary-based segmentation, tokens joined with commas.
    // The c1/c2 charset parameters are accepted but not used in this demo.
    public static String transJe(String testString, String c1, String c2) {
        String result = "";
        try {
            Analyzer analyzer = new MIK_CAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            Token t;
            while ((t = ts.next()) != null) {
                result += t.termText() + ",";
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }

    public static void main(String[] args) {
        try {
            String testString = testString1;
            System.out.println(testString);
            //testStandard(testString);
            //testCJK(testString);
            //testChinese(testString);
            String[] sResult = transJe(testString, "gb2312", "utf-8").split(",");
            for (int i = 0; i < sResult.length; i++) {
                System.out.println(sResult[i]);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
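Running main prints the original string followed by the comma-split tokens from the IK analyzer; with a stock IK 2.0.2 dictionary you would expect something like 中国人 and 加油 for this input, though the exact split depends on the dictionary version. Note that TokenStream.next() and Token.termText() are the old Lucene token APIs: they match the 2.4.1 JARs listed below, but were removed in later Lucene releases.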

The required JARs are:

lucene-analyzers-2.4.1.jar

lucene-core-2.4.1.jar

and, of course,

IKAnalyzer2.0.2OBF.jar
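All three need to be on the classpath when compiling and running, e.g. java -cp lucene-core-2.4.1.jar:lucene-analyzers-2.4.1.jar:IKAnalyzer2.0.2OBF.jar:. JeAnalyzer on UNIX (use ; as the path separator on Windows).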

When running this on UNIX, pay attention to the encoding of Chinese text.
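One way to sidestep most of those problems is to decode input with an explicit charset instead of relying on the platform default, which often differs between a Windows development box and a UNIX server. A minimal sketch, assuming a GBK-encoded input file (the file name and charset here are placeholders):

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;

public class EncodingSafeRead {
    public static void main(String[] args) throws IOException {
        // Name the charset explicitly; new FileReader(...) would silently use
        // the platform default and can garble Chinese text on UNIX.
        BufferedReader br = new BufferedReader(
                new InputStreamReader(new FileInputStream("input.txt"), "GBK"));
        String line;
        while ((line = br.readLine()) != null) {
            System.out.println(line); // hand each line to the analyzer here
        }
        br.close();
    }
}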

Below is a Swing front end for a standalone dictionary-based segmenter that supports forward and backward maximum matching (the listing breaks off mid-method in the original):

import WordSegment.*;

import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.util.Vector;

import javax.swing.*;

/**
 * @author Truman
 */
public class WordSegDemoFrame extends JFrame implements ActionListener {

    final static int ALGO_FMM = 1; // forward maximum matching
    final static int ALGO_BMM = 2; // backward maximum matching

    private JMenuBar menuBar = new JMenuBar();
    private JMenuItem openDicItem, closeItem;
    private JRadioButtonMenuItem fmmItem, bmmItem;
    private JMenuItem openTrainFileItem, saveDicItem, aboutItem;
    private JButton btSeg;
    private JTextField tfInput;
    private JTextArea taOutput;
    private JPanel panel;
    JLabel infoDic, infoAlgo;

    private WordSegment seger;
    private DicTrainer trainer = new DicTrainer();

    private void initFrame() {
        setTitle("Mini分词器");
        setDefaultCloseOperation(EXIT_ON_CLOSE);
        setJMenuBar(menuBar);

        JMenu fileMenu = new JMenu("文件");           // File
        JMenu algorithmMenu = new JMenu("分词算法");  // Segmentation algorithm
        JMenu trainMenu = new JMenu("训练语料");      // Training corpus
        JMenu helpMenu = new JMenu("帮助");           // Help

        openDicItem = fileMenu.add("载入词典");       // Load dictionary
        fileMenu.addSeparator();
        closeItem = fileMenu.add("退出");             // Exit

        algorithmMenu.add(fmmItem = new JRadioButtonMenuItem("正向最大匹配", true));  // FMM
        algorithmMenu.add(bmmItem = new JRadioButtonMenuItem("逆向最大匹配", false)); // BMM
        ButtonGroup algorithms = new ButtonGroup();
        algorithms.add(fmmItem);
        algorithms.add(bmmItem);

        openTrainFileItem = trainMenu.add("载入并训练语料");  // Load and train corpus
        saveDicItem = trainMenu.add("保存词典");              // Save dictionary
        aboutItem = helpMenu.add("关于Word Segment Demo");

        menuBar.add(fileMenu);
        menuBar.add(algorithmMenu);
        menuBar.add(trainMenu);
        menuBar.add(helpMenu);

        openDicItem.addActionListener(this);
        closeItem.addActionListener(this);
        openTrainFileItem.addActionListener(this);
        saveDicItem.addActionListener(this);
        aboutItem.addActionListener(this);
        fmmItem.addActionListener(this);
        bmmItem.addActionListener(this);

        JPanel topPanel = new JPanel();
        topPanel.setLayout(new FlowLayout());
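The demo's two menu options correspond to forward and backward maximum matching. For reference, here is a minimal forward-maximum-matching sketch; the dictionary contents, the fmm helper, and the maxLen limit are illustrative placeholders, not the demo's actual WordSegment code:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class FmmDemo {
    // Greedy forward maximum matching: at each position take the longest
    // dictionary word (up to maxLen characters), else a single character.
    static List<String> fmm(String text, Set<String> dict, int maxLen) {
        List<String> tokens = new ArrayList<String>();
        int i = 0;
        while (i < text.length()) {
            int end = Math.min(i + maxLen, text.length());
            String word = text.substring(i, i + 1); // single-character fallback
            for (int j = end; j > i + 1; j--) {     // try the longest candidate first
                String cand = text.substring(i, j);
                if (dict.contains(cand)) { word = cand; break; }
            }
            tokens.add(word);
            i += word.length();
        }
        return tokens;
    }

    public static void main(String[] args) {
        Set<String> dict = new HashSet<String>(
                Arrays.asList("中国", "中国人", "加油"));
        System.out.println(fmm("中国人加油", dict, 3)); // [中国人, 加油]
    }
}

The backward variant works the same way but scans from the end of the string, which often resolves ambiguities differently.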