经过测试发现 spring-data-solr 和 solrj 这两个 jar 包之间存在着版本冲突。最终确认 spring-data-solr 3.0.6.RELEASE
与 solrj 6.6.3 搭配使用没有冲突,测试正常。先把这两个 pom 依赖给大家。
<!-- Spring Data Solr 3.0.6.RELEASE — verified compatible with solr-solrj 6.6.3 (see note above) -->
<dependency>
<groupId>org.springframework.data</groupId>
<artifactId>spring-data-solr</artifactId>
<version>3.0.6.RELEASE</version>
</dependency>
<!-- SolrJ client; keep the major version in sync with the Solr server (6.x here) -->
<dependency>
<groupId>org.apache.solr</groupId>
<artifactId>solr-solrj</artifactId>
<version>6.6.3</version>
</dependency>
首先自己的solr环境要搭建好。接下来就是解决中文分词的问题。
/**
 * Runs Solr field analysis on {@code sentence} and returns the tokenized
 * terms joined by single spaces (with a leading space, as before — callers
 * such as searchBySentence trim the result).
 *
 * @param sentence text to tokenize (e.g. a Chinese phrase)
 * @return space-separated analyzed tokens, or "" if analysis yields none
 * @throws IOException         on transport failure talking to Solr
 * @throws SolrServerException on a Solr-side error
 */
public String getAnalysis(String sentence) throws IOException, SolrServerException {
    FieldAnalysisRequest request = new FieldAnalysisRequest("/analysis/field");
    // Field name: any field whose type supports Chinese tokenization works.
    request.addFieldName(ZbZskData.FIELD_KEYWORDS);
    // Field value may be empty, but the parameter must be set explicitly.
    request.setFieldValue("");
    request.setQuery(sentence);
    FieldAnalysisResponse response =
            request.process(solrTemplate.getSolrClient(), ZbZskData.CORE_NAME);
    // Collect every token emitted by the query-analysis phases.
    // StringBuilder instead of String += avoids O(n^2) concatenation.
    StringBuilder results = new StringBuilder();
    for (AnalysisResponseBase.AnalysisPhase phase
            : response.getFieldNameAnalysis(ZbZskData.FIELD_KEYWORDS).getQueryPhases()) {
        for (AnalysisResponseBase.TokenInfo info : phase.getTokens()) {
            results.append(" ").append(info.getText());
        }
    }
    return results.toString();
}
在使用中文分词的时候,要注意使用的字段一定要支持中文分词。
/**
 * Maps to the {@code keywords} field in solr conf/managed-schema. Because the
 * schema declares the field {@code multiValued="true"}, it must be a
 * collection type here.
 */
@Indexed(name = FIELD_KEYWORDS, type = "text_ik")
private Set<String> keywords;
在solr中的配置为
<field name="name" type="string" indexed="true" stored="true"/>
<field name="type" type="string" indexed="true" stored="true"/>
<field name="table_name" type="text_ik" indexed="true" stored="true"/>
<!-- multiValued so it can aggregate the copies from the fields below -->
<field name="keywords" type="text_ik" indexed="true" stored="true" multiValued="true"/>
<!-- NOTE: removed <copyField source="keywords" dest="keywords"/> — copying a
     field onto itself duplicates its values on every reindex. -->
<copyField source="name" dest="keywords"/>
<copyField source="type" dest="keywords"/>
<copyField source="table_name" dest="keywords"/>
添加测试代码
@Test
public void analysis() throws IOException, SolrServerException {
    // Tokenize a Chinese sentence via Solr field analysis and print the terms.
    String tokens = musicRepository.getAnalysis("三千越甲可吞吴");
    System.out.println(tokens);
}

接下来封装solr对数据处理的接口:
public interface SolrServiceAPI<T> {
// Add a single document to the index
void addBean(T t);
// Add a batch of documents
void addList(List<T> tList);
// Look up a document by id (result is printed, not returned)
void findById(String id);
// Paged query over all documents
ScoredPage<T> queryByPage(long pageIndex, int pageSize);
// Delete a document by id (original comment wrongly said "query by id")
void deleteById(String id);
// Keyword search (space-separated terms)
ScoredPage<T> searchByKeyword(String searchTerm, long pageIndex, int pageSize);
// Sentence search: the sentence is tokenized first, then searched
ScoredPage<T> searchBySentence(String sentence, long pageIndex, int pageSize)
throws IOException, SolrServerException;
// Search with a field projection, hiding fields that need not be returned
ScoredPage<T> searchByNameProjection(String searchName, long pageIndex, int pageSize);
// Highlighted, paged search
Map<String, Object> search(Map searchMap);
}
对应的实现类:
@Service
public class SolrService extends BaseService implements SolrServiceAPI<ZbZskData> {
    // FIX: was `implements SolrServiceAPI<User>`, which cannot compile — every
    // @Override method below implements the interface with ZbZskData, not User.

    @Autowired
    SolrTemplate solrTemplate;

    /**
     * Adds a single document to the Solr core and commits immediately so it
     * becomes searchable.
     */
    @Override
    public void addBean(ZbZskData zb_zsk_datacenter) {
        solrTemplate.saveBean(ZbZskData.CORE_NAME, zb_zsk_datacenter);
        solrTemplate.commit(ZbZskData.CORE_NAME);
    }

    /**
     * Adds a batch of documents with a single commit at the end.
     */
    @Override
    public void addList(List<ZbZskData> zb_zsk_datacenters_list) {
        solrTemplate.saveBeans(ZbZskData.CORE_NAME, zb_zsk_datacenters_list);
        solrTemplate.commit(ZbZskData.CORE_NAME);
    }

    /**
     * Looks up a document by id and prints it; the void return matches the
     * SolrServiceAPI contract.
     */
    @Override
    public void findById(String id) {
        Optional<ZbZskData> music = solrTemplate.getById(ZbZskData.CORE_NAME, id, ZbZskData.class);
        System.out.println(music);
    }

    /**
     * Pages through all documents ({@code *:*} matches everything) and prints
     * each hit plus the totals.
     *
     * NOTE(review): pageIndex is passed directly to setOffset, i.e. treated as
     * a row offset; if callers supply a page number the offset should be
     * pageIndex * pageSize — confirm against the callers.
     */
    @Override
    public ScoredPage<ZbZskData> queryByPage(long pageIndex, int pageSize) {
        // Match-all query.
        Query query = new SimpleQuery("*:*");
        // Paging: starting offset and number of rows.
        query.setOffset(pageIndex);
        query.setRows(pageSize);
        ScoredPage<ZbZskData> zsk = solrTemplate.queryForPage(ZbZskData.CORE_NAME, query, ZbZskData.class);
        // Dump the page content (debug output).
        for (ZbZskData zsk_datacenter : zsk) {
            System.out.println(zsk_datacenter.getZB_ZSK_KEYWORDS());
            System.out.println(zsk_datacenter.getZB_ZSK_DOC_NAME());
        }
        System.out.println("总记录的数 : " + zsk.getTotalElements());
        System.out.println("总页数 : " + zsk.getTotalPages());
        return zsk;
    }

    /**
     * Deletes a document by id and commits.
     */
    @Override
    public void deleteById(String id) {
        solrTemplate.deleteByIds(ZbZskData.CORE_NAME, id);
        solrTemplate.commit(ZbZskData.CORE_NAME);
    }

    /**
     * Keyword search: upper-cases the term, splits it on spaces, and matches
     * any resulting word against the keywords field (OR semantics).
     */
    @Override
    public ScoredPage<ZbZskData> searchByKeyword(String searchTerm, long pageIndex, int pageSize) {
        String[] words = searchTerm.toUpperCase().split(" ");
        String[] contidionFields = new String[]{ZbZskData.FIELD_KEYWORDS};
        Criteria conditions = createSearchConditions(contidionFields, words);
        SimpleQuery search = new SimpleQuery(conditions);
        search.setOffset(pageIndex);
        search.setRows(pageSize);
        ScoredPage<ZbZskData> page = solrTemplate.queryForPage(ZbZskData.CORE_NAME, search, ZbZskData.class);
        System.out.println("总记录的数 : " + page.getTotalElements());
        System.out.println("总页数 : " + page.getTotalPages());
        return page;
    }

    /**
     * Builds an OR-of-contains criteria over every (field, word) pair.
     * The original nested i==0 special case reduces to a single null check:
     * the first pair seeds the criteria, every later pair is OR-ed on.
     *
     * @return the combined criteria, or null when {@code words} is empty
     */
    private Criteria createSearchConditions(String[] conditionFileds, String[] words) {
        Criteria conditions = null;
        for (String word : words) {
            for (String field : conditionFileds) {
                if (conditions == null) {
                    conditions = new Criteria(field).contains(word);
                } else {
                    conditions = conditions.or(new Criteria(field).contains(word));
                }
            }
        }
        return conditions;
    }

    /**
     * Sentence search: tokenizes the sentence via Solr analysis, then runs a
     * keyword search over the resulting space-separated terms.
     */
    @Override
    public ScoredPage<ZbZskData> searchBySentence(String sentence, long pageIndex, int pageSize)
            throws IOException, SolrServerException {
        String words = getAnalysis(sentence.toUpperCase());
        return searchByKeyword(words.trim(), pageIndex, pageSize);
    }

    /**
     * Keyword search over keywords + document name, projecting only the
     * keywords field in the response.
     */
    @Override
    public ScoredPage<ZbZskData> searchByNameProjection(String searchName, long pageIndex, int pageSize) {
        String[] words = searchName.toUpperCase().split(" ");
        String[] contidionFields = new String[]{ZbZskData.FIELD_KEYWORDS, ZbZskData.FIELD_ZB_ZSK_DOC_NAME};
        Criteria conditions = createSearchConditions(contidionFields, words);
        SimpleQuery search = new SimpleQuery(conditions);
        // Projection: restrict which fields Solr returns.
        search.addProjectionOnField(ZbZskData.FIELD_KEYWORDS);
        search.setOffset(pageIndex);
        search.setRows(pageSize);
        return solrTemplate.queryForPage(ZbZskData.CORE_NAME, search, ZbZskData.class);
    }

    /**
     * Highlighted, paged search. If {@code searchMap} carries a "keywords"
     * entry, results are filtered to documents whose keywords field matches;
     * otherwise all documents are returned. Returns a map with "rows" (the
     * page content, highlights applied) and "total" (total hit count).
     * The raw Map parameter matches the SolrServiceAPI declaration.
     */
    @Override
    public Map<String, Object> search(Map searchMap) {
        // Highlight-enabled match-all query; criteria may be narrowed below.
        HighlightQuery query = new SimpleHighlightQuery(new SimpleStringCriteria("*:*"));
        highlightConfig(query);
        if (searchMap != null) {
            String keywords = (String) searchMap.get("keywords");
            if (StringUtils.isNoneBlank(keywords)) {
                // .is() searches the index for entries matching the keyword.
                Criteria criteria = new Criteria(ZbZskData.FIELD_KEYWORDS).is(keywords);
                query.addCriteria(criteria);
            }
        }
        // Paging: offset starts at 0; 10 rows per page (the original comment
        // claimed 30, but the code always set 10).
        query.setOffset(0L);
        query.setRows(10);
        // Run the query, then swap each entity's plain field for its
        // highlighted version.
        HighlightPage<ZbZskData> scoredPage =
                solrTemplate.queryForHighlightPage(ZbZskData.CORE_NAME, query, ZbZskData.class);
        highlightReplace(scoredPage);
        List<ZbZskData> itemList = scoredPage.getContent();
        long totalElements = scoredPage.getTotalElements();
        Map<String, Object> dataMap = new HashMap<String, Object>();
        dataMap.put("rows", itemList);
        dataMap.put("total", totalElements);
        return dataMap;
    }

    /**
     * Configures highlighting: which fields to highlight and the HTML
     * prefix/postfix wrapped around each match.
     */
    public void highlightConfig(HighlightQuery query) {
        HighlightOptions highlightOptions = new HighlightOptions();
        highlightOptions.addField(ZbZskData.FIELD_KEYWORDS).addField(ZbZskData.FIELD_ZB_ZSK_DOC_NAME);
        highlightOptions.setSimplePrefix("<span style='color:red;'>");
        highlightOptions.setSimplePostfix("</span>");
        query.setHighlightOptions(highlightOptions);
    }

    /**
     * For every entity on the page, replaces its plain keywords value with the
     * concatenated highlight snippets of its first highlighted field.
     */
    public void highlightReplace(HighlightPage<ZbZskData> scoredPage) {
        List<HighlightEntry<ZbZskData>> highlighted = scoredPage.getHighlighted();
        for (HighlightEntry<ZbZskData> itemHighlightEntry : highlighted) {
            // The non-highlighted entity backing this entry.
            ZbZskData music = itemHighlightEntry.getEntity();
            List<HighlightEntry.Highlight> highlights = itemHighlightEntry.getHighlights();
            if (highlights != null && !highlights.isEmpty()) {
                // Only the first highlighted field is consumed, as before.
                HighlightEntry.Highlight highlight = highlights.get(0);
                List<String> snipplets = highlight.getSnipplets();
                if (snipplets != null && !snipplets.isEmpty()) {
                    // StringBuilder instead of StringBuffer: no sharing here,
                    // so the synchronized variant buys nothing.
                    StringBuilder buffer = new StringBuilder();
                    for (String snipplet : snipplets) {
                        buffer.append(snipplet);
                    }
                    music.setZB_ZSK_KEYWORDS(buffer.toString());
                }
            }
        }
    }
}
本文介绍如何在Spring Data Solr中解决版本冲突,并实现中文分词功能。通过配置支持中文分词的字段及使用特定版本的依赖包,确保了数据正确处理和检索。
501

被折叠的 条评论
为什么被折叠?



