准备:
1、 下载 Ollama Windows 版本,安装后打开
官网:https://ollama.com/download/windows
2、在 https://ollama.com/library/qwen 搜索 qwen,找到你想下载的对应版本。我这里以最小的 0.5b 为例
3、打开 Ollama 后,在控制台输入以下命令,安装 0.5b 版的千问大模型:
ollama run qwen:0.5b
安装完可以测试下:
集成
1、准备好一个springboot项目 我这里jdk版本使用的jdk17
加入 Spring AI Ollama 的 Spring Boot 自动装配 starter 依赖:
<dependency>
<groupId>io.springboot.ai</groupId>
<artifactId>spring-ai-ollama-spring-boot-starter</artifactId>
<version>1.0.3</version>
</dependency>
yml配置文件
server:
port: 8080
spring:
application:
name: spring-ai
ai:
ollama:
base-url: http://localhost:11434
chat:
options:
model: qwen:0.5b
autoconfigure:
exclude:
- org.springframework.cloud.function.context.config.ContextFunctionCatalogAutoConfiguration
controller
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.chat.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.ollama.OllamaChatClient;
import org.springframework.ai.ollama.api.OllamaOptions;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
* @ClassName:OllamaChatClientController
* @Author: heng
* @Date: 2025/1/7 13:56
* @Description: 使用Ollama聊天api
*/
@Slf4j
@RestController
public class OllamaChatClientController {
@Autowired
private OllamaChatClient ollamaChatClient;
@GetMapping("/ollama/chat/msg")
public String sendollaMachat(@RequestParam String msg) {
return ollamaChatClient.call(msg);
}
@GetMapping("/ollama/chat/prompt")
public Object sendollaMachatV2(@RequestParam String msg) {
Prompt prompt = new Prompt(msg);
return ollamaChatClient.call(prompt);
}
@GetMapping("/ollama/chat/model")
public Object sendollaMachatV3(@RequestParam String msg) {
Prompt prompt = new Prompt(
msg,
OllamaOptions.create()
.withModel("qwen:0.5b")
.withTemperature(0.4F));
ChatResponse chatResponse = ollamaChatClient.call(prompt);
return chatResponse.getResult().getOutput().getContent();
}
}
测试