A Bit Confused

A bit confused. Work has been fairly busy lately, but I keep wanting to study something in depth and improve myself.
Yet I often don't know how. My after-hours study is rather aimless, and my grasp of the various technologies and topics isn't solid enough.
So: start from the basics, one step at a time, and write up a short summary of what I've learned every day.

Java programming fundamentals and the basics of program design.

Web client-side development.

Go deeper by studying this in combination with the company's web framework.
```python
from flask import Flask, request, jsonify, render_template
from flask_socketio import SocketIO, emit
import ollama
import json
import threading
import time
import os
from datetime import datetime
import speech_recognition as sr
from gtts import gTTS
import pygame
import base64
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
app.config['SECRET_KEY'] = 'your_secret_key'
socketio = SocketIO(app, cors_allowed_origins="*")

# Make sure the output directories exist
os.makedirs("conversations", exist_ok=True)
os.makedirs("audio", exist_ok=True)

# Load the character persona
with open('character.txt', 'r', encoding='utf-8') as f:
    character_setting = f.read()

# Conversation history
conversation_history = []


def save_conversation():
    """Save the conversation to a txt file."""
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"conversations/conversation_{timestamp}.txt"
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(f"AI男友对话记录 - {timestamp}\n\n")
        f.write("角色设定:\n")
        f.write(character_setting)
        f.write("\n\n对话内容:\n")
        for msg in conversation_history:
            f.write(f"{msg['role']}: {msg['content']}\n")
    logger.info(f"对话已保存到 {filename}")


def text_to_speech(text, filename):
    """Convert text to speech and save it as an MP3."""
    try:
        tts = gTTS(text=text, lang='zh-cn')
        filename = f"audio/{filename}.mp3"
        tts.save(filename)
        return filename
    except Exception as e:
        logger.error(f"语音生成失败: {e}")
        return None


def play_audio(filename):
    """Play an audio file."""
    try:
        pygame.mixer.init()
        pygame.mixer.music.load(filename)
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            time.sleep(0.1)
    except Exception as e:
        logger.error(f"音频播放失败: {e}")


def get_ai_response(user_input):
    """Get a reply from the AI."""
    try:
        # Build a prompt that includes the character persona
        prompt = f"{character_setting}\n\n当前对话:\n"

        # Append the most recent history (the last 5 rounds, i.e. 10 messages)
        recent_history = conversation_history[-10:] if len(conversation_history) > 10 else conversation_history
        for msg in recent_history:
            role = "小美" if msg['role'] == 'user' else "你"
            prompt += f"{role}: {msg['content']}\n"

        prompt += f"小美: {user_input}\n你: "

        # Call the Ollama API
        response = ollama.chat(model='llama2', messages=[
            {'role': 'user', 'content': prompt}
        ])

        ai_response = response['message']['content']
        return ai_response
    except Exception as e:
        logger.error(f"AI响应失败: {e}")
        return "抱歉,我现在有点困惑,能再说一次吗?"


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/chat', methods=['POST'])
def chat():
    try:
        data = request.json
        user_input = data.get('message', '')
        if not user_input:
            return jsonify({'error': '消息不能为空'}), 400

        # Add the user message to the history
        conversation_history.append({'role': 'user', 'content': user_input,
                                     'timestamp': datetime.now().isoformat()})

        # Get the AI reply
        ai_response = get_ai_response(user_input)

        # Add the reply to the history
        conversation_history.append({'role': 'assistant', 'content': ai_response,
                                     'timestamp': datetime.now().isoformat()})

        # Periodically save the conversation
        if len(conversation_history) % 5 == 0:
            threading.Thread(target=save_conversation).start()

        return jsonify({'response': ai_response})
    except Exception as e:
        logger.error(f"聊天错误: {e}")
        return jsonify({'error': '内部错误'}), 500


@app.route('/text_to_speech', methods=['POST'])
def text_to_speech_api():
    try:
        data = request.json
        text = data.get('text', '')
        if not text:
            return jsonify({'error': '文本不能为空'}), 400

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"speech_{timestamp}"

        # Generate the speech audio
        audio_file = text_to_speech(text, filename)
        if audio_file:
            # Read the audio file and encode it as base64
            with open(audio_file, 'rb') as f:
                audio_data = base64.b64encode(f.read()).decode('utf-8')
            return jsonify({'audio': audio_data, 'filename': filename})
        else:
            return jsonify({'error': '语音生成失败'}), 500
    except Exception as e:
        logger.error(f"语音生成错误: {e}")
        return jsonify({'error': '内部错误'}), 500


@socketio.on('voice_message')
def handle_voice_message(data):
    try:
        # Receive base64-encoded audio data
        audio_data = base64.b64decode(data['audio'])
        filename = f"audio/voice_input_{datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"

        # Save the audio file
        with open(filename, 'wb') as f:
            f.write(audio_data)

        # Speech to text
        recognizer = sr.Recognizer()
        with sr.AudioFile(filename) as source:
            audio = recognizer.record(source)
        user_input = recognizer.recognize_google(audio, language='zh-CN')

        # Send the transcription back to the client
        emit('voice_transcription', {'text': user_input})

        # Get the AI reply
        ai_response = get_ai_response(user_input)

        # Generate the spoken reply
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        audio_file = text_to_speech(ai_response, f"response_{timestamp}")

        # Read the audio file and encode it as base64
        with open(audio_file, 'rb') as f:
            response_audio = base64.b64encode(f.read()).decode('utf-8')

        # Send the voice reply
        emit('voice_response', {
            'text': ai_response,
            'audio': response_audio
        })
    except sr.UnknownValueError:
        emit('error', {'message': '无法识别语音'})
    except sr.RequestError as e:
        emit('error', {'message': f'语音识别服务错误: {e}'})
    except Exception as e:
        logger.error(f"语音处理错误: {e}")
        emit('error', {'message': '内部错误'})


if __name__ == '__main__':
    # Initialize the pygame mixer
    pygame.mixer.init()

    # Start the Flask app
    logger.info("AI男友系统启动中...")
    socketio.run(app, host='0.0.0.0', port=5000, debug=True)
```
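
A quick way to exercise the `/chat` route above is with a small HTTP client. This is a minimal sketch under some assumptions: the server is running locally as in the `__main__` block (host `0.0.0.0`, port 5000), the packages implied by the imports (flask, flask-socketio, ollama, SpeechRecognition, gTTS, pygame) are installed, an Ollama server with the `llama2` model is available, and the `requests` library used here is an extra dependency that is not part of the original code.

```python
# Hypothetical client sketch for the /chat endpoint defined above.
# Assumes the app is running at http://127.0.0.1:5000 and `requests` is installed.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/chat",
    json={"message": "你好,今天过得怎么样?"},  # the route reads the 'message' field
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["response"])  # the route returns {'response': <AI reply>}
```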