Simulating the console.dir function

This article presents a JavaScript method for traversing and displaying an object's properties: it renders properties and methods recursively and lets each node be collapsed and expanded. It also shows how to sort the entries to make the output easier to read.
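For reference, this is the native API being emulated: in a browser devtools console, console.dir prints an expandable listing of an object's properties.

// Native behavior this article re-creates inside the page itself:
// console.dir shows an interactive, expandable property listing in devtools.
console.dir(window);
console.dir(document.body);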

function dir(obj, name, initContainer) {
    var ul = initContainer ? initContainer : document.createElement("ul");
    var li = document.createElement("li");
    var span = document.createElement("span");
    span.innerHTML = "+";
    span.className = "plus";
    span.onclick = function () {
        // If the children have already been rendered, just toggle their visibility.
        if (this.rendered) {
            if (this.className === "minus") {
                // Currently expanded: collapse and hide the child list.
                this.className = "plus";
                this.innerHTML = "+";
                this.parentNode.lastChild.style.display = "none";
            } else {
                // Currently collapsed: expand and show the child list.
                this.className = "minus";
                this.innerHTML = "-";
                this.parentNode.lastChild.style.display = "block";
            }
            return;
        }
        // First click: render the children lazily by recursing into obj.
        var childUl = document.createElement("ul");
        for (var k in obj) {
            dir(obj[k], k, childUl);
        }
        li.appendChild(childUl);

        this.className = "minus";
        this.innerHTML = "-";
        this.rendered = true;
    };
    li.appendChild(span);
    var span2 = document.createElement("span");
    name = name || String(obj); // String() avoids a crash on null/undefined
    span2.innerHTML = name + " : " + typeof obj;
    li.appendChild(span2);
    ul.appendChild(li);
    return ul;
}

/*----------------------------- Test dir --------------------------------*/
window.onload = function () {
    document.body.appendChild(dir(window));
};

The test prints out all of the window object's properties and methods; see the attachment.
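The same dir function works for ordinary objects as well, not just window. A minimal sketch for illustration (the config object below is invented for the example; dir's optional second argument is the display name):

// Inspect a plain object instead of window.
var config = {
    retries: 3,
    endpoints: { api: "/api", auth: "/auth" },
    log: function (msg) { console.log(msg); }
};

window.onload = function () {
    document.body.appendChild(dir(config, "config"));
};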

The entries can also be sorted. The snippet below sorts properties and methods by name (sorting by typeof works just as well):

// Drop-in replacement for the for...in loop inside the onclick handler:
// collect the keys first, sort them, then recurse in sorted order.
var properties = [];
for (var p in obj) {
    properties.push(p);
}
properties.sort();
var len = properties.length;
for (var i = 0; i < len; i++) {
    dir(obj[properties[i]], properties[i], childUl);
}
li.appendChild(childUl);
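The typeof variant mentioned above could look like the following; grouping by type first and then by name within each type is an assumption about the desired order:

// Sort by the typeof of each value first, then by name within the same type.
var properties = [];
for (var p in obj) {
    properties.push(p);
}
properties.sort(function (a, b) {
    var ta = typeof obj[a];
    var tb = typeof obj[b];
    if (ta !== tb) {
        return ta < tb ? -1 : 1;          // group entries by type
    }
    return a < b ? -1 : (a > b ? 1 : 0);  // then order by name within a type
});
for (var i = 0; i < properties.length; i++) {
    dir(obj[properties[i]], properties[i], childUl);
}
li.appendChild(childUl);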