1. Environment setup: refer to the earlier blog post; the environment has to be set up on both the server and the local PC.
2. Model building:
Data processing (training set): the raw files come from the PhysioNet "Gait in Parkinson's Disease" database, whose file names follow the pattern GaCo01_01.txt (study prefix, Co/Pt group, subject number, walk number).
import os
import re
import numpy as np
# Extract features from the file name
def extract_features(file_name):
match = re.match(r"([A-Za-z]{2})([A-Za-z]{2})(\d{2})_(\d{2})\.txt", file_name)
if match:
data_source = match.group(1) # study code (Ga, Ju, Si)
gait_type = match.group(4) # walk number (e.g. 01, 10)
# Map the study code to an integer
data_source_mapping = {
"Ga": 0, # 0 = Galit Yogev et al.
"Ju": 1, # 1 = Hausdorff et al.
"Si": 2 # 2 = Silvi Frenkel-Toledo et al.
}
# Map the walk number to an integer code
gait_type_mapping = {
"01": 0, # walk 01 (usual walking)
"02": 1, # walk 02
"03": 2, # walk 03
"04": 3, # walk 04
"05": 4, # walk 05
"06": 5, # walk 06
"07": 6, # walk 07
"08": 7, # walk 08
"09": 8, # walk 09
"10": 9 # walk 10 (dual-task walking)
}
data_source_code = data_source_mapping.get(data_source, -1) # -1 means unknown
gait_type_code = gait_type_mapping.get(gait_type, -1) # -1 means unknown
return data_source_code, gait_type_code
else:
return None, None
# Slice the time series with a sliding window
def sliding_window_slice(data, window_size, step_size):
slices = []
for start in range(0, len(data) - window_size + 1, step_size):
end = start + window_size
slice_data = data[start:end]
slices.append(slice_data)
# e.g. len(data)=12000, window 600, step 100 -> (12000-600)//100 + 1 = 115 slices
#print(f"number of slices: {len(slices)}") # optional check of the slice count per file (can be slow)
return slices
def add_features_as_columns(slice_data, data_source, gait_type):
"""
Append data_source and gait_type as two extra columns behind slice_data.
Args:
slice_data (numpy.ndarray): array of shape (600, 19).
data_source (int): data-source feature.
gait_type (int): gait-type feature.
Returns:
numpy.ndarray: data with the features appended, shape (600, 21).
"""
# 1. Expand data_source and gait_type into column vectors
data_source_column = np.full((slice_data.shape[0], 1), data_source) # shape (600, 1)
gait_type_column = np.full((slice_data.shape[0], 1), gait_type) # shape (600, 1)
# 2. Concatenate the new columns onto slice_data
new_data = np.concatenate((slice_data, data_source_column, gait_type_column), axis=1)
return new_data
# Process every txt file in the folder and build the dataset
def process_files_in_folder(folder_path, window_size=600, step_size=100):
data = []
labels = []
# Collect all txt files in the folder
files = os.listdir(folder_path)
txt_files = [file for file in files if file.endswith('.txt')]
for file_name in txt_files:
# Extract the file-name features (data source, gait type)
data_source, gait_type = extract_features(file_name)
#print(f"file: {file_name}, data source: {data_source}, gait type: {gait_type}") # check the extraction
if data_source is not None and gait_type is not None:
# Load the sensor data (whitespace/comma separated numbers; adjust to the actual format)
file_path = os.path.join(folder_path, file_name)
sensor_data = np.loadtxt(file_path)
# Slice with the sliding window
slices = sliding_window_slice(sensor_data, window_size, step_size)
# Label each slice from the group encoded in the file name
label = 0 if 'Co' in file_name else 1 # Co (control) -> 0, Pt (patient) -> 1
#print(f"file: {file_name}, label: {label}") # verify the label
# Attach the data-source and gait-type features to every slice
for slice_data in slices:
feature_data = add_features_as_columns(slice_data, data_source, gait_type)
data.append(feature_data)
labels.append(label)
else:
print(f"File name {file_name} has an unexpected format, skipping")
# Stack everything into the training set
data = np.array(data)
labels = np.array(labels)
print(f"Shape of one slice: {data[100].shape}")
print(f"Shape of the stacked dataset: {data.shape}")
print(data[1000][:, -2:]) # the last two columns should be the appended features
print(f"{data_source}")
print(f"{gait_type}")
print(f"Shape after adding the features: {feature_data.shape}")
print(f"Shape of the raw sensor slice: {slice_data.shape}")
return data, labels
# Folder path
folder_path = 'gait-in-parkinsons-disease-1.0.0/gait-in-parkinsons-disease-1.0.0' # replace with the actual path
# Process the files and build the training set
X, y = process_files_in_folder(folder_path)
print(f"Training data shape: {X.shape}")
print(f"Label shape: {y.shape}")
Model building:
import json
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import joblib
from sklearn.model_selection import KFold
from sklearn.preprocessing import QuantileTransformer
# 1. Define the LSTM model
class LSTMModel(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(LSTMModel, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
# LSTM layer
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first=True # input/output tensors are (batch, seq_len, feature)
)
# Fully connected output layer
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
# Initialise the hidden state and the cell state
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
# Forward pass through the LSTM
out, _ = self.lstm(x, (h0, c0)) # output shape (batch_size, seq_length, hidden_size)
# Keep only the last time step
out = out[:, -1, :]
out = self.fc(out)
return out
# 2. Hyperparameters
input_size = 21
hidden_size = 128
num_layers = 2
num_classes = 2
batch_size = 64
learning_rate = 0.001
num_epochs = 10
k = 5 # number of folds
# 3. Data preparation (X and y are the numpy arrays built by the preprocessing script)
# X.shape = (31549, 600, 21)
# y.shape = (31549,)
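# If X and y were saved by the preprocessing script (the np.save sketch above, an assumed
# convention rather than the original workflow), they could be reloaded here:
# X = np.load("X.npy")
# y = np.load("y.npy")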
# 4. K-fold cross-validation
kf = KFold(n_splits=k, shuffle=True, random_state=42)
for fold, (train_index, val_index) in enumerate(kf.split(X, y)):
print(f"Fold {fold + 1}:")
# Split into training and validation sets
X_train_raw, X_val_raw = X[train_index], X[val_index]
y_train, y_val = y[train_index], y[val_index]
# --- Quantile transformation (preserving the time-series structure) ---
# 1. Reshape the training set to 2-D: (n_samples * n_timesteps, n_features)
n_samples_train, n_timesteps, n_features = X_train_raw.shape
X_train_2d = X_train_raw.reshape(-1, n_features)
# 2. Initialise the quantile transformer (a fresh one per fold) and fit it on the training set only
qt = QuantileTransformer(n_quantiles=100, output_distribution='uniform', random_state=42)
X_train_transformed_2d = qt.fit_transform(X_train_2d)
# 3. Reshape back to 3-D
X_train_transformed = X_train_transformed_2d.reshape(n_samples_train, n_timesteps, n_features)
# 4. Transform the validation set with the parameters fitted on the training set
# (flatten first, transform, then restore the 3-D shape)
n_samples_val = X_val_raw.shape[0]
X_val_2d = X_val_raw.reshape(-1, n_features)
X_val_transformed_2d = qt.transform(X_val_2d)
X_val_transformed = X_val_transformed_2d.reshape(n_samples_val, n_timesteps, n_features)
# -----------------------------------
# Convert to PyTorch datasets
train_dataset = TensorDataset(
torch.from_numpy(X_train_transformed).float(),
torch.from_numpy(y_train).long()
)
val_dataset = TensorDataset(
torch.from_numpy(X_val_transformed).float(),
torch.from_numpy(y_val).long()
)
# Create the data loaders
train_loader = DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=2
)
val_loader = DataLoader(
dataset=val_dataset,
batch_size=batch_size,
shuffle=False, # no need to shuffle the validation set
num_workers=2
)
# 5. Initialise the model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LSTMModel(input_size, hidden_size, num_layers, num_classes).to(device)
# 6. Loss function and optimiser
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# 7. Training loop
for epoch in range(num_epochs):
model.train()
for i, (sequences, labels) in enumerate(train_loader):
sequences = sequences.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(sequences)
loss = criterion(outputs, labels)
# Backward pass and optimisation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(train_loader)}], Loss: {loss.item():.4f}')
# 8. Validation
model.eval()
correct = 0
total = 0
with torch.no_grad():
for sequences, labels in val_loader:
sequences = sequences.to(device)
labels = labels.to(device)
outputs = model(sequences)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Epoch [{epoch+1}/{num_epochs}], Validation Accuracy: {100 * correct / total:.2f}%")
# 9. Save the model and configuration for this fold (after the training loop)
# Save the model configuration
config = {
"input_size": input_size,
"hidden_size": hidden_size,
"num_layers": num_layers,
"num_classes": num_classes,
"window_size": 600,
"step_size": 100
}
with open(f"model_config_fold{fold}.json", "w") as f:
json.dump(config, f)
# Save the model weights
print(f"Saving the fold {fold} model...")
torch.save(model.state_dict(), f"lstm_model_fold{fold}.pth")
print(f"Fold {fold} model saved!")
# Save this fold's quantile transformer
joblib.dump(qt, f"qt_fold_{fold}.pkl") # e.g. qt_fold_0.pkl
print("-" * 20)
3. Raspberry Pi:
IP: 192.168.11.27
Reading the Bluetooth data:
Location: /feet/bluewritelast.py
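The Pi-side script itself is not reproduced in the post. Judging from the PC-side receiver below (which listens on TCP ports 12345/12346 and forwards whatever it receives to COM6/COM7), bluewritelast.py presumably reads the insole's Bluetooth serial stream and pushes it to the PC over TCP. A minimal sketch under those assumptions (the device path, PC address and chunk size are all made up, not taken from the original):
import socket
import serial  # pyserial

PC_IP = "192.168.11.100"   # assumption: the PC's LAN address (not given in the post)
PC_PORT = 12345            # must match a 'server_port' in DEVICE_CONFIGS on the PC
BT_DEV = "/dev/rfcomm0"    # assumption: the insole's bound Bluetooth serial device

with serial.Serial(BT_DEV, 115200, timeout=1) as ser:
    with socket.create_connection((PC_IP, PC_PORT)) as sock:
        while True:
            chunk = ser.read(256)      # read whatever bytes the insole has sent
            if chunk:
                sock.sendall(chunk)    # forward the raw bytes to the PC receiver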
4. PC side
Receiving the Bluetooth data: E:\gxy\gxy\D\teach\footbed\code\pc_code\bluereadlast.py
import socket
import serial
import threading
import time
# Config: each Bluetooth device has its own listening port and serial port
DEVICE_CONFIGS = [
{
'server_port': 12345, # listening port for Bluetooth device 1
'com_port': 'COM6', # serial port the data is forwarded to
'baud_rate': 115200
},
{
'server_port': 12346, # listening port for Bluetooth device 2
'com_port': 'COM7', # the other serial port
'baud_rate': 115200
}
]
class SerialServer(threading.Thread):
def __init__(self, server_port, com_port, baud_rate):
super().__init__()
self.server_port = server_port
self.com_port = com_port
self.baud_rate = baud_rate
self.running = True
def run(self):
# Open the serial port
try:
ser = serial.Serial(self.com_port, self.baud_rate)
print(f"Serial {self.com_port} opened")
except serial.SerialException as e:
print(f"Failed to open {self.com_port}: {e}")
return
# Start the TCP server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(('0.0.0.0', self.server_port))
s.listen()
print(f"Listening on port {self.server_port} for {self.com_port}")
while self.running:
conn, addr = s.accept()
print(f"Connected to {addr} for port {self.server_port}")
with conn:
try:
while self.running:
data = conn.recv(1024)
if not data:
break
# Forward to the serial port
ser.write(data)
print(f"[Port {self.server_port} -> {self.com_port}] Sent: {data.hex()}")
except (ConnectionResetError, socket.error) as e:
print(f"Connection error on port {self.server_port}: {e}")
break
except Exception as e:
print(f"Server error on port {self.server_port}: {e}")
finally:
ser.close()
def stop(self):
self.running = False
if __name__ == "__main__":
# Start one server thread per device
threads = []
for config in DEVICE_CONFIGS:
thread = SerialServer(
server_port=config['server_port'],
com_port=config['com_port'],
baud_rate=config['baud_rate']
)
thread.start()
threads.append(thread)
try:
# Keep the main thread alive without busy-waiting
while True:
time.sleep(1)
except KeyboardInterrupt:
print("\nShutting down...")
for thread in threads:
thread.stop()
for thread in threads:
thread.join()
print("All threads stopped")
Reading the data + running the model + producing the result: E:\gxy\gxy\D\teach\footbed\code\pc_code\last.py
import os
import csv
import time
import numpy as np
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from datetime import datetime
import re
import torch
import torch.nn as nn
import joblib
from pathlib import Path
class LSTMModel(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(LSTMModel, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first=True
)
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
out, _ = self.lstm(x, (h0, c0))
out = out[:, -1, :]
out = self.fc(out)
return out
class EnsemblePredictor:
def __init__(self, model_dir: str):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.models = []
self.transformers = []
# Load the models and transformers of the 5 folds
for fold in range(5):
# Load the quantile transformer
qt_path = Path(model_dir) / f"qt_fold_{fold}.pkl"
self.transformers.append(joblib.load(qt_path))
# Load the model
model_path = Path(model_dir) / f"lstm_model_fold{fold}.pth"
model = LSTMModel(21, 128, 2, 2)
model.load_state_dict(torch.load(model_path, map_location=self.device,weights_only=True))
model.to(self.device)
model.eval()
self.models.append(model)
def _preprocess(self, data: np.ndarray, fold: int) -> torch.Tensor:
"""Preprocessing pipeline for one fold"""
# 1. Quantile transformation
data_2d = data.reshape(-1, data.shape[-1]) # (600, 21)
transformed = self.transformers[fold].transform(data_2d)
# 2. (Optional) normalisation, if global statistics had been used during training
#normalized = (transformed - np.mean(transformed)) / np.std(transformed)
# 3. Convert to a PyTorch tensor
tensor_data = torch.from_numpy(transformed).float()
return tensor_data.unsqueeze(0).to(self.device) # (1, 600, 21)
def predict(self, batch_data: np.ndarray) -> float:
"""Ensemble prediction over the 5 folds"""
if batch_data.shape != (1, 600, 21):
raise ValueError(f"Input must have shape (1, 600, 21), got {batch_data.shape}")
probabilities = []
for fold in range(5):
# Preprocess the data for this fold
tensor_data = self._preprocess(batch_data, fold)
# Run inference
with torch.no_grad():
output = self.models[fold](tensor_data)
prob = torch.softmax(output, dim=1)[0][1].item()
print(prob) # per-fold probability, for debugging
probabilities.append(prob)
# Average probability across the folds
return np.mean(probabilities)
class FilePairHandler:
def __init__(self, watch_dir):
self.watch_dir = watch_dir
self.current_left = None
self.current_right = None
self.active_files = {} # {timestamp: (left_path, right_path)}
self._init_file_monitor()
self.scan_existing_files()
def _init_file_monitor(self):
"""初始化文件监控"""
self.observer = Observer()
event_handler = FileCreateHandler(self)
self.observer.schedule(event_handler, self.watch_dir, recursive=False)
self.observer.start()
def scan_existing_files(self):
"""Scan the existing files and build the left/right pairs"""
all_files = os.listdir(self.watch_dir)
file_groups = {}
# Group the files by timestamp
for f in all_files:
if f.endswith('.csv') and ('left' in f or 'right' in f):
try:
# Extract the timestamp part (format: YYYY-MM-DD-HH-MM)
ts_part = re.search(r"\d{4}-\d{2}-\d{2}-\d{2}-\d{2}", f).group(0)
suffix = 'left' if 'left' in f else 'right'
file_groups.setdefault(ts_part, {})[suffix] = f
except Exception as e:
print(f"Failed to parse file name: {f} - {str(e)}")
# Keep and store only the complete pairs
for ts, files in file_groups.items():
if 'left' in files and 'right' in files:
left_path = os.path.join(self.watch_dir, files['left'])
right_path = os.path.join(self.watch_dir, files['right'])
self.active_files[ts] = (left_path, right_path)
# Select the most recent file pair
if self.active_files:
latest_ts = max(self.active_files.keys(), key=lambda x: datetime.strptime(x, "%Y-%m-%d-%H-%M"))
self.current_left, self.current_right = self.active_files[latest_ts]
print(f"Initial file pair loaded:\nLEFT: {self.current_left}\nRIGHT: {self.current_right}")
def update_file_pair(self, new_file):
"""Update the file pair currently being processed"""
# Parse the timestamp of the new file (same YYYY-MM-DD-HH-MM pattern as in scan_existing_files)
try:
ts_part = re.search(r"\d{4}-\d{2}-\d{2}-\d{2}-\d{2}", os.path.basename(new_file)).group(0)
suffix = 'left' if 'left' in new_file else 'right'
# Record the file under its timestamp
if ts_part not in self.active_files:
self.active_files[ts_part] = {}
self.active_files[ts_part][suffix] = new_file
# Check whether this completes a new pair (assign left/right explicitly)
if len(self.active_files[ts_part]) == 2:
self.current_left = self.active_files[ts_part]['left']
self.current_right = self.active_files[ts_part]['right']
print(f"Switched to a new file pair:\nLEFT: {self.current_left}\nRIGHT: {self.current_right}")
except Exception as e:
print(f"Failed to update the file pair: {str(e)}")
class FileCreateHandler(FileSystemEventHandler):
def __init__(self, file_handler):
super().__init__()
self.file_handler = file_handler
def on_created(self, event):
if not event.is_directory and event.src_path.endswith('.csv'):
print(f"检测到新文件: {event.src_path}")
self.file_handler.update_file_pair(event.src_path)
class RealtimeProcessor:
def __init__(self, watch_dir, buffer_size=600):
self.file_handler = FilePairHandler(watch_dir)
self.buffer = []
self.buffer_size = buffer_size
self.file_pointers = {}
self.file_refs = {} # start time of each file
@staticmethod
def parse_timestamp(ts_str):
"""Convert a timestamp string to a Unix timestamp"""
try:
# Two time formats are supported
for fmt in ("%Y-%m-%d %H:%M:%S:%f", "%Y-%m-%d-%H-%M"):
try:
return datetime.strptime(ts_str, fmt).timestamp()
except ValueError:
continue
raise ValueError("unknown time format")
except Exception as e:
print(f"Failed to parse timestamp: {ts_str} - {str(e)}")
return 0.0
def process_stream(self):
"""Main processing loop"""
while True:
# Make sure the current file pair is valid
if not self.file_handler.current_left or not self.file_handler.current_right:
print("Waiting for a valid file pair...")
time.sleep(1)
continue
# Process the current file pair
try:
left_lines = self._read_new_lines(self.file_handler.current_left)
right_lines = self._read_new_lines(self.file_handler.current_right)
# Process the left/right rows in step
for left_row, right_row in zip(left_lines, right_lines):
processed = self.process_row(left_row, right_row)
if processed:
self.buffer.append(processed)
if len(self.buffer) >= self.buffer_size:
self._generate_batch()
except Exception as e:
print(f"Processing error: {str(e)}")
time.sleep(0.1)
def _read_new_lines(self, file_path):
"""Read the lines appended to the file since the last read"""
if file_path not in self.file_pointers:
self.file_pointers[file_path] = 0
# Record the time reference of the file (its first timestamp)
if file_path not in self.file_refs:
with open(file_path, 'r') as f:
first_line = f.readline().strip().split(',')
self.file_refs[file_path] = self.parse_timestamp(first_line[0])
max_retries = 5 # maximum number of retries
retry_delay = 0.2 # delay between retries (seconds)
valid_lines = []
try:
with open(file_path, 'r') as f:
# Seek to the last read position
f.seek(self.file_pointers.get(file_path, 0))
while True:
current_pos = f.tell()
line = f.readline()
# Stop at end of file
if not line:
break
# Column-count check
parts = line.strip().split(',')
if self._validate_columns(parts, file_path):
valid_lines.append(parts)
self.file_pointers[file_path] = f.tell() # only advance the pointer on valid data
else:
# Retry when the column count is wrong (the line may still be being written)
if max_retries > 0:
print(f"Waiting for the line to be completed, retries left: {max_retries}")
f.seek(current_pos) # rewind the file pointer
time.sleep(retry_delay)
max_retries -= 1
continue
else:
print("Maximum retries reached, skipping this line")
self.file_pointers[file_path] = f.tell() # skip the bad line
break
except Exception as e:
print(f"File read error: {str(e)}")
return valid_lines
@staticmethod
def _validate_columns(parts, file_path):
"""Check that a row has the expected number of columns"""
expected = 18
actual = len(parts)
if actual != expected:
print(f"File {os.path.basename(file_path)} has the wrong column count: expected {expected}, got {actual}")
return False
return True
@staticmethod
def process_csv_row(row_str):
parts = [x.strip() for x in row_str.split(',') if x.strip()]
expected_columns = 1 + 16 # adjust to the actual format
if len(parts) != expected_columns:
print(f"Wrong column count: expected {expected_columns}, got {len(parts)}")
print(f"Offending row: {row_str}")
return None
return parts
def process_row(self, left_row, right_row):
"""Process one pair of left/right rows into a 21-feature row"""
try:
# Timestamps
ts_left = self.parse_timestamp(left_row[0])
ts_right = self.parse_timestamp(right_row[0])
# Relative time, measured from the start of the file
file_left = self.file_handler.current_left
relative_time = ts_left - self.file_refs[file_left]
# Numeric columns (drop the timestamp and the last column)
left_values = list(map(float, left_row[1:-1]))
right_values = list(map(float, right_row[1:-1]))
# Build the new feature row
new_row = [
relative_time, # 0: relative time
(left_values[14] + left_values[15])/2, # 1
left_values[13], # 2
left_values[11], # 3
left_values[10], # 4
left_values[8], # 5
left_values[4], # 6
left_values[2], # 7
(left_values[0] + left_values[1])/2, # 8
(right_values[14] + right_values[15])/2,# 9
right_values[13], #10
right_values[11], #11
right_values[10], #12
right_values[8], #13
right_values[4], #14
right_values[2], #15
(right_values[0] + right_values[1])/2, #16
sum(left_values), # 17: sum of the left-foot sensors
sum(right_values), # 18: sum of the right-foot sensors
3, # 19: data_source column (hard-coded)
10 # 20: gait_type column (hard-coded)
]
return [round(x, 4) if isinstance(x, float) else x for x in new_row]
except (IndexError, ValueError) as e:
print(f"行处理失败: {str(e)}\nLEFT: {left_row}\nRIGHT: {right_row}")
return None
def _generate_batch(self):
"""Build a model input batch from the buffer and run the ensemble"""
if len(self.buffer) < self.buffer_size:
return
batch_data = np.array(self.buffer[:self.buffer_size], dtype=np.float32)
batch_data = batch_data.reshape(1, self.buffer_size, 21)
print(f"Batch generated, shape: {batch_data.shape}")
# Run the ensemble (note: this reloads all five folds on every batch; it could be created once in __init__ instead)
predictor = EnsemblePredictor(r"E:\gxy\gxy\D\teach\footbed\model")
# Run the prediction
prob = predictor.predict(batch_data)
print(f"Probability of class 1: {prob:.2%}")
# Threshold the probability to get the final class
final_class = 1 if prob >= 0.5 else 0
print(f"Final classification: {final_class}")
# Slide the buffer: keep only the most recent 300 samples
keep_size = 300
self.buffer = self.buffer[-keep_size:] if len(self.buffer) > keep_size else []
if __name__ == "__main__":
# 配置监控路径
WATCH_DIR = r"E:\gxy\gxy\D\teach\footbed\鞋垫传感器数据\pms_foor_16.1110\data"
# 初始化处理器
processor = RealtimeProcessor(WATCH_DIR)
try:
processor.process_stream()
except KeyboardInterrupt:
processor.file_handler.observer.stop()
processor.file_handler.observer.join()
print("实时处理已终止")