#include <vector>
#include <cmath>
#include <random>
#include <iostream>

// Sigmoid activation function (defined for reference; the RNN cell below uses tanh)
double sigmoid(double x) {
    return 1.0 / (1.0 + std::exp(-x));
}
// RNN cell
class RNNCell {
private:
    int input_size;
    int hidden_size;
    std::vector<std::vector<double>> Wxh;   // input-to-hidden weights
    std::vector<std::vector<double>> Whh;   // hidden-to-hidden weights
    std::vector<double> bh;                 // hidden bias
    std::vector<double> state;              // current hidden state
    // Initialize weights and biases
    void init_parameters() {
        std::random_device rd;
        std::default_random_engine generator(rd());
        std::normal_distribution<double> distribution(0.0, 0.1);
        // Weight initialization: small random values (mean 0, stddev 0.1)
        Wxh.resize(hidden_size, std::vector<double>(input_size));
        Whh.resize(hidden_size, std::vector<double>(hidden_size));
        for (auto &row : Wxh)
            for (auto &val : row)
                val = distribution(generator);
        for (auto &row : Whh)
            for (auto &val : row)
                val = distribution(generator);
        // Bias initialization
        bh.resize(hidden_size);
        for (auto &val : bh)
            val = distribution(generator);
        // Hidden state starts at zero
        state.assign(hidden_size, 0.0);
    }
public:
    RNNCell(int input_sz, int hidden_sz) : input_size(input_sz), hidden_size(hidden_sz) {
        init_parameters();
    }
    // Forward pass: new_state = tanh(Wxh * input + Whh * state + bh)
    std::vector<double> forward(const std::vector<double>& input) {
        std::vector<double> new_state(hidden_size);
        for (int i = 0; i < hidden_size; ++i) {
            double sum = bh[i];
            for (int j = 0; j < input_size; ++j) {
                sum += Wxh[i][j] * input[j];
            }
            for (int j = 0; j < hidden_size; ++j) {
                sum += Whh[i][j] * state[j];
            }
            // Apply the tanh non-linearity once, after the full pre-activation is accumulated
            new_state[i] = std::tanh(sum);
        }
        state = new_state;
        return state;
    }
};
int main() {
    const int input_size = 3;
    const int hidden_size = 5;
    RNNCell cell(input_size, hidden_size);
    std::vector<double> input = {1.0, 0.5, -1.0};
    auto output = cell.forward(input);
    for (double v : output) {
        std::cout << v << std::endl;
    }
    return 0;
}
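To make the recurrence visible, the same cell can be stepped over a short sequence of inputs; the hidden state returned by each forward call is carried into the next one. Below is a minimal usage sketch against the RNNCell above (the three input vectors are arbitrary example values, not from the original post):

    // Assumed example: feed a 3-step sequence through one RNNCell instance
    RNNCell seq_cell(3, 5);
    std::vector<std::vector<double>> sequence = {
        {1.0, 0.5, -1.0},
        {0.2, -0.3, 0.7},
        {-0.5, 0.1, 0.4}
    };
    // Each call updates the cell's internal state, so later steps depend on earlier inputs
    for (const auto& x : sequence) {
        auto h = seq_cell.forward(x);
        std::cout << "first hidden unit: " << h[0] << std::endl;
    }

Because the hidden state persists inside the object, processing an independent second sequence would require constructing a fresh cell (or adding a reset method), which this simple example omits.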