-
Notifications
You must be signed in to change notification settings - Fork 3
/
z-neural-network.cpp
121 lines (103 loc) · 4.26 KB
/
z-neural-network.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#include <iostream>
#include <vector>
#include <cmath>
#include <random>
#include <algorithm>
// A minimal 1-input / 1-output neural network with a single hidden layer of
// sigmoid units, trained by stochastic gradient descent on normalized data.
class SimpleNeuralNetwork {
private:
    std::vector<double> input_weights;   // weight from the input to each hidden neuron
    std::vector<double> output_weights;  // weight from each hidden neuron to the output
    int hidden_neurons;
    double input_bias;   // shared bias added to every hidden neuron's pre-activation
    double output_bias;  // bias added to the output pre-activation

    // Logistic activation: maps any real number into (0, 1).
    double sigmoid(double x) const {
        return 1.0 / (1.0 + std::exp(-x));
    }

    // Normalize a value to [0, 1] given its maximum.
    // Takes doubles so that double-valued targets are not silently truncated
    // to int (the previous int-only signature dropped fractional targets).
    double normalize(double x, double max) const {
        return x / max;
    }

public:
    // Initializes all weights and biases uniformly at random in [-1, 1).
    // @param hidden_size number of hidden neurons (default 100).
    explicit SimpleNeuralNetwork(int hidden_size = 100)
        : hidden_neurons(hidden_size), input_bias(0), output_bias(0) {
        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_real_distribution<> dis(-1.0, 1.0);
        input_weights.resize(hidden_size);
        output_weights.resize(hidden_size);
        for (int i = 0; i < hidden_size; ++i) {
            input_weights[i] = dis(gen);
            output_weights[i] = dis(gen);
        }
        input_bias = dis(gen);
        output_bias = dis(gen);
    }

    // Trains the network with per-sample (stochastic) gradient descent.
    // @param inputs        raw integer inputs
    // @param targets       raw targets, parallel to inputs
    // @param max_input     normalization divisor for inputs
    // @param max_target    normalization divisor for targets
    // @param learning_rate SGD step size (default matches previous hard-coded 0.001)
    // @param epochs        number of passes over the data (default matches previous 10000)
    void train(const std::vector<int>& inputs, const std::vector<double>& targets,
               int max_input, int max_target,
               double learning_rate = 0.001, int epochs = 10000) {
        for (int epoch = 0; epoch < epochs; ++epoch) {
            for (size_t i = 0; i < inputs.size(); ++i) {
                double normalized_input = normalize(inputs[i], max_input);
                double normalized_target = normalize(targets[i], max_target);
                // Forward pass: one shared input feeds every hidden neuron;
                // the output is a sigmoid of the weighted hidden activations.
                std::vector<double> hidden_outputs(hidden_neurons, 0.0);
                double output = output_bias;
                for (int h = 0; h < hidden_neurons; ++h) {
                    hidden_outputs[h] = sigmoid(normalized_input * input_weights[h] + input_bias);
                    output += hidden_outputs[h] * output_weights[h];
                }
                double prediction = sigmoid(output);
                // Backward pass: delta terms use the sigmoid derivative s*(1-s).
                double error = normalized_target - prediction;
                double delta_output = error * prediction * (1 - prediction);
                output_bias += learning_rate * delta_output;
                // Progress log: once per matching epoch, not once per sample
                // (the old check printed for every sample in the epoch).
                if (epoch % 10000 == 0 && i == 0) {
                    std::cout << "Error: " << error << "\n";
                }
                for (int h = 0; h < hidden_neurons; ++h) {
                    // delta_hidden is read before output_weights[h] is updated,
                    // so the gradient uses the pre-update weight as required.
                    double delta_hidden = delta_output * output_weights[h] * hidden_outputs[h] * (1 - hidden_outputs[h]);
                    output_weights[h] += learning_rate * delta_output * hidden_outputs[h];
                    input_weights[h] += learning_rate * delta_hidden * normalized_input;
                    // The bias is shared, so each hidden neuron's delta accumulates into it.
                    input_bias += learning_rate * delta_hidden;
                }
            }
        }
    }

    // Runs a forward pass on a raw input and returns the normalized
    // prediction in (0, 1); callers de-normalize by multiplying by max_target.
    double predict(int x, int max_input) const {
        double normalized_input = normalize(x, max_input);
        double output = output_bias;
        for (int h = 0; h < hidden_neurons; ++h) {
            double hidden_output = sigmoid(normalized_input * input_weights[h] + input_bias);
            output += hidden_output * output_weights[h];
        }
        return sigmoid(output);
    }

    // Dumps all learned parameters to stdout.
    void print() const {
        std::cout << "Neural Network Model:\n";
        std::cout << "Input Weights: ";
        for (const auto& w : input_weights) std::cout << w << " ";
        std::cout << "\nOutput Weights: ";
        for (const auto& w : output_weights) std::cout << w << " ";
        std::cout << "\nInput Bias: " << input_bias << "\nOutput Bias: " << output_bias << std::endl;
    }
};
// Demo driver: fits the network to a linear mapping input -> input/10
// and prints predictions next to the true targets.
int main() {
    const int num_data = 100;
    const int max_input = 1000;
    const int max_target = 100;
    std::vector<int> inputs(num_data);
    std::vector<double> targets(num_data);
    // Generate linearly spaced inputs and map them to targets.
    // The target step uses floating-point division: the old integer
    // expression i * (max_target / num_data) silently produced all-zero
    // targets whenever max_target < num_data.
    const double target_step = static_cast<double>(max_target) / num_data;
    for (int i = 0; i < num_data; ++i) {
        inputs[i] = i * (max_input / num_data);
        targets[i] = i * target_step;
    }
    SimpleNeuralNetwork nn;
    nn.train(inputs, targets, max_input, max_target);
    // Predict and display results
    nn.print();
    for (int i = 0; i < num_data; i += 10) {
        double prediction = nn.predict(inputs[i], max_input) * max_target; // De-normalize output
        std::cout << "Input: " << inputs[i] << " Predicted: " << prediction << " Actual: " << targets[i] << std::endl;
    }
    return 0;
}