Simple Dataset
fszewczyk committed Nov 9, 2023
1 parent c1a8e07 commit a40b76d
Showing 4 changed files with 24 additions and 63 deletions.
48 changes: 0 additions & 48 deletions examples/xor_classification.cpp

This file was deleted.

30 changes: 15 additions & 15 deletions examples/xor_regression.cpp
@@ -5,14 +5,12 @@ int main() {
     using T = Type::float32;
 
     // clang-format off
-    std::vector<Vec32> xs;
-    std::vector<Vec32> ys;
+    Dataset<Vec32, Vec32> data;
 
     // ---------- INPUT ----------- | -------- OUTPUT --------- //
-    xs.push_back(Vec32::of(0, 0));    ys.push_back(Vec32::of(0));
-    xs.push_back(Vec32::of(1, 0));    ys.push_back(Vec32::of(1));
-    xs.push_back(Vec32::of(0, 1));    ys.push_back(Vec32::of(1));
-    xs.push_back(Vec32::of(1, 1));    ys.push_back(Vec32::of(0));
+    data.addSample(Vec32::of(0, 0), Vec32::of(0));
+    data.addSample(Vec32::of(0, 1), Vec32::of(1));
+    data.addSample(Vec32::of(1, 0), Vec32::of(1));
+    data.addSample(Vec32::of(1, 1), Vec32::of(0));
 
     auto network = SequentialBuilder<Type::float32>::begin()
                        .add(Linear32::create(2, 15))
@@ -28,19 +26,21 @@ int main() {
     auto lossFunction = Loss::MSE<T>;
 
     for (size_t epoch = 0; epoch < 100; epoch++) { // We train for 100 epochs
-        optimizer.reset();                                 // Reset the gradients
-
-        auto pred = network->forward(xs);                  // We get some prediction
-        auto loss = Loss::compute(lossFunction, pred, ys); // And calculate its error
+        auto epochLoss = Val32::create(0);
 
+        optimizer.reset();                                 // Reset the gradients
+        for (auto &[x, y] : data) {
+            auto pred = network->forward(x);               // We get some prediction
+            epochLoss = epochLoss + Loss::compute(lossFunction, pred, y); // And calculate its error
+        }
         optimizer.step(); // Update the parameters
 
-        auto averageLoss = loss / Val32::create(xs.size());
+        auto averageLoss = epochLoss / Val32::create(data.size());
         std::cout << "Epoch: " << epoch + 1 << " Loss: " << averageLoss->getValue() << std::endl;
     }
 
-    for (size_t sample = 0; sample < xs.size(); ++sample) { // Go through each example
-        Vec32 pred = network->forward(xs[sample]);          // Predict result
-        std::cout << xs[sample] << " -> " << pred[0] << "\t| True: " << ys[sample][0] << std::endl;
+    for (auto &[x, y] : data) {          // Go through each example
+        auto pred = network->forward(x); // We get some prediction
+        std::cout << x << " -> " << pred[0] << "\t| True: " << y[0] << std::endl;
     }
 }
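
Note: the reworked example leans on the new Dataset container added under include/nn/data/Dataset.hpp, whose implementation is not part of this diff. Judging only from the calls above (addSample, size(), and range-based iteration with structured bindings), a minimal sketch of such a container might look like the following; the real header may well differ.

// Hypothetical sketch only -- mirrors the Dataset usage in the example
// above (addSample, size, structured-binding iteration); the actual
// include/nn/data/Dataset.hpp may be implemented differently.
#include <cstddef>
#include <utility>
#include <vector>

template <typename TInput, typename TTarget>
class Dataset {
  public:
    // Store one (input, target) pair.
    void addSample(TInput input, TTarget target) {
        _samples.emplace_back(std::move(input), std::move(target));
    }

    size_t size() const { return _samples.size(); }

    // Expose the stored pairs so `for (auto &[x, y] : data)` works.
    auto begin() { return _samples.begin(); }
    auto end() { return _samples.end(); }

  private:
    std::vector<std::pair<TInput, TTarget>> _samples;
};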
2 changes: 2 additions & 0 deletions include/ShkyeraGrad.hpp
@@ -17,6 +17,8 @@
 #include "nn/Neuron.hpp"
 #include "nn/Sequential.hpp"
 
+#include "nn/data/Dataset.hpp"
+
 #include "nn/optimizers/AdaMax.hpp"
 #include "nn/optimizers/Adam.hpp"
 #include "nn/optimizers/NAG.hpp"
7 changes: 7 additions & 0 deletions include/nn/Loss.hpp
@@ -80,6 +80,13 @@ Function<T> CrossEntropy = [](Vector<T> a, Vector<T> b) {
     return loss;
 };
 
+template <typename T>
+ValuePtr<T> compute(Function<T> lossFunction, const Vector<T> prediction, const Vector<T> target) {
+    auto loss = lossFunction(prediction, target);
+    loss->backward();
+    return loss;
+}
+
 template <typename T> ValuePtr<T> compute(Function<T> lossFunction, const Batch<T> prediction, const Batch<T> target) {
     ValuePtr<T> loss = Value<T>::create(0);
     for (size_t i = 0; i < prediction.size(); ++i) {
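
Note: the new overload gives a single sample the same treatment the Batch overload applies per element: it evaluates the loss function and immediately calls backward(), so gradients accumulate across repeated calls until the optimizer clears them. A minimal usage sketch follows; the include path, the shkyera namespace, Adam32, .build(), and network->parameters() are assumptions extrapolated from this diff and the library's 32-suffix aliases, not confirmed by it.

// Usage sketch for the new single-sample Loss::compute overload.
// Assumptions: "ShkyeraGrad.hpp" is reachable on the include path, the
// library lives in namespace shkyera, and Adam32 / .build() /
// network->parameters() follow the library's apparent conventions --
// none of these appear in the diff itself.
#include "ShkyeraGrad.hpp"

#include <iostream>

int main() {
    using namespace shkyera;
    using T = Type::float32;

    Dataset<Vec32, Vec32> data;
    data.addSample(Vec32::of(0, 1), Vec32::of(1));
    data.addSample(Vec32::of(1, 0), Vec32::of(1));

    auto network = SequentialBuilder<T>::begin()
                       .add(Linear32::create(2, 1))
                       .build();
    auto optimizer = Adam32(network->parameters(), 0.05); // assumed alias
    auto lossFunction = Loss::MSE<T>;

    optimizer.reset();                  // clear gradients once per pass
    auto epochLoss = Val32::create(0);
    for (auto &[x, y] : data) {
        auto pred = network->forward(x);
        // compute() also runs backward(), so this call both scores the
        // sample and accumulates gradients into the parameters:
        epochLoss = epochLoss + Loss::compute(lossFunction, pred, y);
    }
    optimizer.step();                   // single update for the pass

    std::cout << "Mean loss: "
              << epochLoss->getValue() / data.size() << std::endl;
}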
