Dataset and DataLoader #18

Merged · 5 commits · Nov 9, 2023
2 changes: 0 additions & 2 deletions .github/workflows/linux.yml

```diff
@@ -32,7 +32,5 @@ jobs:
       run: |
         g++ examples/scalars.cpp -O3 --std=c++17
         ./a.out
-        g++ examples/xor_classification.cpp -O3 --std=c++17
-        ./a.out
         g++ examples/xor_regression.cpp -O3 --std=c++17
         ./a.out
```

2 changes: 0 additions & 2 deletions .github/workflows/macos.yml

```diff
@@ -32,7 +32,5 @@ jobs:
       run: |
         g++ examples/scalars.cpp -O3 --std=c++17
         ./a.out
-        g++ examples/xor_classification.cpp -O3 --std=c++17
-        ./a.out
         g++ examples/xor_regression.cpp -O3 --std=c++17
         ./a.out
```

1 change: 0 additions & 1 deletion .github/workflows/windows.yml

```diff
@@ -30,5 +30,4 @@ jobs:
       CXX: ${{matrix.conf.compiler}}
       run: |
         g++ -o out examples/scalars.cpp -O3 --std=c++17
-        g++ -o out examples/xor_classification.cpp -O3 --std=c++17
         g++ -o out examples/xor_regression.cpp -O3 --std=c++17
```

1 change: 0 additions & 1 deletion .gitignore

```diff
@@ -1,5 +1,4 @@
 build/
-data/
 
 docs/html
 docs/latex
```

40 changes: 20 additions & 20 deletions README.md

````diff
@@ -34,14 +34,17 @@ int main() {
     using namespace shkyera;
     using T = Type::float32;
 
-    std::vector<Vec32> xs;
-    std::vector<Vec32> ys;
-
-    // ---------- INPUT ----------- | -------- OUTPUT --------- //
-    xs.push_back(Vec32::of(0, 0)); ys.push_back(Vec32::of(0));
-    xs.push_back(Vec32::of(1, 0)); ys.push_back(Vec32::of(1));
-    xs.push_back(Vec32::of(0, 1)); ys.push_back(Vec32::of(1));
-    xs.push_back(Vec32::of(1, 1)); ys.push_back(Vec32::of(0));
+    // This is our XOR dataset. It maps from Vec32 to Vec32.
+    Dataset<Vec32, Vec32> data;
+    data.addSample(Vec32::of(0, 0), Vec32::of(0));
+    data.addSample(Vec32::of(0, 1), Vec32::of(1));
+    data.addSample(Vec32::of(1, 0), Vec32::of(1));
+    data.addSample(Vec32::of(1, 1), Vec32::of(0));
+
+    // This is the data loader; it takes care of batching.
+    size_t batchSize = 2;
+    bool shuffle = true;
+    DataLoader loader(data, batchSize, shuffle);
 
     auto network = SequentialBuilder<Type::float32>::begin()
                        .add(Linear32::create(2, 15))
@@ -52,29 +55,26 @@
                        .add(Sigmoid32::create())
                        .build();
 
-
-    auto optimizer = Adam32(network->parameters(), 0.05);
+    auto optimizer = Adam32(network->parameters(), 0.1);
     auto lossFunction = Loss::MSE<T>;
 
     for (size_t epoch = 0; epoch < 100; epoch++) { // We train for 100 epochs
         auto epochLoss = Val32::create(0);
 
-        optimizer.reset();                                      // Reset the gradients
-        for (size_t sample = 0; sample < xs.size(); ++sample) { // We go through each sample
-            Vec32 pred = network->forward(xs[sample]);          // We get some prediction
-            auto loss = lossFunction(pred, ys[sample]);         // And calculate its error
-
-            epochLoss = epochLoss + loss;                       // Store the loss for feedback
+        optimizer.reset();                                                // Reset the gradients
+        for (const auto &[x, y] : loader) {                               // For each batch
+            auto pred = network->forward(x);                              // We get some prediction
+            epochLoss = epochLoss + Loss::compute(lossFunction, pred, y); // And calculate its error
         }
         optimizer.step(); // Update the parameters
 
-        auto averageLoss = epochLoss / Val32::create(xs.size());
+        auto averageLoss = epochLoss / Val32::create(loader.getTotalBatches());
         std::cout << "Epoch: " << epoch + 1 << " Loss: " << averageLoss->getValue() << std::endl;
     }
 
-    for (size_t sample = 0; sample < xs.size(); ++sample) { // Go through each example
-        Vec32 pred = network->forward(xs[sample]);          // Predict result
-        std::cout << xs[sample] << " -> " << pred[0] << "\t| True: " << ys[sample][0] << std::endl;
+    for (auto &[x, y] : data) {          // Go through each example
+        auto pred = network->forward(x); // We get some prediction
+        std::cout << x << " -> " << pred[0] << "\t| True: " << y[0] << std::endl;
     }
 }
 ```
````

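A quick sketch of the new loader in isolation, for reviewers who want to see the iteration contract on its own. All names come from the diff above; the include path is a guess, and the batch element type (presumably `Batch<Type::float32>`, i.e. `std::vector<Vec32>`, per the `Vector.hpp` change below) is my reading of the diff, not documented API:

```cpp
#include <iostream>

#include "include/ShkyeraGrad.hpp" // include path is an assumption; use whatever the examples use

int main() {
    using namespace shkyera;

    // Same style of XOR dataset as the README example above
    Dataset<Vec32, Vec32> data;
    data.addSample(Vec32::of(0, 1), Vec32::of(1));
    data.addSample(Vec32::of(1, 0), Vec32::of(1));

    // batchSize = 2 and shuffle = false, so one deterministic batch per pass
    DataLoader loader(data, 2, false);
    for (const auto &[x, y] : loader) {
        // x and y are batches (presumably std::vector<Vec32>) holding both samples
        std::cout << x.size() << " inputs, " << y.size() << " targets\n";
    }
}
```
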
52 changes: 0 additions & 52 deletions examples/xor_classification.cpp

This file was deleted.

39 changes: 20 additions & 19 deletions examples/xor_regression.cpp

```diff
@@ -5,14 +5,17 @@ int main() {
     using T = Type::float32;
 
     // clang-format off
-    std::vector<Vec32> xs;
-    std::vector<Vec32> ys;
-
-    // ---------- INPUT ----------- | -------- OUTPUT --------- //
-    xs.push_back(Vec32::of(0, 0)); ys.push_back(Vec32::of(0));
-    xs.push_back(Vec32::of(1, 0)); ys.push_back(Vec32::of(1));
-    xs.push_back(Vec32::of(0, 1)); ys.push_back(Vec32::of(1));
-    xs.push_back(Vec32::of(1, 1)); ys.push_back(Vec32::of(0));
+    // This is our XOR dataset. It maps from Vec32 to Vec32.
+    Dataset<Vec32, Vec32> data;
+    data.addSample(Vec32::of(0, 0), Vec32::of(0));
+    data.addSample(Vec32::of(0, 1), Vec32::of(1));
+    data.addSample(Vec32::of(1, 0), Vec32::of(1));
+    data.addSample(Vec32::of(1, 1), Vec32::of(0));
+
+    // This is the data loader; it takes care of batching.
+    size_t batchSize = 2;
+    bool shuffle = true;
+    DataLoader loader(data, batchSize, shuffle);
 
     auto network = SequentialBuilder<Type::float32>::begin()
                        .add(Linear32::create(2, 15))
@@ -24,27 +27,25 @@
                        .build();
     // clang-format on
 
-    auto optimizer = Adam32(network->parameters(), 0.05);
+    auto optimizer = Adam32(network->parameters(), 0.1);
     auto lossFunction = Loss::MSE<T>;
 
    for (size_t epoch = 0; epoch < 100; epoch++) { // We train for 100 epochs
         auto epochLoss = Val32::create(0);
 
-        optimizer.reset();                                      // Reset the gradients
-        for (size_t sample = 0; sample < xs.size(); ++sample) { // We go through each sample
-            Vec32 pred = network->forward(xs[sample]);          // We get some prediction
-            auto loss = lossFunction(pred, ys[sample]);         // And calculate its error
-
-            epochLoss = epochLoss + loss;                       // Store the loss for feedback
+        optimizer.reset();                                                // Reset the gradients
+        for (const auto &[x, y] : loader) {                               // For each batch
+            auto pred = network->forward(x);                              // We get some prediction
+            epochLoss = epochLoss + Loss::compute(lossFunction, pred, y); // And calculate its error
         }
         optimizer.step(); // Update the parameters
 
-        auto averageLoss = epochLoss / Val32::create(xs.size());
+        auto averageLoss = epochLoss / Val32::create(loader.getTotalBatches());
         std::cout << "Epoch: " << epoch + 1 << " Loss: " << averageLoss->getValue() << std::endl;
     }
 
-    for (size_t sample = 0; sample < xs.size(); ++sample) { // Go through each example
-        Vec32 pred = network->forward(xs[sample]);          // Predict result
-        std::cout << xs[sample] << " -> " << pred[0] << "\t| True: " << ys[sample][0] << std::endl;
+    for (auto &[x, y] : data) {          // Go through each example
+        auto pred = network->forward(x); // We get some prediction
+        std::cout << x << " -> " << pred[0] << "\t| True: " << y[0] << std::endl;
     }
 }
```

3 changes: 3 additions & 0 deletions include/ShkyeraGrad.hpp

```diff
@@ -17,6 +17,9 @@
 #include "nn/Neuron.hpp"
 #include "nn/Sequential.hpp"
 
+#include "nn/data/DataLoader.hpp"
+#include "nn/data/Dataset.hpp"
+
 #include "nn/optimizers/AdaMax.hpp"
 #include "nn/optimizers/Adam.hpp"
 #include "nn/optimizers/NAG.hpp"
```

2 changes: 2 additions & 0 deletions include/core/Utils.hpp

```diff
@@ -53,6 +53,8 @@ std::enable_if_t<std::is_integral_v<T>, std::vector<T>> sample(T from, T to, siz
     return sampled;
 }
 
+template <typename T> void shuffle(std::vector<T> &vec) { std::shuffle(vec.begin(), vec.end(), rand_dev); }
+
 template <typename Clock = std::chrono::high_resolution_clock> auto startTimer() { return Clock::now(); }
 
 template <typename Clock = std::chrono::high_resolution_clock>
```

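This new `shuffle` helper is what lets the `DataLoader` reorder samples between epochs. A free-standing sketch of the same idea; `rand_dev` is defined elsewhere in `Utils.hpp` and not shown in this diff, so this version supplies its own generator:

```cpp
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

// Free-standing equivalent of the new utils shuffle (my version); the library
// one reuses the header's shared rand_dev generator instead of creating its own.
template <typename T> void shuffle(std::vector<T> &vec) {
    static std::mt19937 gen{std::random_device{}()};
    std::shuffle(vec.begin(), vec.end(), gen);
}

int main() {
    std::vector<int> order = {0, 1, 2, 3}; // e.g. sample indices for one epoch
    shuffle(order);
    for (int i : order)
        std::cout << i << ' '; // some permutation of 0 1 2 3
    std::cout << '\n';
}
```
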
1 change: 1 addition & 0 deletions include/core/Vector.hpp

```diff
@@ -18,6 +18,7 @@ template <typename T> class Vector;
 
 using Vec32 = Vector<Type::float32>;
 using Vec64 = Vector<Type::float64>;
+template <typename T> using Batch = std::vector<Vector<T>>;
 
 template <typename T> class Vector {
   private:
```

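The `Batch<T>` alias is what the batched `Loss::compute` and `Module::operator()` signatures below are written against: a batch is simply a vector of `Vector<T>`. A stand-alone sketch of the shape, with a stand-in `Vector` since this snippet doesn't pull in the real header:

```cpp
#include <cassert>
#include <vector>

// Stand-in for shkyera::Vector<T>, only to illustrate the alias's shape.
template <typename T> struct Vector { std::vector<T> values; };

// The new alias from core/Vector.hpp: a batch is a vector of Vectors.
template <typename T> using Batch = std::vector<Vector<T>>;

int main() {
    Batch<float> xs = {{{0, 0}}, {{0, 1}}, {{1, 0}}, {{1, 1}}}; // four 2-d samples
    assert(xs.size() == 4);           // batch dimension
    assert(xs[0].values.size() == 2); // feature dimension of each sample
}
```
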
24 changes: 19 additions & 5 deletions include/nn/Loss.hpp

```diff
@@ -8,6 +8,7 @@
 #pragma once
 
 #include "../core/Value.hpp"
+#include "../core/Vector.hpp"
 
 namespace shkyera::Loss {
 
@@ -31,8 +32,6 @@ Function<T> MSE = [](Vector<T> a, Vector<T> b) {
     if (a.size() > 0)
         loss = loss / Value<T>::create(a.size());
 
-    loss->backward();
-
     return loss;
 };
@@ -52,8 +51,6 @@ Function<T> MAE = [](Vector<T> a, Vector<T> b) {
     if (a.size() > 0)
         loss = loss / Value<T>::create(a.size());
 
-    loss->backward();
-
     return loss;
 };
@@ -80,9 +77,26 @@ Function<T> CrossEntropy = [](Vector<T> a, Vector<T> b) {
         loss = loss - (b[i] * (a[i]->log()));
     }
 
-    loss->backward();
     return loss;
 };
+
+template <typename T>
+ValuePtr<T> compute(Function<T> lossFunction, const Vector<T> prediction, const Vector<T> target) {
+    auto loss = lossFunction(prediction, target);
+    loss->backward();
+    return loss;
+}
+
+template <typename T> ValuePtr<T> compute(Function<T> lossFunction, const Batch<T> prediction, const Batch<T> target) {
+    ValuePtr<T> loss = Value<T>::create(0);
+    for (size_t i = 0; i < prediction.size(); ++i) {
+        loss = loss + lossFunction(prediction[i], target[i]);
+    }
+    loss = loss / Value<T>::create(prediction.size());
+
+    loss->backward();
+
+    return loss;
+}
 
 } // namespace shkyera::Loss
```

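Two consequences of this change worth noting in review: the loss lambdas no longer call `backward()` themselves, so a loss can now be evaluated (say, on held-out data) without touching gradients, and the batched `compute` averages per-sample losses before a single backward pass. A plain-float mirror of that reduction, just to pin down the arithmetic (no autograd here, and `meanLoss` is my name, not the library's):

```cpp
#include <cassert>
#include <vector>

// Mirrors the batched Loss::compute reduction: the mean of per-sample losses.
// Differentiating this mean once is equivalent to averaging per-sample gradients.
float meanLoss(const std::vector<float> &perSample) {
    float total = 0.0f;
    for (float l : perSample)
        total += l;
    return total / static_cast<float>(perSample.size());
}

int main() {
    assert(meanLoss({0.25f, 0.75f}) == 0.5f); // one batch of two samples
}
```
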
11 changes: 10 additions & 1 deletion include/nn/Module.hpp

```diff
@@ -19,8 +19,17 @@ template <typename T> class Module {
     Module() = default;
 
   public:
-    Vector<T> forward(const Vector<T> &x) const { return (*this)(x); }
+    template <typename U> U forward(const U &x) const { return (*this)(x); }
 
     virtual Vector<T> operator()(const Vector<T> &x) const { return x; }
+    std::vector<Vector<T>> operator()(const std::vector<Vector<T>> &x) const {
+        std::vector<Vector<T>> out(x.size());
+        for (size_t i = 0; i < x.size(); ++i) {
+            out[i] = this->operator()(x[i]);
+        }
+        return out;
+    }
+
     virtual std::vector<ValuePtr<T>> parameters() const { return {}; }
 };
```

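The templated `forward` plus the new `std::vector` overload is a classic fan-out: one entry point, applied element-wise over a batch. Because the batch overload delegates to the virtual single-sample `operator()`, derived layers only implement the single-sample path and get batching for free. A minimal self-contained mirror of that dispatch pattern (stand-in types, since `Vector<T>` lives outside this diff):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Mirror of the Module change: one templated forward() forwards to operator(),
// and the batch overload applies the single-sample overload element-wise.
struct EchoModule {
    template <typename U> U forward(const U &x) const { return (*this)(x); }

    std::string operator()(const std::string &x) const { return x + "!"; }
    std::vector<std::string> operator()(const std::vector<std::string> &x) const {
        std::vector<std::string> out(x.size());
        for (std::size_t i = 0; i < x.size(); ++i)
            out[i] = (*this)(x[i]);
        return out;
    }
};

int main() {
    EchoModule m;
    std::cout << m.forward(std::string("sample")) << '\n'; // single sample: "sample!"
    for (const auto &s : m.forward(std::vector<std::string>{"a", "b"}))
        std::cout << s << '\n'; // batch: "a!" then "b!"
}
```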
