Skip to content

Commit

Permalink
Merge pull request #16 from fszewczyk/easy-vector-creation
Browse files Browse the repository at this point in the history
Vector Creation through Parameter Pack
  • Loading branch information
fszewczyk authored Nov 9, 2023
2 parents 185f073 + 0f6ade6 commit 5f83c13
Show file tree
Hide file tree
Showing 6 changed files with 57 additions and 27 deletions.
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,10 @@ int main() {
std::vector<Vec32> ys;

// ---------- INPUT ----------- | -------- OUTPUT --------- //
xs.push_back(Vec32::of({0, 0})); ys.push_back(Vec32::of({0}));
xs.push_back(Vec32::of({1, 0})); ys.push_back(Vec32::of({1}));
xs.push_back(Vec32::of({0, 1})); ys.push_back(Vec32::of({1}));
xs.push_back(Vec32::of({1, 1})); ys.push_back(Vec32::of({0}));
xs.push_back(Vec32::of(0, 0)); ys.push_back(Vec32::of(0));
xs.push_back(Vec32::of(1, 0)); ys.push_back(Vec32::of(1));
xs.push_back(Vec32::of(0, 1)); ys.push_back(Vec32::of(1));
xs.push_back(Vec32::of(1, 1)); ys.push_back(Vec32::of(0));

auto network = SequentialBuilder<Type::float32>::begin()
.add(Linear32::create(2, 15))
Expand Down
18 changes: 17 additions & 1 deletion docs/tutorials/Cheatsheet.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,22 @@ Adam64 = Adam<Type::f64>
{Class}64 = {Class}<Type::float64> = {Class}<double>
```

## Vectors

Here are all the available operations using `Vector`:

```{.cpp}
auto a = Vector<float>::of(1, 2, 3);
auto b = Vector<float>::of({2, 3, 4}); // a[i] == b[i];
a.size() // 3
a.dot(b) // 1 * 2 + 2 * 3 + 3 * 4 = 20
a.sum() // 6
a *= 3 // a = {3, 6, 9}
a /= 2 // a = {1.5, 3, 4.5}
a[1] // 3
```

## Layers

Here's a full list of available layers:
Expand All @@ -32,7 +48,7 @@ These are all implemented optimizers:
```{.cpp}
auto simple = Optimizer32(network->parameters(), learningRate);
auto sgdWithMomentum = SGD32(network->parameters(), learningRate, momentum = 0.9);
auto adam = Adam32(network->parameters(), learningRate, beta1 = 0.9, beta2=0.999, epsilon=1e-8);
auto adam = Adam32(network->parameters(), learningRate, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8);
```

## Loss functions
Expand Down
27 changes: 15 additions & 12 deletions docs/tutorials/GetStarted.md
Original file line number Diff line number Diff line change
Expand Up @@ -79,21 +79,24 @@ If you want some refreshment on derivatives, check out [this wonderful video](ht
Multiple scalars can be grouped together in a `Vector` to simplify operating on them. Input to any `Module` (more on them later) is a `Vector`. This abstraction provides some functionality that allows you to compute, for example a dot product.

```{.cpp}
// The easiest way to create a Vector
auto a = Vector<T>::of({1, 2, 3});
// The easiest way to create a Vector
auto a = Vector<T>::of(1, 2, 3);
// A bit more annoying way to create a Vector
auto b = Vector<T>::of({1, 2, 3});
// The hard way to create a Vector
auto b = Vector<T>(Value<T>::create(2), Value<T>::create(3), Value<T>::create(4));
auto c = Vector<T>(Value<T>::create(2), Value<T>::create(3), Value<T>::create(4));
// You can access elements in a vector
auto c = Vector<T>::of({a[0]*b[0], a[1]*b[1], a[2]*b[2]});
auto d = Vector<T>::of({a[0]*b[0], a[1]*b[1], a[2]*b[2]});
// And even iterate over it
for(auto &entry : c)
std::cout << c << std::endl; // prints: 2 6 12
for(auto &entry : d)
std::cout << entry << std::endl; // prints: 2 6 12
auto d = a.dot(b) // c = 1 * 2 + 2 * 3 + 3 * 4 = 20
d->backward(); // You can compute of this result since it's a scalar!
auto e = b.dot(c); // e = 1 * 2 + 2 * 3 + 3 * 4 = 20
e->backward(); // You can compute the gradient of this result since it's a scalar!
```

`Vectors` are very useful since this is the way both the input and output data is represented. Each sample consists of an input `Vector` and a target output `Vector`.
Expand Down Expand Up @@ -169,10 +172,10 @@ std::vector<Vec32> xs;
std::vector<Vec32> ys;
// ---------- INPUT ----------- | -------- OUTPUT --------- //
xs.push_back(Vec32::of({0, 0})); ys.push_back(Vec32::of({0}));
xs.push_back(Vec32::of({1, 0})); ys.push_back(Vec32::of({1}));
xs.push_back(Vec32::of({0, 1})); ys.push_back(Vec32::of({1}));
xs.push_back(Vec32::of({1, 1})); ys.push_back(Vec32::of({0}));
xs.push_back(Vec32::of(0, 0)); ys.push_back(Vec32::of(0));
xs.push_back(Vec32::of(1, 0)); ys.push_back(Vec32::of(1));
xs.push_back(Vec32::of(0, 1)); ys.push_back(Vec32::of(1));
xs.push_back(Vec32::of(1, 1)); ys.push_back(Vec32::of(0));
```

### Neural Network
Expand Down
8 changes: 4 additions & 4 deletions examples/xor_classification.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,10 @@ int main() {
std::vector<Vec32> ys;

// ---------- INPUT ----------- | -------- OUTPUT --------- //
xs.push_back(Vec32::of({0, 0})); ys.push_back(Vec32::of({1, 0}));
xs.push_back(Vec32::of({1, 0})); ys.push_back(Vec32::of({0, 1}));
xs.push_back(Vec32::of({0, 1})); ys.push_back(Vec32::of({0, 1}));
xs.push_back(Vec32::of({1, 1})); ys.push_back(Vec32::of({1, 0}));
xs.push_back(Vec32::of(0, 0)); ys.push_back(Vec32::of(1, 0));
xs.push_back(Vec32::of(1, 0)); ys.push_back(Vec32::of(0, 1));
xs.push_back(Vec32::of(0, 1)); ys.push_back(Vec32::of(0, 1));
xs.push_back(Vec32::of(1, 1)); ys.push_back(Vec32::of(1, 0));

auto mlp = SequentialBuilder<Type::float32>::begin()
.add(Linear32::create(2, 15))
Expand Down
8 changes: 4 additions & 4 deletions examples/xor_regression.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,10 @@ int main() {
std::vector<Vec32> ys;

// ---------- INPUT ----------- | -------- OUTPUT --------- //
xs.push_back(Vec32::of({0, 0})); ys.push_back(Vec32::of({0}));
xs.push_back(Vec32::of({1, 0})); ys.push_back(Vec32::of({1}));
xs.push_back(Vec32::of({0, 1})); ys.push_back(Vec32::of({1}));
xs.push_back(Vec32::of({1, 1})); ys.push_back(Vec32::of({0}));
xs.push_back(Vec32::of(0, 0)); ys.push_back(Vec32::of(0));
xs.push_back(Vec32::of(1, 0)); ys.push_back(Vec32::of(1));
xs.push_back(Vec32::of(0, 1)); ys.push_back(Vec32::of(1));
xs.push_back(Vec32::of(1, 1)); ys.push_back(Vec32::of(0));

auto network = SequentialBuilder<Type::float32>::begin()
.add(Linear32::create(2, 15))
Expand Down
15 changes: 13 additions & 2 deletions include/core/Vector.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ template <typename T> class Vector {
Vector(std::vector<ValuePtr<T>> values);

static Vector<T> of(const std::vector<T> &values);
template <typename... Args> static Vector<T> of(const Args &...args);

ValuePtr<T> dot(const Vector<T> &other) const;
ValuePtr<T> sum() const;
size_t size() const;
Expand Down Expand Up @@ -73,6 +75,15 @@ template <typename T> Vector<T> Vector<T>::of(const std::vector<T> &values) {
return valuePtrs;
}

/// Builds a Vector directly from a parameter pack of scalar values,
/// e.g. `Vector<float>::of(1, 2, 3)`. Each argument is wrapped in a
/// freshly created autograd Value node.
template <typename T> template <typename... Args> Vector<T> Vector<T>::of(const Args &...args) {
    // Pack expansion inside the braced initializer creates the elements
    // left-to-right, exactly like the fold expression it replaces; the
    // resulting vector holds sizeof...(args) entries.
    std::vector<ValuePtr<T>> elements{Value<T>::create(args)...};

    return Vector<T>(elements);
}

// Returns the number of scalar elements stored in this vector.
template <typename T> size_t Vector<T>::size() const { return _values.size(); }

template <typename T> ValuePtr<T> Vector<T>::dot(const Vector<T> &other) const {
Expand All @@ -96,12 +107,12 @@ template <typename T> ValuePtr<T> Vector<T>::sum() const {
}

/// Element-wise division of a vector by a scalar.
/// @param x   vector operand (taken by value, so the caller's vector is untouched)
/// @param val scalar divisor
/// @return a new Vector with every entry divided by `val`
template <typename T> Vector<T> operator/(Vector<T> x, T val) {
    // BUG FIX: the committed diff used `x *= Value<T>::create(val);` here,
    // which makes operator/ behave exactly like operator*. Division must
    // use the /= compound operator. Wrapping the raw scalar in a Value
    // node mirrors the operator* overload so the operation participates
    // in the autograd graph (assumes Vector<T>::operator/= accepts a
    // ValuePtr, the mirror of the *= overload — confirm in Vector.hpp).
    x /= Value<T>::create(val);
    return x;
}

/// Element-wise multiplication of a vector by a scalar.
/// @param x   vector operand (taken by value, so the caller's vector is untouched)
/// @param val scalar multiplier
/// @return a new Vector with every entry multiplied by `val`
template <typename T> Vector<T> operator*(Vector<T> x, T val) {
    // The merged diff left both the old `x *= val;` line and the new
    // wrapped form in place, which would multiply by `val` twice.
    // Only the new form is kept: wrapping the scalar in a Value node
    // tracks the product in the autograd graph.
    x *= Value<T>::create(val);
    return x;
}

Expand Down

0 comments on commit 5f83c13

Please sign in to comment.