main.py
"""Train a small MLP on synthetic data with plain SGD, using the
microgradnp autograd engine."""
import numpy as np

from microgradnp.engine import Value
from microgradnp.nn import Linear, ReLU, Sequential


def mse_loss(y_pred, y_true):
    """Mean squared error: mean of the element-wise squared differences."""
    diff = y_pred - y_true
    squared_diff = diff ** 2
    return squared_diff.mean()


if __name__ == '__main__':
    # Hyperparameters and a synthetic regression dataset
    n_samples = 100
    input_features = 5
    hidden_dim = 16
    output_features = 3
    n_epochs = 100
    learning_rate = 0.01

    X = np.random.randn(n_samples, input_features)
    Y = np.random.randn(n_samples, output_features)

    # Two-hidden-layer MLP with ReLU activations
    model = Sequential(Linear(input_features, hidden_dim),
                       ReLU(),
                       Linear(hidden_dim, hidden_dim),
                       ReLU(),
                       Linear(hidden_dim, output_features))

    # Training loop: one SGD update per sample
    for epoch in range(n_epochs):
        epoch_loss = 0.0
        for x, y in zip(X, Y):
            # Wrap each sample as a (1, n_features) Value node
            x_value = Value(x.reshape(1, -1))
            y_value = Value(y.reshape(1, -1))

            # Forward pass
            model.zero_grad()
            y_pred = model.forward(x_value)
            loss = mse_loss(y_pred, y_value)
            epoch_loss += float(loss.data)  # accumulate as a plain float

            # Backward pass, then a vanilla SGD parameter update
            loss.backward()
            for param in model.parameters():
                param.data -= learning_rate * param.grad

        epoch_loss /= n_samples
        print(f"Epoch {epoch + 1}/{n_epochs}, Loss: {epoch_loss:.6f}")
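# ----------------------------------------------------------------------
# Note: the microgradnp classes imported above are not part of this
# file. The interface sketched below is inferred purely from how they
# are used here; the actual definitions in microgradnp.engine and
# microgradnp.nn may differ.
#
#   Value       wraps an np.ndarray; exposes .data and .grad, supports
#               subtraction and ** via operator overloading, provides
#               .mean() returning a Value, and .backward() to run
#               reverse-mode autodiff from that node.
#   Linear,     layers exposing forward(Value) -> Value.
#   ReLU
#   Sequential  chains layers: forward() applies them in order,
#               parameters() yields every trainable Value, and
#               zero_grad() resets each parameter's .grad buffer.
# ----------------------------------------------------------------------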