Commit b39c384

Make the questionnaire

1 parent 8c1c8b2
3 files changed: +124 −228 lines changed

README.md

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ pip install -r requirements.txt
 
 into the terminal to install the required software.
 
-Jax takes care of our autograd needs. The documentation is available at https://jax.readthedocs.io/en/latest/index.html . Flax is a high-level neural network library. https://flax.readthedocs.io/en/latest/ hosts the documentation.
+Torch takes care of our autograd needs. The documentation is available at https://pytorch.org/docs/stable/index.html. torch.nn provides all the necessary modules for neural networks. https://pytorch.org/docs/stable/nn.html hosts the documentation.
 
 
 ### Task 1: Denoising a cosine
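Since the README now points to Torch for autograd, here is a minimal sketch (an editor's illustration, not part of the commit) of the functional gradient API that torch.func exposes; square_sum is a made-up toy function:

```python
import torch as th
from torch.func import grad_and_value

def square_sum(x: th.Tensor) -> th.Tensor:
    """A toy scalar function: sum of squares."""
    return (x ** 2).sum()

# grad_and_value returns a function that yields (gradient, value).
g, val = grad_and_value(square_sum)(th.tensor([1.0, 2.0]))
print(g)    # tensor([2., 4.])  since d/dx sum(x^2) = 2x
print(val)  # tensor(5.)
```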

src/denoise_cosine.py

Lines changed: 39 additions & 100 deletions
@@ -1,8 +1,11 @@
 """An example focused on training a network to denoise a time series."""
 
+from typing import Dict
 
 import matplotlib.pyplot as plt
 import torch as th
+from torch.func import grad_and_value
+from tqdm import tqdm
 
 
 def sigmoid(x: th.Tensor) -> th.Tensor:
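The new tqdm import drives the progress bar in the rewritten training loop further down. A quick sketch of that walrus-operator pattern (editor's illustration; the set_description call is an optional nicety):

```python
from tqdm import tqdm

# The walrus operator keeps a handle to the bar while iterating over it.
for i in (pbar := tqdm(range(100))):
    pbar.set_description(f"step {i}")
```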
@@ -18,36 +21,18 @@ def sigmoid(x: th.Tensor) -> th.Tensor:
     return 0.
 
 
-class Net(th.nn.Module):
-    """Decosine Network."""
+def net(params: Dict, x: th.Tensor) -> th.Tensor:
+    """Set up a single layer network.
 
-    def __init__(
-        self, input_neurons: int, output_neurons: int, hidden_neurons: int
-    ) -> None:
-        """Initialize the network.
-
-        Args:
-            input_neurons (int): Number of input neurons.
-            output_neurons (int): Number of output neurons.
-            hidden_neurons (int): Number of hidden neurons.
-        """
-        super().__init__()
-        # TODO: Create two layers using th.nn.Linear.
-
-    def forward(self, x: th.Tensor) -> th.Tensor:
-        """Network forward pass.
-
-        Args:
-            x (th.Tensor): Input tensor of shape 1x200.
+    Args:
+        params (Dict): Dictionary containing W1, b, and W2.
+        x (th.Tensor): Network input.
 
-        Returns:
-            th.Tensor: Network prediction of shape 1x200.
-        """
-        # TODO: Implement the forward pass using our sigmoid function
-        # as well as the layers you created in the __init__ function.
-        # Return the network output instead of 0.
-        return 0.
+    Returns:
+        th.Tensor: Network prediction.
+    """
+    # TODO: Implement the single layer pass.
+    return None
 
 
 def cost(y: th.Tensor, h: th.Tensor) -> th.Tensor:
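One plausible completion of the new net stub (an editor's sketch under the assumption that W1 maps input to hidden, b is the hidden bias, and W2 maps hidden back to output; the committed solution may differ):

```python
def net(params: Dict, x: th.Tensor) -> th.Tensor:
    """Single hidden layer: project with W1, squash, project back with W2."""
    hidden = sigmoid(params["W1"] @ x + params["b"])
    return params["W2"] @ hidden
```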
@@ -60,79 +45,56 @@ def cost(y: th.Tensor, h: th.Tensor) -> th.Tensor:
     Returns:
         th.Tensor: Squared Error.
     """
-    # TODO: Return squared error cost instead of 0.
+    # TODO: Implement the Squared Error loss.
     return 0.
 
 
-def sgd(model: Net, step_size: float) -> Net:
-    """Perform Stochastic Gradient Descent.
+def net_cost(params: Dict, x: th.Tensor, y: th.Tensor) -> th.Tensor:
+    """Evaluate the network and compute the loss.
 
     Args:
-        model (Net): Network object.
-        step_size (float): Step size for SGD.
+        params (Dict): Dictionary containing W1, b, and W2.
+        x (th.Tensor): Network input.
+        y (th.Tensor): Desired network output.
 
     Returns:
-        Net: SGD applied model.
-    """
-    for param in model.parameters():
-        # TODO: compute an update for every parameter using param.data,
-        # step size as well as param.grad.data.
-        pass
-    return model
-
-
-def zero_grad(model: Net) -> Net:
-    """Make gradients zero after SGD.
-
-    Args:
-        model (Net): Network object.
-
-    Returns:
-        Net: Network with zeroed gradients.
+        th.Tensor: Squared Error.
     """
-    for param in model.parameters():
-        # TODO: call zero_() for every parameter.
-        pass
-    return model
+    # TODO: Call the network, compute and return the loss.
+    return None
 
 
 if __name__ == "__main__":
-    # TODO: Use th.manual_seed to set the seed for the network initialization.
+    # TODO: Use th.manual_seed with 42 to set the seed for the network initialization.
     pass
-    # TODO: Choose a step size.
-    step_size = 0.00
-    # TODO: Choose a suitable amount of iterations.
-    iterations = 100
+    # TODO: Choose a suitable step size.
+    step_size = 0.0
+    iterations = 150
     input_neurons = output_neurons = 200
-    # TODO: Choose a network size.
+    # TODO: Choose a proper network size.
     hidden_neurons = 0
 
     x = th.linspace(-3 * th.pi, 3 * th.pi, 200)
     y = th.cos(x)
 
-    # TODO: Instantiate our network using the `Net` constructor.
-    model = None
+    # TODO: Initialize the parameters.
+    W1 = None
+    b = None
+    W2 = None
 
-    for i in range(iterations):
+    # TODO: Instantiate the grad_and_value function.
+    value_grad = None
+
+    for i in (pbar := tqdm(range(iterations))):
         th.manual_seed(i)
         y_noise = y + th.randn([200])
 
-        # TODO: Compute the network output using your model.
-        preds = 0.
-
-        # TODO: Compute the loss value using your cost function.
-        loss_val = 0.
+        # TODO: Compute the loss and gradients.
 
-        # TODO: Compute the gradient by calling the backward() function of your loss Tensor.
-        pass
+        # TODO: Update the parameters using SGD.
 
-        # TODO: Use your sgd function to update your model.
-        model = None
-        # TODO: Use your zero_grad function to reset the gradients.
-        model = None
-        print(f"Iteration: {i}, Cost: {loss_val.item()}")
-
-    y_hat = model(y_noise).detach().numpy()
+    # TODO: Compute the test y_hat using y_noise and the converged parameters.
+    y_hat = None
 
     plt.title("Denoising a cosine")
     plt.plot(x, y, label="solution")
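Taken together, the stubs suggest a training step roughly like the following (an editor's sketch; the loss convention, initialization scale, and hyperparameters are assumptions, not the committed solution):

```python
# Hypothetical completion of the remaining TODOs; reuses net() from above.
def cost(y: th.Tensor, h: th.Tensor) -> th.Tensor:
    # Squared error; the 0.5 factor is a common convention (assumption).
    return 0.5 * th.sum((y - h) ** 2)

def net_cost(params: Dict, x: th.Tensor, y: th.Tensor) -> th.Tensor:
    return cost(y, net(params, x))

th.manual_seed(42)
W1 = th.randn([hidden_neurons, input_neurons]) * 0.1
b = th.zeros([hidden_neurons])
W2 = th.randn([output_neurons, hidden_neurons]) * 0.1
params = {"W1": W1, "b": b, "W2": W2}

# grad_and_value(net_cost) differentiates net_cost with respect to its
# first argument (the params dict) and also returns the loss value.
value_grad = grad_and_value(net_cost)

for i in (pbar := tqdm(range(iterations))):
    th.manual_seed(i)
    y_noise = y + th.randn([200])
    grads, loss_val = value_grad(params, y_noise, y)
    # Vanilla SGD: step each parameter against its gradient.
    params = {name: w - step_size * grads[name] for name, w in params.items()}
    pbar.set_description(f"Cost: {loss_val.item():.4f}")

# Denoise one last noisy sample with the trained parameters.
y_hat = net(params, y_noise).numpy()
```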
@@ -143,26 +105,3 @@ def zero_grad(model: Net) -> Net:
     plt.savefig("./figures/Denoise.png", dpi=600, bbox_inches="tight")
     plt.show()
     print("Done")
-
-
-
-
-
-
-
-if __name__ == "__main__":
-    step_size = 0.01
-    iterations = 100
-    hidden_neurons = 10
-
-    # generate cosine signal
-    x = jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 200)
-    y = jnp.cos(x)
-
-    # TODO: Create W1, W2 and b using different random keys
-
-    for i in range(iterations):
-        # add noise to cosine
-        y_noise = y + jax.random.normal(jax.random.PRNGKey(i), [200])
-
-        # TODO: Implement a dense neural network to denoise the cosine.