%% Cell type:code id:91a69c6f tags:
``` python
import numpy as np
import scipy.stats as stt
import matplotlib.pyplot as plt
```
%% Cell type:markdown id:6e61ad53 tags:
We generate a dataset of 2-dimensional samples, normally distributed with mean 0 and covariance matrix `sigma`.
%% Cell type:code id:1f24c04e tags:
``` python
n_train = 100 # number of training samples
# mean of input distribution
mu = np.zeros([2])
# covariance of input distribution
sigma = np.eye(2)
sigma[1,1] = 0.5
sigma[0,1] = sigma[1,0] = 0.7 * sigma[0,0] * sigma[1,1] # off-diagonal covariance
# generator of input samples (normal distribution)
gen_data = stt.multivariate_normal(mu, sigma)
# draw the training samples
data = gen_data.rvs(size=n_train)
```
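%% Cell type:markdown tags:
As a quick sanity check, the empirical covariance of the samples should be close to `sigma` up to sampling noise; `np.cov` expects variables in rows, hence the transpose.
%% Cell type:code tags:
``` python
# compare the target covariance with the empirical covariance of the samples
print('target covariance:\n', sigma)
print('empirical covariance:\n', np.cov(data.T))
```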
%% Cell type:markdown id:d1d82a0b tags:
Let us plot the data in their native plane, together with the first principal component of the distribution.
%% Cell type:code id:d4e0b423 tags:
``` python
# input samples and first principal component
# eigendecomposition of the covariance: the eigenvector with the largest
# eigenvalue is the first principal component
ev, u = np.linalg.eig(sigma)
i_pc = np.argmax(np.abs(ev))
pc1 = np.real(u[:,i_pc])
plt.figure(figsize=[4,3])
plt.axes([0.2,0.2,0.7,0.7])
plt.scatter(data[:,0], data[:,1], marker='.', color='k', label='samples')
plt.arrow(0, 0, 2*pc1[0], 2*pc1[1], width=0.1, color='blue', label='pc1')
plt.legend()
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
```
%% Output
%% Cell type:markdown id:d90d6ac2 tags:
We now train a single linear neuron following Oja's rule.
The activation is simply linear, mapping the input vector `x` to the scalar output `y` through the weight vector `w`:
$$ y = w^\top x = \sum_i w_i x_i $$
The learning rule corresponds to the weight update:
$$ \Delta w_i \propto y \, ( x_i - y \, w_i ) $$
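%% Cell type:markdown tags:
To see why this rule extracts the first principal component, average the update over the zero-mean input distribution with covariance matrix $C$; using $y = w^\top x$,
$$ \langle \Delta w \rangle \propto C w - (w^\top C w) \, w $$
The fixed points satisfy $C w = (w^\top C w) \, w$, so $w$ must be an eigenvector of $C$ with unit norm; the stable fixed points are $\pm$ the eigenvector with the largest eigenvalue, which we check numerically below.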
%% Cell type:code id:59431cc6 tags:
``` python
N = 2 # input dimension (2D samples)
eta = 3.0 / n_train # learning rate
w = np.random.randn(N) * 0.1 # initial weights

def f(x):
    # linear readout: scalar output from the input vector
    return np.dot(w, x)

w_hist = np.zeros([n_train,N]) # history of weights
y_hist = np.zeros([n_train]) # history of outputs
# loop over the training samples
for i in range(n_train):
    # calculate output from input
    x = data[i,:]
    y = f(x)
    # Oja's rule
    w += eta * y * ( x - y * w )
    # store weights and output
    y_hist[i] = y
    w_hist[i,:] = w
```
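%% Cell type:markdown tags:
Oja's rule implicitly normalizes the weights, so the norm of `w` should be close to 1 at the end of training; a minimal check on the run above:
%% Cell type:code tags:
``` python
# the weight norm should converge towards 1 under Oja's rule
print('final weight norm:', np.linalg.norm(w))
```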
%% Cell type:markdown id:0e916570 tags:
We plot the evolution of the weights and of the output activity during training.
%% Cell type:code id:cee1fb3b tags:
``` python
# weight evolution
plt.figure(figsize=[4,3])
plt.axes([0.2,0.2,0.7,0.7])
plt.plot(range(n_train), w_hist)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xlabel('training trial', fontsize=10)
plt.ylabel('weight', fontsize=10)
plt.figure(figsize=[4,3])
plt.axes([0.2,0.2,0.7,0.7])
plt.imshow(w_hist.T, cmap='jet', interpolation='none', aspect='auto')
plt.colorbar()
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xlabel('training trial', fontsize=10)
plt.ylabel('weight index', fontsize=10)
# activity evolution
plt.figure(figsize=[4,3])
plt.axes([0.2,0.2,0.7,0.7])
plt.plot(range(n_train), y_hist, 'k')
plt.xlabel('training trial')
plt.ylabel('output y')
plt.show()
```
%% Output
%% Cell type:code id:5b4e2682 tags:
``` python
# final weights versus principal component
plt.figure(figsize=[4,3])
plt.axes([0.2,0.2,0.7,0.7])
plt.scatter(data[:,0], data[:,1], marker='.', color='k', label='samples')
plt.arrow(0, 0, 2*pc1[0], 2*pc1[1], width=0.1, color='blue', label='pc1')
plt.arrow(0, 0, w[0], w[1], width=0.1, color='red', label='weights')
plt.legend()
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
```
%% Output
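%% Cell type:markdown tags:
Beyond the visual comparison, we can quantify the alignment between the learned weights and the principal component; the absolute cosine similarity should be close to 1 (the sign of an eigenvector is arbitrary).
%% Cell type:code tags:
``` python
# absolute cosine similarity between the final weights and pc1
cos_sim = np.dot(w, pc1) / (np.linalg.norm(w) * np.linalg.norm(pc1))
print('|cosine| between w and pc1:', np.abs(cos_sim))
```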