Commit 0b0b8a2

Author: Bodo Rueckauer (committed)
Releasing 0.3.2 (Brian2 extensions).
1 parent 624ce46 commit 0b0b8a2

File tree

5 files changed: +34 -34 lines changed


RELEASE_NOTES.rst renamed to changelog.rst

Lines changed: 10 additions & 0 deletions
@@ -1,6 +1,16 @@
 SNN Toolbox: Release Notes
 ==========================

+Version 0.3.2
+-------------
+
+Simulation with Brian2 backend now supports:
+- Constant input currents (less noisy than Poisson input)
+- Reset-by-subtraction (more accurate than reset-to-zero).
+- Bias currents
+
+Thanks to wilkieolin for this contribution.
+
 Version 0.3.1
 -------------
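
The three Brian2 additions listed above are selected through the user configuration rather than through code changes. Below is a minimal sketch of how they might be enabled, modeled on the config dictionaries in the updated mnist_keras_brian2.py example further down; the 'cell' section and the 'reset' key/value are assumptions and are not verified against the toolbox API:

    # Hedged sketch, not toolbox code: enabling the new Brian2 options.
    import configparser

    config = configparser.ConfigParser()
    config['simulation'] = {
        'simulator': 'brian2',
        'duration': 50,          # time steps per sample
        'batch_size': 1,
        'dt': 0.1,               # time resolution for ODE solving
    }
    config['input'] = {
        'poisson_input': False,  # constant input currents instead of Poisson spikes
    }
    config['cell'] = {
        'reset': 'Reset by subtraction',  # assumed key/value for the new reset mode
    }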

examples/mnist_keras_brian2.py

Lines changed: 10 additions & 12 deletions
@@ -13,7 +13,8 @@
 
 import keras
 from keras import Input, Model
-from keras.layers import Conv2D, AveragePooling2D, Flatten, Dense, Dropout
+from keras.layers import Conv2D, AveragePooling2D, Flatten, Dense, Dropout, \
+    BatchNormalization, Activation
 from keras.datasets import mnist
 from keras.utils import np_utils
 
@@ -67,24 +68,21 @@
 
 layer = Conv2D(filters=16,
                kernel_size=(5, 5),
-               strides=(2, 2),
-               activation='relu',
-               use_bias=False)(input_layer)
+               strides=(2, 2))(input_layer)
+layer = BatchNormalization(axis=axis)(layer)
+layer = Activation('relu')(layer)
 layer = Conv2D(filters=32,
                kernel_size=(3, 3),
-               activation='relu',
-               use_bias=False)(layer)
+               activation='relu')(layer)
 layer = AveragePooling2D()(layer)
 layer = Conv2D(filters=8,
                kernel_size=(3, 3),
                padding='same',
-               activation='relu',
-               use_bias=False)(layer)
+               activation='relu')(layer)
 layer = Flatten()(layer)
 layer = Dropout(0.01)(layer)
 layer = Dense(units=10,
-              activation='softmax',
-              use_bias=False)(layer)
+              activation='softmax')(layer)
 
 model = Model(input_layer, layer)
 
@@ -123,11 +121,11 @@
     'duration': 50,      # Number of time steps to run each sample.
     'num_to_test': 5,    # How many test samples to run.
     'batch_size': 1,     # Batch size for simulation.
-    'dt': 0.1            # Time interval for the differential equations to be solved over.
+    'dt': 0.1            # Time resolution for ODE solving.
 }
 
 config['input'] = {
-    'poisson_input': True   # Images are encodes as spike trains.
+    'poisson_input': False  # Images are encodes as spike trains.
 }
 
 config['output'] = {
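
The example now inserts BatchNormalization between the first convolution and its ReLU activation. Before conversion to a spiking network, batch-norm parameters are typically folded into the weights and bias of the preceding layer; the numpy sketch below illustrates that folding under standard inference-time BN semantics and is not the toolbox's own parser code:

    # Illustrative BN folding, assuming output channels on the last weight axis.
    import numpy as np

    def fold_batchnorm(w, b, gamma, beta, mean, var, eps=1e-3):
        # BN(conv(x, w) + b) == conv(x, w * scale) + (b - mean) * scale + beta
        scale = gamma / np.sqrt(var + eps)
        return w * scale, (b - mean) * scale + beta

    # Toy usage for a 16-channel convolution kernel.
    rng = np.random.default_rng(0)
    w = rng.normal(size=(5, 5, 1, 16))     # kernel_size=(5, 5), 1 input, 16 output channels
    b = np.zeros(16)
    gamma, beta = np.ones(16), np.zeros(16)
    mean, var = rng.normal(size=16), rng.uniform(0.5, 1.5, size=16)
    w_folded, b_folded = fold_batchnorm(w, b, gamma, beta, mean, var)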

examples/mnist_keras_nest.py

Lines changed: 4 additions & 12 deletions
@@ -68,23 +68,19 @@
 layer = Conv2D(filters=16,
                kernel_size=(5, 5),
                strides=(2, 2),
-               activation='relu',
-               use_bias=False)(input_layer)
+               activation='relu')(input_layer)
 layer = Conv2D(filters=32,
                kernel_size=(3, 3),
-               activation='relu',
-               use_bias=False)(layer)
+               activation='relu')(layer)
 layer = AveragePooling2D()(layer)
 layer = Conv2D(filters=8,
                kernel_size=(3, 3),
                padding='same',
-               activation='relu',
-               use_bias=False)(layer)
+               activation='relu')(layer)
 layer = Flatten()(layer)
 layer = Dropout(0.01)(layer)
 layer = Dense(units=10,
-              activation='softmax',
-              use_bias=False)(layer)
+              activation='softmax')(layer)
 
 model = Model(input_layer, layer)
 
@@ -125,10 +121,6 @@
     'batch_size': 1,     # Batch size for simulation.
 }
 
-config['input'] = {
-    'poisson_input': True   # Images are encodes as spike trains.
-}
-
 config['output'] = {
     'plot_vars': {           # Various plots (slows down simulation).
         'spiketrains',       # Leave section empty to turn off plots.

setup.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 setup(
     name='snntoolbox',
-    version='0.3.1',  # see https://www.python.org/dev/peps/pep-0440/
+    version='0.3.2',  # see https://www.python.org/dev/peps/pep-0440/
     description='Spiking neural network conversion toolbox',
     long_description=long_description,
     author='Bodo Rueckauer',

snntoolbox/simulation/target_simulators/brian2_target_sim.py

Lines changed: 9 additions & 9 deletions
@@ -65,8 +65,8 @@ def __init__(self, config, queue=None):
             self.v_reset = 'v = v - v_thresh'
         else:
             self.v_reset = 'v = v_reset'
-        self.eqs = ''' dv/dt = bias : 1
-                       bias : hertz'''
+        self.eqs = '''dv/dt = bias : 1
+                      bias : hertz'''
         self.spikemonitors = []
         self.statemonitors = []
         self.snn = None
@@ -219,14 +219,14 @@ def compile(self):
 
     def simulate(self, **kwargs):
 
+        inputs = kwargs[str('x_b_l')].flatten() / self.sim.ms
         if self._poisson_input:
-            self._input_layer.rates = kwargs[str('x_b_l')].flatten() * 1000 / \
-                self.rescale_fac * self.sim.Hz
+            self._input_layer.rates = inputs / self.rescale_fac
         elif self._dataset_format == 'aedat':
             # TODO: Implement by using brian2.SpikeGeneratorGroup.
             raise NotImplementedError
         else:
-            self._input_layer.bias = kwargs[str('x_b_l')].flatten() / self.sim.ms
+            self._input_layer.bias = inputs
 
         self.snn.run(self._duration * self.sim.ms, namespace=self._cell_params,
                      report='stdout', report_period=10 * self.sim.ms)
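
The refactored simulate() computes the flattened, time-scaled input once and reuses it for both the Poisson and the constant-current path. The standalone Brian2 sketch below (not toolbox code) illustrates what those two input modes amount to, using made-up pixel values; the 'dv/dt = bias' equation mirrors the one defined in __init__ above, while the threshold and reset values are illustrative rather than the toolbox's cell parameters:

    # Standalone Brian2 sketch: constant-current vs. Poisson input.
    import numpy as np
    from brian2 import NeuronGroup, PoissonGroup, SpikeMonitor, run, ms, Hz

    x = np.array([0.2, 0.5, 0.9])        # made-up normalized pixel intensities

    # Constant current: each pixel drives dv/dt through the 'bias' term.
    eqs = '''dv/dt = bias : 1
             bias : hertz'''
    neurons = NeuronGroup(len(x), eqs, threshold='v > 1', reset='v = v - 1',
                          method='euler')
    neurons.bias = x / ms                # same scaling as above: values / ms

    # Poisson alternative: each pixel sets a firing rate instead (noisier).
    poisson = PoissonGroup(len(x), rates=x * 1000 * Hz)

    spikes = SpikeMonitor(neurons)
    run(50 * ms)
    print(spikes.count[:])               # spike counts roughly proportional to x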
@@ -385,8 +385,8 @@ def set_spiketrain_stats_input(self):
         AbstractSNN.set_spiketrain_stats_input(self)
 
     def set_biases(self, biases):
-        """Set biases.
-        """
+        """Set biases."""
         if any(biases):
-            assert self.layers[-1].bias.shape == biases.shape, "Shape of biases and network do not match."
-            self.layers[-1].bias = biases * 1000 * self.sim.Hz
+            assert self.layers[-1].bias.shape == biases.shape, \
+                "Shape of biases and network do not match."
+            self.layers[-1].bias = biases / self.sim.ms
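
The unit change in set_biases is behavior-preserving: dividing by a millisecond yields the same quantity as multiplying by 1000 Hz, so the new form simply matches the input scaling used in simulate(). A quick standalone check, not toolbox code:

    # 1/ms and 1000*Hz are the same quantity in Brian2's unit system.
    from brian2 import Hz, ms
    assert float(1 / ms) == float(1000 * Hz) == 1000.0   # both equal 1000 s^-1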
