Commit cacf39c — Merge pull request #215 from NeuroBench/dev

Release 1.0.4

Authored May 21, 2024 · 2 parents 968c627 + 57a95b7

30 files changed: +818 −550 lines

‎.bumpversion.toml

+27
@@ -0,0 +1,27 @@
[tool.bumpversion]
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
regex = false
current_version = "1.0.4"
ignore_missing_version = false
search = "{current_version}"
replace = "{new_version}"
tag = false
sign_tags = false
tag_name = "{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = false
commit = false
message = "Bump version: {current_version} → {new_version}"
commit_args = ""

[[tool.bumpversion.files]]
filename = "pyproject.toml"
search = "version = \"{current_version}\""
replace = "version = \"{new_version}\""

[[tool.bumpversion.files]]
filename = "docs/conf.py"
search = "release = \"{current_version}\""
replace = "release = \"{new_version}\""
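
With this configuration, one command rewrites the version strings in both pyproject.toml and docs/conf.py. A minimal usage sketch (standard bump-my-version flags; version numbers illustrative):

```
# preview a patch bump without touching any files
bump-my-version bump patch --dry-run -v

# apply it; tagging and committing stay manual since tag = false and commit = false
bump-my-version bump patch
```
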
New GitHub Actions workflow (filename not shown in this view)

@@ -0,0 +1,36 @@
name: Publish Distribution to PyPI

on:
  push:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+'

jobs:
  build-and-publish-final-dist:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Poetry
        run: |
          pip install poetry

      - name: Install dependencies
        run: |
          poetry install --without dev

      - name: Build the package
        run: |
          poetry build

      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          poetry publish

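Since the workflow only fires on bare semantic-version tags (the '[0-9]+.[0-9]+.[0-9]+' filter), pushing a matching tag is what starts a release; a sketch with an illustrative version:

```
git tag -a 1.0.4 -m "Release 1.0.4"
git push origin 1.0.4   # matches the tag filter and starts the publish job
```
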
‎.readthedocs.yaml

+9-3
@@ -10,11 +10,17 @@ build:
     # https://python-poetry.org/docs/#installing-manually
     - pip install poetry
     # Tell poetry to not use a virtual environment
-    - poetry config virtualenvs.create false
+    # - poetry config virtualenvs.create false
   post_install:
     # Install dependencies with 'docs' dependency group
     # https://python-poetry.org/docs/managing-dependencies/#dependency-groups
-    - poetry install --with dev
+    # VIRTUAL_ENV needs to be set manually for now.
+    # See https://github.com/readthedocs/readthedocs.org/pull/11152/
+    - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with dev

 sphinx:
-  configuration: docs/conf.py
+  configuration: docs/conf.py
+
+# python:
+#   install:
+#     - requirements: docs/requirements.txt

‎RELEASE.rst

+70
@@ -0,0 +1,70 @@
Version Release Guidelines
==========================

This document describes the guidelines for releasing new versions of the library. We follow semantic versioning, which means our version numbers have three parts: MAJOR.MINOR.PATCH.

- MAJOR version when you make incompatible API changes
- MINOR version when you add functionality in a backwards-compatible manner
- PATCH version when you make backwards-compatible bug fixes


1. Install the `bump-my-version` package:

```
pip install --upgrade bump-my-version
```
--------------------

2. Create a new branch for the release from the dev branch:

```
git checkout -b release/x.y.z
```
--------------------

3. Update the version number using the `bump-my-version` command:

```
bump-my-version bump patch
```
or
```
bump-my-version bump minor
```
or
```
bump-my-version bump major
```
--------------------

4. Commit the changes with the following message and push them to the release branch:

```
git commit -m "Bump version: {current_version} → {new_version}"
```

```
git push origin release/x.y.z
```

--------------------

5. Create a pull request from the release branch to the dev branch.

6. Once the pull request is approved and merged, create a new pull request from the dev branch to the master branch.

7. Once that pull request is approved and merged, create the tag on the master branch to invoke the package publishing workflow:

```
git tag -a x.y.z -m "Release x.y.z"
```

```
git push origin tag <tag_name>
```
--------------------

8. Once the tag is pushed, the package publishing workflow is triggered and the package is published to PyPI.

9. Once the package is published, create a new release on GitHub with the tag name and the release notes (these can be generated automatically).
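
Putting steps 1–7 together, a patch release might look like the following sketch (version numbers illustrative, branch names as in the steps above):

```
pip install --upgrade bump-my-version
git checkout dev && git checkout -b release/1.0.5
bump-my-version bump patch                    # rewrites pyproject.toml and docs/conf.py
git commit -am "Bump version: 1.0.4 → 1.0.5"
git push origin release/1.0.5
# ...open and merge PRs: release/1.0.5 -> dev, then dev -> master...
git tag -a 1.0.5 -m "Release 1.0.5"
git push origin tag 1.0.5                     # triggers the PyPI publishing workflow
```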

‎docs/_static/.gitkeep

Whitespace-only changes.

‎docs/conf.py

+1-1
@@ -14,7 +14,7 @@
 project = "NeuroBench"
 copyright = "2024, Jason Yik, Noah Pacik-Nelson, Korneel Van Den Berghe"
 author = "Jason Yik, Noah Pacik-Nelson, Korneel Van Den Berghe"
-release = "v1.0.0"
+release = "1.0.4"

 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

‎docs/neurobench.benchmarks.rst

+2-2
@@ -4,8 +4,8 @@ neurobench.benchmarks
 Benchmark
 ^^^^^^^^^

-.. automodule:: neurobench.benchmarks.benchmark
-   :members:
+.. automodule:: neurobench.benchmarks
+   :members: Benchmark
    :undoc-members:
    :show-inheritance:

‎docs/neurobench.datasets.rst

-15
@@ -15,21 +15,6 @@ as linear 16-bit, single-channel, pulse code modulated values, at a 16 kHz sampl
    :show-inheritance:


-DVS Gestures
-^^^^^^^^^^^^
-
-The IBM Dynamic Vision Sensor (DVS) Gesture dataset is composed of recordings of 29 distinct individuals executing 10 different
-types of gestures, including but not limited to clapping, waving, etc. Additionally, an 11th gesture class is included that comprises
-gestures that cannot be categorized within the first 10 classes. The gestures are recorded under four distinct lighting conditions,
-and each gesture is associated with a label that indicates the corresponding lighting condition under which it was performed.
-
-.. automodule:: neurobench.datasets.dvs_gesture
-   :special-members: __init__, __getitem__
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 Prophesee Megapixel Automotive
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

‎docs/requirements.txt

+4
@@ -0,0 +1,4 @@
sphinx-rtd-theme
tqdm
tonic
numpy

‎docs/tutorial/index.rst

+4-1
@@ -52,7 +52,10 @@ using snnTorch.
     snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True, output=True),
 )

-To get started, we will load our desired dataset in a dataloader:
+To get started, we will load our desired dataset in a dataloader. Note that any
+torch DataLoader can be used for the benchmark; it is not constrained to the datasets
+available in the harness. Check out the `Tonic library <https://tonic.readthedocs.io/en/latest/#>`_,
+an excellent resource for neuromorphic datasets!

 .. code:: python

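Since the benchmark accepts any torch DataLoader, a generic loader works just as well as the harness datasets; a minimal sketch with placeholder tensors (shapes and the commented Benchmark call are illustrative):

```python
import torch
from torch.utils.data import TensorDataset, DataLoader

# 100 samples of shape (timesteps=10, features=20) with 11 possible labels
x = torch.randn(100, 10, 20)
y = torch.randint(0, 11, (100,))
loader = DataLoader(TensorDataset(x, y), batch_size=16)

# benchmark = Benchmark(model, loader, preprocessors, postprocessors,
#                       [static_metrics, workload_metrics])
```
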
‎neurobench/benchmarks/__init__.py

+1-1
@@ -1 +1 @@
-from .benchmark import *
+from .benchmark import Benchmark

‎neurobench/benchmarks/benchmark.py

+6-1
@@ -5,7 +5,12 @@
 from . import static_metrics, workload_metrics

 # workload metrics which require hooks
-requires_hooks = ["activation_sparsity", "number_neuron_updates", "synaptic_operations"]
+requires_hooks = [
+    "activation_sparsity",
+    "number_neuron_updates",
+    "synaptic_operations",
+    "membrane_updates",
+]


 class Benchmark:

‎neurobench/benchmarks/hooks.py

+7
@@ -21,6 +21,8 @@ def __init__(self, layer, connection_layer=None, prev_act_layer_hook=None):
         """
         self.activation_outputs = []
         self.activation_inputs = []
+        self.pre_fire_mem_potential = []
+        self.post_fire_mem_potential = []
         if layer is not None:
             self.hook = layer.register_forward_hook(self.hook_fn)
             self.hook_pre = layer.register_forward_pre_hook(self.pre_hook_fn)
@@ -46,6 +48,8 @@ def pre_hook_fn(self, layer, input):

         """
         self.activation_inputs.append(input)
+        if self.spiking:
+            self.pre_fire_mem_potential.append(layer.mem)

     def hook_fn(self, layer, input, output):
         """
@@ -62,6 +66,7 @@ def hook_fn(self, layer, input, output):
         """
         if self.spiking:
             self.activation_outputs.append(output[0])
+            self.post_fire_mem_potential.append(layer.mem)

         else:
             self.activation_outputs.append(output)
@@ -75,6 +80,8 @@ def reset(self):
         """Resets the stored activation outputs and inputs."""
         self.activation_outputs = []
         self.activation_inputs = []
+        self.pre_fire_mem_potential = []
+        self.post_fire_mem_potential = []

     def close(self):
         """Remove the registered hook."""

‎neurobench/benchmarks/workload_metrics.py

+63
@@ -2,6 +2,7 @@
 import numpy as np
 from ..utils import check_shape, make_binary_copy, single_layer_MACs
 from .hooks import ActivationHook, LayerHook
+from collections import defaultdict


 class AccumulatedMetric:
@@ -121,6 +122,68 @@ def activation_sparsity(model, preds, data):
     return sparsity


+class membrane_updates(AccumulatedMetric):
+    """
+    Number of membrane potential updates.
+
+    This metric can only be used for spiking models implemented with SNNTorch.
+
+    """
+
+    def __init__(self):
+        """Init metric state."""
+        self.total_samples = 0
+        self.neuron_membrane_updates = defaultdict(int)
+
+    def reset(self):
+        """Reset metric state."""
+        self.total_samples = 0
+        self.neuron_membrane_updates = defaultdict(int)
+
+    def __call__(self, model, preds, data):
+        """
+        Number of membrane updates of the model forward.
+
+        Args:
+            model: A NeuroBenchModel.
+            preds: A tensor of model predictions.
+            data: A tuple of data and labels.
+        Returns:
+            float: Number of membrane potential updates.
+
+        """
+        for hook in model.activation_hooks:
+            for index_mem in range(len(hook.pre_fire_mem_potential) - 1):
+                pre_fire_mem = hook.pre_fire_mem_potential[index_mem + 1]
+                post_fire_mem = hook.post_fire_mem_potential[index_mem + 1]
+                nr_updates = torch.count_nonzero(pre_fire_mem - post_fire_mem)
+                self.neuron_membrane_updates[str(type(hook.layer))] += int(nr_updates)
+            self.neuron_membrane_updates[str(type(hook.layer))] += int(
+                torch.numel(hook.post_fire_mem_potential[0])
+            )
+        self.total_samples += data[0].size(0)
+        return self.compute()
+
+    def compute(self):
+        """
+        Compute membrane updates using accumulated data.
+
+        Returns:
+            float: Compute the total updates to each neuron's membrane potential within the model,
+                aggregated across all neurons and normalized by the number of samples processed.
+
+        """
+        if self.total_samples == 0:
+            return 0
+
+        total_mem_updates = 0
+        for key in self.neuron_membrane_updates:
+            total_mem_updates += self.neuron_membrane_updates[key]
+
+        total_updates_per_sample = total_mem_updates / self.total_samples
+        return total_updates_per_sample
+
+
 def number_neuron_updates(model, preds, data):
     """
     Number of times each neuron type is updated.
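
The counting rule implemented above: every membrane element is charged one update for the first timestep (the numel term), and afterwards one update per element that actually changed between the pre- and post-fire snapshots of the same step. A self-contained toy calculation with made-up snapshots (shapes illustrative):

```python
import torch

# pretend snapshots for one layer: 3 timesteps, batch of 2, 4 neurons
pre = [torch.zeros(2, 4), torch.tensor([[0.0, 1, 1, 0], [1, 0, 0, 0]]), torch.ones(2, 4)]
post = [torch.ones(2, 4), torch.tensor([[0.0, 1, 0, 0], [1, 0, 0, 0]]), torch.ones(2, 4)]

updates = post[0].numel()  # timestep 0: all 8 membranes are initialized
for t in range(1, len(pre)):
    updates += int(torch.count_nonzero(pre[t] - post[t]))  # only changed elements count

print(updates / 2)  # normalized per sample -> 4.5
```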

‎neurobench/datasets/MSWC_dataset.py

+1
@@ -257,6 +257,7 @@ class MSWC(Dataset):
     Subset version (https://huggingface.co/datasets/NeuroBench/mswc_fscil_subset)
     of the original MSWC dataset (https://mlcommons.org/en/multilingual-spoken-words/)
     for a few-shot class-incremental learning (FSCIL) task consisting of 200 voice commands keywords:
+
     - 100 base classes available for pre-training with:
         - 500 train samples
         - 100 validation samples

‎neurobench/datasets/__init__.py

-6
@@ -21,12 +21,6 @@ def Gen4DetectionDataLoader(*args, **kwargs):
     )(*args, **kwargs)


-def DVSGesture(*args, **kwargs):
-    return _lazy_import("neurobench.datasets", ".dvs_gesture", "DVSGesture")(
-        *args, **kwargs
-    )
-
-
 def MackeyGlass(*args, **kwargs):
     return _lazy_import("neurobench.datasets", ".mackey_glass", "MackeyGlass")(
         *args, **kwargs

‎neurobench/datasets/dvs_gesture.py

-294
This file was deleted.

‎neurobench/datasets/primate_reaching.py

+7-4
@@ -99,7 +99,7 @@ def __init__(
             "indy_20160630_01.mat": "197413a5339630ea926cbd22b8b43338",
             "indy_20160622_01.mat": "c33d5fff31320d709d23fe445561fb6e",
             "loco_20170301_05.mat": "47342da09f9c950050c9213c3df38ea3",
-            "loco_20170217_02.mat": "739b70762d838f3a1f358733c426bb02",
+            "loco_20170215_02.mat": "739b70762d838f3a1f358733c426bb02",
             "loco_20170210_03.mat": "4cae63b58c4cb9c8abd44929216c703b",
         }

@@ -224,7 +224,7 @@ def load_data(self):

         # Define the segments' start & end indices
         self.start_end_indices = np.array(self.get_flag_index(target_pos))
-        self.time_segments = np.array(self.split_into_segments(self.start_end_indices))
+        self.time_segments = np.array(self.split_into_segments(self.start_end_indices, target_pos.shape[1]))

         spike_train = np.zeros((*spikes.shape, len(new_t)), dtype=np.int8)

@@ -290,7 +290,8 @@ def split_data(self):
         train_len = math.floor(self.train_ratio * sub_length)
         val_len = math.floor((sub_length - train_len) / 2)

-        offset = int(np.round(self.bin_width / SAMPLING_RATE)) * self.num_steps
+        # offset = int(np.round(self.bin_width / SAMPLING_RATE)) * self.num_steps
+        offset = 0

         # split the data into 4 equal parts
         # for each part, split the data according to training, testing and validation split
@@ -331,8 +332,10 @@ def remove_segments_by_length(self):
         )[0]

     @staticmethod
-    def split_into_segments(indices):
+    def split_into_segments(indices, last_idx):
         """Combine the start and end index into a NumPy array."""
+        indices = np.insert(indices, 0, 0)
+        indices = np.append(indices, [last_idx])
         start_end = np.array([indices[:-1], indices[1:]])

         return np.transpose(start_end)
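
The reworked split_into_segments pads the reach-flag indices with the recording's start (0) and end (last_idx), so the first and last segments are no longer dropped. A quick illustration of the new behavior with toy indices:

```python
import numpy as np

def split_into_segments(indices, last_idx):
    # pad with recording start and end, then pair up consecutive indices
    indices = np.insert(indices, 0, 0)
    indices = np.append(indices, [last_idx])
    return np.transpose(np.array([indices[:-1], indices[1:]]))

print(split_into_segments(np.array([30, 70]), 100))
# [[  0  30]
#  [ 30  70]
#  [ 70 100]]
```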

‎neurobench/examples/dvs_gesture/CSNN.py

-137
This file was deleted.
New Jupyter notebook: DVS Gesture Benchmark Tutorial (+306; filename not shown in this view; cells rendered below, notebook JSON boilerplate omitted)

# DVS Gesture Benchmark Tutorial

This tutorial aims to provide insight into how the NeuroBench framework is organized and how you can use it to benchmark your own models!

## About DVS Gesture:
The IBM Dynamic Vision Sensor (DVS) Gesture dataset is composed of recordings of 29 distinct individuals executing 10 different types of gestures, including but not limited to clapping and waving. Additionally, an 11th gesture class is included that comprises gestures that cannot be categorized within the first 10 classes. The gestures are recorded under four distinct lighting conditions, and each gesture is associated with a label that indicates the corresponding lighting condition under which it was performed.

### Benchmark Task:
The task is to classify gestures with high accuracy. This tutorial demonstrates with a trained convolutional spiking neural network.

First we will import the relevant libraries. We will use the [Tonic library](https://tonic.readthedocs.io/en/latest/) for loading and pre-processing the data, and the model wrapper, post-processor, and benchmark object from NeuroBench.

```python
# Tonic library is used for DVS Gesture dataset loading and processing
import tonic
import tonic.transforms as transforms
from torch.utils.data import DataLoader

from neurobench.models import SNNTorchModel
from neurobench.postprocessing import choose_max_count
from neurobench.benchmarks import Benchmark
```

For this tutorial, we will make use of a four-layer convolutional SNN, written using snnTorch.

```python
import torch
import torch.nn as nn
import snntorch as snn
from snntorch import surrogate

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()

        # Hyperparameters
        beta_1 = 0.9999903192467171
        beta_2 = 0.7291118090686332
        beta_3 = 0.9364650136740154
        beta_4 = 0.8348241794080301
        threshold_1 = 3.511291184386264
        threshold_2 = 3.494437965584431
        threshold_3 = 1.5986853560315544
        threshold_4 = 0.3641469130041378
        spike_grad = surrogate.atan()
        dropout = 0.5956071342984011

        # Initialize layers
        self.conv1 = nn.Conv2d(2, 16, 5, padding="same")
        self.pool1 = nn.MaxPool2d(2)
        self.lif1 = snn.Leaky(beta=beta_1, threshold=threshold_1, spike_grad=spike_grad, init_hidden=True)

        self.conv2 = nn.Conv2d(16, 32, 5, padding="same")
        self.pool2 = nn.MaxPool2d(2)
        self.lif2 = snn.Leaky(beta=beta_2, threshold=threshold_2, spike_grad=spike_grad, init_hidden=True)

        self.conv3 = nn.Conv2d(32, 64, 5, padding="same")
        self.pool3 = nn.MaxPool2d(2)
        self.lif3 = snn.Leaky(beta=beta_3, threshold=threshold_3, spike_grad=spike_grad, init_hidden=True)

        self.linear1 = nn.Linear(64*4*4, 11)
        self.dropout_4 = nn.Dropout(dropout)
        self.lif4 = snn.Leaky(beta=beta_4, threshold=threshold_4, spike_grad=spike_grad, init_hidden=True, output=True)

    def forward(self, x):
        # x is expected to be in shape (batch, channels, height, width) = (B, 2, 32, 32)

        # Layer 1
        y = self.conv1(x)
        y = self.pool1(y)
        spk1 = self.lif1(y)

        # Layer 2
        y = self.conv2(spk1)
        y = self.pool2(y)
        spk2 = self.lif2(y)

        # Layer 3
        y = self.conv3(spk2)
        y = self.pool3(y)
        spk3 = self.lif3(y)

        # Layer 4
        y = self.linear1(spk3.flatten(1))
        y = self.dropout_4(y)
        spk4, mem4 = self.lif4(y)

        return spk4, mem4
```

We load a pre-trained model. The model is wrapped in the SNNTorchModel wrapper, which includes boilerplate inference code and interfaces with the top-level Benchmark class.

```python
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

net = Net()
net.load_state_dict(torch.load("model_data/dvs_gesture_snn", map_location=device))

model = SNNTorchModel(net)
```

Next, we will load the dataset. Here, we are using the DVSGesture dataset from the Tonic library, as well as transforms to turn the events into frames that can be processed.

```python
# Load the dataset, here we are using the Tonic library
data_dir = "../../../data/dvs_gesture"  # data in repo root dir
test_transform = transforms.Compose([transforms.Denoise(filter_time=10000),
                                     transforms.Downsample(spatial_factor=0.25),
                                     transforms.ToFrame(sensor_size=(32, 32, 2),
                                                        n_time_bins=150),
                                     ])
test_set = tonic.datasets.DVSGesture(save_to=data_dir, transform=test_transform, train=False)
test_set_loader = DataLoader(test_set, batch_size=16,
                             collate_fn=tonic.collation.PadTensors(batch_first=True))
```

Specify any pre-processors and post-processors you want to use. These will be applied to your data before feeding into the model, and to the output spikes respectively. Here, the transforms listed above account for all necessary pre-processing. The post-processor counts up the spikes corresponding to the output labels and chooses the label with the max count.

```python
preprocessors = []
postprocessors = [choose_max_count]
```

Next, specify the metrics which you want to calculate. The metrics include static metrics, which are computed before any model inference, and workload metrics, which show inference results.

- Footprint: Bytes used to store the model parameters and buffers.
- Connection sparsity: Proportion of zero weights in the model.
- Classification accuracy: Accuracy of gesture predictions.
- Activation sparsity: Proportion of zero activations, averaged over all neurons, timesteps, and samples.
- Synaptic operations: Number of weight-activation operations, averaged over test samples.
  - Effective MACs: Number of non-zero multiply-accumulate synops, where the activations are not spikes with values -1 or 1.
  - Effective ACs: Number of non-zero accumulate synops, where the activations are -1 or 1 only.
  - Dense: Total zero and non-zero synops.

```python
static_metrics = ["footprint", "connection_sparsity"]
workload_metrics = ["classification_accuracy", "activation_sparsity", "synaptic_operations"]
```

Next, we instantiate the benchmark. We pass the model, the dataloader, the preprocessors, the postprocessor, and the list of the static and workload metrics which we want to measure:

```python
benchmark = Benchmark(model, test_set_loader, preprocessors, postprocessors, [static_metrics, workload_metrics])
```

Now, let's run the benchmark and print our results!

```python
results = benchmark.run()
print(results)
```

Expected output:
{'footprint': 304828, 'connection_sparsity': 0.0, 'classification_accuracy': 0.8636363636363633, 'activation_sparsity': 0.9507192967815323, 'synaptic_operations': {'Effective_MACs': 9227011.575757576, 'Effective_ACs': 30564577.174242426, 'Dense': 891206400.0}}

‎neurobench/examples/dvs_gesture/benchmark.py

-36
This file was deleted.
New benchmark script (filename not shown in this view)

@@ -0,0 +1,44 @@
import torch
from snn import Net

# Tonic library is used for DVS Gesture dataset loading and processing
import tonic
import tonic.transforms as transforms
from torch.utils.data import DataLoader

from neurobench.models import SNNTorchModel
from neurobench.postprocessing import choose_max_count
from neurobench.benchmarks import Benchmark

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

net = Net()
net.load_state_dict(torch.load("model_data/dvs_gesture_snn", map_location=device))

model = SNNTorchModel(net)

# Load the dataset, here we are using the Tonic library
data_dir = "../../../data/dvs_gesture"  # data in repo root dir
test_transform = transforms.Compose([transforms.Denoise(filter_time=10000),
                                     transforms.Downsample(spatial_factor=0.25),
                                     transforms.ToFrame(sensor_size=(32, 32, 2),
                                                        n_time_bins=150),
                                     ])
test_set = tonic.datasets.DVSGesture(save_to=data_dir, transform=test_transform, train=False)
test_set_loader = DataLoader(test_set, batch_size=16,
                             collate_fn=tonic.collation.PadTensors(batch_first=True))

preprocessors = []
postprocessors = [choose_max_count]

static_metrics = ["footprint", "connection_sparsity"]
workload_metrics = ["classification_accuracy", "activation_sparsity", "synaptic_operations"]

benchmark = Benchmark(model, test_set_loader, preprocessors, postprocessors, [static_metrics, workload_metrics])
results = benchmark.run(device=device)
print(results)

# Results:
# {'footprint': 304828, 'connection_sparsity': 0.0,
#  'classification_accuracy': 0.8636363636363633, 'activation_sparsity': 0.9507192967815323,
#  'synaptic_operations': {'Effective_MACs': 9227011.575757576, 'Effective_ACs': 30564577.174242426, 'Dense': 891206400.0}}
2 binary files not shown.
New model definition (filename not shown in this view; the example scripts import it via "from snn import Net")

@@ -0,0 +1,62 @@
import torch
import torch.nn as nn
import snntorch as snn
from snntorch import surrogate

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()

        # Hyperparameters
        beta_1 = 0.9999903192467171
        beta_2 = 0.7291118090686332
        beta_3 = 0.9364650136740154
        beta_4 = 0.8348241794080301
        threshold_1 = 3.511291184386264
        threshold_2 = 3.494437965584431
        threshold_3 = 1.5986853560315544
        threshold_4 = 0.3641469130041378
        spike_grad = surrogate.atan()
        dropout = 0.5956071342984011

        # Initialize layers
        self.conv1 = nn.Conv2d(2, 16, 5, padding="same")
        self.pool1 = nn.MaxPool2d(2)
        self.lif1 = snn.Leaky(beta=beta_1, threshold=threshold_1, spike_grad=spike_grad, init_hidden=True)

        self.conv2 = nn.Conv2d(16, 32, 5, padding="same")
        self.pool2 = nn.MaxPool2d(2)
        self.lif2 = snn.Leaky(beta=beta_2, threshold=threshold_2, spike_grad=spike_grad, init_hidden=True)

        self.conv3 = nn.Conv2d(32, 64, 5, padding="same")
        self.pool3 = nn.MaxPool2d(2)
        self.lif3 = snn.Leaky(beta=beta_3, threshold=threshold_3, spike_grad=spike_grad, init_hidden=True)

        self.linear1 = nn.Linear(64*4*4, 11)
        self.dropout_4 = nn.Dropout(dropout)
        self.lif4 = snn.Leaky(beta=beta_4, threshold=threshold_4, spike_grad=spike_grad, init_hidden=True, output=True)

    def forward(self, x):
        # x is expected to be in shape (batch, channels, height, width) = (B, 2, 32, 32)

        # Layer 1
        y = self.conv1(x)
        y = self.pool1(y)
        spk1 = self.lif1(y)

        # Layer 2
        y = self.conv2(spk1)
        y = self.pool2(y)
        spk2 = self.lif2(y)

        # Layer 3
        y = self.conv3(spk2)
        y = self.pool3(y)
        spk3 = self.lif3(y)

        # Layer 4
        y = self.linear1(spk3.flatten(1))
        y = self.dropout_4(y)
        spk4, mem4 = self.lif4(y)

        return spk4, mem4
New training script (filename not shown in this view; supersedes the deleted training.py below)

@@ -0,0 +1,138 @@
import snntorch as snn
from snntorch import functional as SF
from snntorch import surrogate

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import snntorch.utils as utils

import numpy as np

import tonic
import tonic.transforms as transforms
from tonic import DiskCachedDataset
from torch.utils.data import DataLoader

from snn import Net

from tqdm import tqdm

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Set the random seed for PyTorch
def rand_seed(n):
    torch.manual_seed(n)
    if torch.cuda.is_available():
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.cuda.manual_seed_all(n)

# The SNNTorch forward pass
def forward_pass(net, data):
    spk_rec = []
    utils.reset(net)
    for step in range(data.shape[1]):
        spk_out, _ = net(data[:, step, ...])
        spk_rec.append(spk_out)
    return torch.stack(spk_rec)

lr = 0.008273059787948487
batch_size = 64
train_time_bin = 25
test_time_bin = 150
epochs = 100
data_dir = './data'

def dataloader():
    # sensor_size = tonic.datasets.DVSGesture.sensor_size
    sensor_size = (32, 32, 2)

    train_transform = transforms.Compose([transforms.Denoise(filter_time=10000),
                                          transforms.Downsample(spatial_factor=0.25),
                                          transforms.ToFrame(sensor_size=sensor_size,
                                                             n_time_bins=train_time_bin),
                                          ])

    test_transform = transforms.Compose([transforms.Denoise(filter_time=10000),
                                         transforms.Downsample(spatial_factor=0.25),
                                         transforms.ToFrame(sensor_size=sensor_size,
                                                            n_time_bins=test_time_bin),
                                         ])

    trainset = tonic.datasets.DVSGesture(save_to=data_dir, transform=train_transform, train=True)
    testset = tonic.datasets.DVSGesture(save_to=data_dir, transform=test_transform, train=False)

    cached_trainset = DiskCachedDataset(trainset, cache_path='./data/cache/dvs/train')
    cached_testset = DiskCachedDataset(testset, cache_path='./data/cache/dvs/test')

    train_loader = DataLoader(cached_trainset, batch_size=batch_size,
                              collate_fn=tonic.collation.PadTensors(batch_first=True))
    # test whole validation set at once so that accuracy is exact
    test_loader = DataLoader(cached_testset, batch_size=512,
                             collate_fn=tonic.collation.PadTensors(batch_first=True))

    return train_loader, test_loader

if __name__ == '__main__':

    rand_seed(1234)

    train_loader, test_loader = dataloader()

    net = Net().to(device)

    optimizer = torch.optim.Adam(net.parameters(), lr=lr, betas=(0.9, 0.999))

    loss_fn = SF.mse_count_loss()
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=8250, eta_min=0, last_epoch=-1)

    # Training Start
    best_acc = 0
    for epoch in range(epochs):
        print(f"Epoch {epoch}:")
        train_loss = []
        train_acc = []
        net.train()
        for data, targets in tqdm(train_loader):
            data = data.to(device)
            targets = targets.to(device)

            spk_rec = forward_pass(net, data)
            loss_val = loss_fn(spk_rec, targets)

            train_loss.append(loss_val.item())
            train_acc.append(SF.accuracy_rate(spk_rec, targets))

            optimizer.zero_grad()
            loss_val.backward()
            optimizer.step()
            scheduler.step()

        print(f"Train Loss: {np.mean(train_loss):.3f}")
        print(f"Train Accuracy: {np.mean(train_acc) * 100:.2f}%")

        val_loss = []
        val_acc = []
        net.eval()
        for data, targets in tqdm(iter(test_loader)):
            data = data.to(device)
            targets = targets.to(device)

            spk_rec = forward_pass(net, data)

            val_loss.append(loss_fn(spk_rec, targets).item())
            val_acc.append(SF.accuracy_rate(spk_rec, targets))

        print(f"Test Loss: {np.mean(val_loss):.3f}")
        print(f"Test Accuracy: {np.mean(val_acc) * 100:.2f}%")

        if np.mean(val_acc) > best_acc:
            print("New Best Test Accuracy. Saving...")
            best_acc = np.mean(val_acc)
            torch.save(net.state_dict(), "./model_data/dvs_gesture_snn")

        print(f"---------------------\n")

    # Load the weights into the network for inference and benchmarking
    net.load_state_dict(torch.load("./model_data/dvs_gesture_snn"))

‎neurobench/examples/dvs_gesture/training.py

-31
This file was deleted.

‎pyproject.toml

+1-1
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "neurobench"
-version = "1.0.3"
+version = "1.0.4"
 description = "Collaborative, Fair, and Representative Benchmarks for Neuromorphic Computing"
 authors = ["NeuroBench Team <neurobench@googlegroups.com>"]
 readme = "README.rst"

‎tests/test_datasets.py

-16
@@ -3,7 +3,6 @@
 from neurobench.datasets import SpeechCommands
 from neurobench.datasets import Gen4DetectionDataLoader
 from neurobench.datasets import PrimateReaching
-from neurobench.datasets import DVSGesture
 from neurobench.datasets import MackeyGlass
 from neurobench.datasets import WISDM
 from torch.utils.data import DataLoader
@@ -61,21 +60,6 @@ def test_speech_commands():
     assert int(ds[0][1]) == 0


-def test_dvs_gesture():
-    path = dataset_path + "dvs_gesture/"
-    try:
-        assert os.path.exists(path)
-    except AssertionError:
-        raise FileExistsError(f"Can't find {path}")
-    ds = DVSGesture(path)
-
-    assert len(ds) > 0
-    assert list(ds[0][0].shape) == [340, 3, 128, 128]
-
-    assert int(ds[0][1]) >= 0
-    assert int(ds[0][1]) <= 10
-
-
 def test_mackey_glass():
     filepath = dataset_path + "mackey_glass/mg_17.npy"
     try:

‎tests/test_metrics.py

+29-1
@@ -21,6 +21,7 @@
     detect_activations_connections,
     synaptic_operations,
     number_neuron_updates,
+    membrane_updates,
 )
 from torch.profiler import profile, record_function, ProfilerActivity

@@ -201,7 +202,7 @@ def test_r2():

     data = (
         torch.randn(2, batch_size),
-        torch.tensor(targets).transpose(0, 1),
+        torch.tensor(targets, dtype=torch.float).transpose(0, 1),
     )  # input and targets

     preds = [
@@ -531,6 +532,33 @@ def test_neuron_update_metric():
     print("Passed neuron update metric")


+def test_membrane_potential_updates():
+
+    # test snn layers
+    net_snn = nn.Sequential(
+        # nn.Flatten(),
+        nn.Linear(20, 5, bias=False),
+        snn.Leaky(
+            beta=0.9, spike_grad=surrogate.fast_sigmoid(), init_hidden=True, output=True
+        ),
+    )
+
+    # simulate spiking input with only ones
+    inp = torch.ones(5, 10, 20)  # batch size, time steps, input size
+
+    model = SNNTorchModel(net_snn)
+
+    detect_activations_connections(model)
+
+    out = model(inp)
+    mem_updates = membrane_updates()
+    tot_mem_updates = mem_updates(model, out, (inp, 0))
+
+    assert tot_mem_updates == 50
+
+    print("Passed membrane updates")
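
The expected value of 50 follows from the metric's counting rule: per sample, the Leaky layer holds 5 membrane potentials, each counted once at the first timestep and once at each of the 9 remaining timesteps (with constant all-ones input and non-zero weights, every membrane changes at every step), giving 10 × 5 = 50 updates per sample.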

 class simple_LSTM(nn.Module):
     """Nonsense LSTM for operations testing. Should be 615 MACs."""