Merge pull request #65 from majianjia/dev
Add version, KLD length
majianjia authored Jul 23, 2019
2 parents 4a63e36 + e5e513d commit 0ea319e
Showing 6 changed files with 39 additions and 5 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -10,4 +10,4 @@ __pycache__
*.h5
*.obj
*.sconsign.dblite
-.ipynb_checkpoints
+.ipynb_checkpoints
2 changes: 2 additions & 0 deletions docs/index.md
@@ -5,6 +5,8 @@

NNoM is a high-level inference Neural Network library specifically for microcontrollers.

+Document version 0.2.1

[[Chinese Intro]](rt-thread_guide.md)

**Highlights**
4 changes: 3 additions & 1 deletion examples/auto_test/.gitignore
@@ -11,4 +11,6 @@ Debug
*.sconsign.dblite
.ipynb_checkpoints
weights.h
-result.csv
+result.csv
+test_*
+_cifar.py
6 changes: 6 additions & 0 deletions inc/nnom.h
@@ -25,6 +25,12 @@
#define q15_t int16_t
#define q31_t int32_t

+/* version */
+#define NNOM_MAJORVERSION 0L /**< major version number */
+#define NNOM_SUBVERSION 2L /**< minor version number */
+#define NNOM_REVISION 1L /**< patch (revision) number */
+#define NNOM_VERSION ((NNOM_MAJORVERSION * 10000) + (NNOM_SUBVERSION * 100) + NNOM_REVISION)

typedef enum
{
NN_SUCCESS = 0, /**< No error */
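As a quick sanity check of the packed version value defined above, the encoding works out as follows (an illustrative sketch, not part of the commit):

```python
# Illustrative only: mirrors the NNOM_VERSION packing added in inc/nnom.h.
major, minor, revision = 0, 2, 1      # NNoM 0.2.1

version = major * 10000 + minor * 100 + revision
print(version)                        # -> 201

# Unpacking recovers the three components.
assert (version // 10000, (version // 100) % 100, version % 100) == (major, minor, revision)
```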
27 changes: 25 additions & 2 deletions scripts/nnom_utils.py
@@ -350,7 +350,7 @@ def layers_output_ranges(model, x_test, kld=True, calibrate_size=1000):

# saturation shift, using KLD method
# Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
-if(kld and not is_shift_fixed(layer) and "input" not in layer.name): # do not use KLD on the input layer
+if(kld and not is_shift_fixed(layer) and "input" not in layer.name and "dense" not in layer.name): # do not use KLD on input or dense layers
import scipy.stats
abs_max = max(abs(max_val), abs(min_val))
small_var = 1e-5
@@ -359,7 +359,7 @@ def layers_output_ranges(model, x_test, kld=True, calibrate_size=1000):
flat_hist = np.histogram(features.flatten(), bins=bins)[0]
kl_loss = []
kl_shifts = []
-for shift in range(4):
+for shift in range(4):
t = 2 ** (dec_bits + shift) # 2-based threshold
act = np.round(features.flatten() * t)
act = act / t
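For context, the surrounding code picks the output shift whose quantised activation histogram stays closest, in KL divergence, to the float histogram; this commit narrows the search from 8 to 4 shift candidates. Below is a self-contained sketch of the idea. The toy data, the int8 saturation step, and several names are assumptions for illustration, not lines taken from nnom_utils.py:

```python
# Runnable sketch of the KLD-based saturation-shift search (assumptions noted above).
import numpy as np
import scipy.stats

features = np.random.randn(10000).astype(np.float32)   # stand-in for layer activations
abs_max = float(np.abs(features).max())
dec_bits = int(7 - np.ceil(np.log2(abs_max)))           # naive Q7 decimal-bit estimate
small_var = 1e-5                                        # keeps histogram bins non-zero
bins = np.linspace(-abs_max, abs_max, 2049)             # 2048 bins over the float range
flat_hist = np.histogram(features, bins=bins)[0]

kl_loss, kl_shifts = [], []
for shift in range(4):                                  # the commit narrows 8 -> 4 candidates
    t = 2 ** (dec_bits + shift)                         # 2-based threshold
    act = np.clip(np.round(features * t), -128, 127) / t  # fixed-point round + int8 saturate (assumed)
    hist = np.histogram(act, bins=bins)[0]
    # KL divergence between the float and quantised distributions
    kl_loss.append(scipy.stats.entropy(flat_hist + small_var, hist + small_var))
    kl_shifts.append(dec_bits + shift)

print("selected decimal bits:", kl_shifts[int(np.argmin(kl_loss))])
```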
@@ -701,6 +701,29 @@ def is_skipable_layer(layer):
fp.write('\tlayer[%s] = model.hook(Softmax(), layer[%s]);\n'%(id, LI[inp][0]))
else:
raise Exception('unsupported layer', layer.name, layer)

"""
# temporary fixed for activations attached into layers in construction
def is_activation_attached(layer):
if(("Softmax" in layer.output.name and "softmax" not in layer.name)or
("Relu" in layer.output.name and "re_lu" not in layer.name) or
("Sigmoid" in layer.output.name and "sigmoid" not in layer.name) or
("Tanh" in layer.output.name and "tanh" not in layer.name)):
return True
return False
if "input" not in layer.name and is_activation_attached(layer):
inp = layer.output.name.replace(':', '/').split('/')[0]
cfg = layer.get_config()
if(cfg['activation'] == 'relu'):
fp.write('\tlayer[%s] = model.active(act_relu(), layer[%s]);\n'%(id, LI[inp][0]))
if(cfg['activation'] == 'tanh'):
fp.write('\tlayer[%s] = model.active(act_tanh(%s_OUTPUT_SHIFT), layer[%s]);\n'%(id, inp.upper(), LI[inp][0]))
if(cfg['activation'] == 'sigmoid'):
fp.write('\tlayer[%s] = model.active(act_sigmoid(%s_OUTPUT_SHIFT), layer[%s]);\n'%(id, inp.upper(), LI[inp][0]))
elif(cfg['activation'] == 'softmax'):
fp.write('\tlayer[%s] = model.hook(Softmax(), layer[%s]);\n'%(id, LI[inp][0]))
"""

# FIXME, test later.
if('softmax' in layer.name
or ('activation' in layer.name and layer.get_config()['activation'] == 'softmax')):
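The quoted-out helper above detects an activation fused into a layer by inspecting its output tensor name, then reads the activation kind from the layer config. A minimal illustration of that `get_config()` lookup (assumes a TensorFlow/Keras environment; the layer name is hypothetical):

```python
# Illustration only: how a fused activation appears in a Keras layer config.
from tensorflow.keras.layers import Dense

layer = Dense(10, activation='relu', name='dense_demo')  # hypothetical layer
print(layer.get_config()['activation'])                  # -> 'relu'
# The generator would map 'relu' to model.active(act_relu(), ...) as sketched above.
```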
3 changes: 2 additions & 1 deletion src/nnom.c
@@ -868,7 +868,8 @@ nnom_status_t model_compile(nnom_model_t *m, nnom_layer_t *input, nnom_layer_t *
if (output == NULL)
m->tail = find_last(input);

NNOM_LOG("\nStart compiling model...\n");
NNOM_LOG("\nNNoM version %d.%d.%d\n", NNOM_MAJORVERSION, NNOM_SUBVERSION, NNOM_REVISION);
NNOM_LOG("Start compiling model...\n");
NNOM_LOG("Layer(#) Activation output shape ops(MAC) mem(in, out, buf) mem blk lifetime\n");
NNOM_LOG("-------------------------------------------------------------------------------------------------\n");

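With the macros from inc/nnom.h, the compile-time banner now begins as follows (derived from the format strings above, not a captured log):

```
NNoM version 0.2.1
Start compiling model...
```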
