Skip to content

Commit

Permalink
Merge pull request #2 from YeongHyeon/0.2.6
Browse files Browse the repository at this point in the history
0.2.6
  • Loading branch information
YeongHyeon authored Feb 8, 2022
2 parents 1096301 + 6e98ad0 commit 22b4347
Show file tree
Hide file tree
Showing 4 changed files with 112 additions and 111 deletions.
Binary file added dist/whiteboxlayer-0.2.6-py3-none-any.whl
Binary file not shown.
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

setup(
name = 'whiteboxlayer',
version = '0.2.5',
version = '0.2.6',
description = 'TensorFlow based custom layers',
author = 'YeongHyeon Park',
author_email = '[email protected]',
Expand Down
111 changes: 111 additions & 0 deletions whiteboxlayer/extensions/convolution.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
import tensorflow as tf

def conv1d(layer, x, stride, \
    filter_size=[3, 16, 32], dilations=[1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """1-D convolution followed by optional bias, batch-norm, and activation.

    Args:
        layer: layer factory providing get_variable / batch_normalization / activation.
        x: input tensor in NWC layout (batch, width, channel).
        stride: convolution stride along the width axis.
        filter_size: kernel shape [width, c_in, c_out].
        dilations: dilation rates passed to tf.nn.conv1d.
        padding: 'SAME' or 'VALID'.
        batch_norm: apply batch normalization after the bias add.
        trainable: whether created variables are trainable.
        activation: activation identifier forwarded to layer.activation.
        usebias: add a per-output-channel bias.
        name: base name for all created variables/ops.
        verbose: print the input/output shapes.

    Returns:
        The activated output tensor.
    """
    w = layer.get_variable(shape=filter_size, \
        trainable=trainable, name='%s_w' %(name))
    if(usebias): b = layer.get_variable(shape=[filter_size[-1]], \
        trainable=trainable, name='%s_b' %(name))

    wx = tf.nn.conv1d(
        input=x,
        filters=w,
        stride=stride,
        padding=padding,
        data_format='NWC',
        dilations=dilations,  # BUG FIX: was hard-coded to None, silently ignoring the dilations argument
        name='%s_cv' %(name)
    )

    if(usebias): y = tf.math.add(wx, b, name='%s_add' %(name))
    else: y = wx
    if(verbose): print("Conv (%s)" %(name), x.shape, "->", y.shape)

    if(batch_norm): y = layer.batch_normalization(x=y, \
        trainable=trainable, name='%s_bn' %(name), verbose=verbose)
    return layer.activation(x=y, activation=activation, name=name)

def convt1d(layer, x, stride, output_shape, \
    filter_size=[3, 16, 32], dilations=[1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """Transposed 1-D convolution with optional bias, batch-norm, and activation.

    Upsamples x (NWC layout) to output_shape via tf.nn.conv1d_transpose; the
    bias (if any) is sized by the output-channel slot of a transposed kernel,
    i.e. filter_size[-2].
    """
    kernel = layer.get_variable(
        shape=filter_size, trainable=trainable, name='%s_w' % name)
    bias = None
    if usebias:
        bias = layer.get_variable(
            shape=[filter_size[-2]], trainable=trainable, name='%s_b' % name)

    y = tf.nn.conv1d_transpose(
        input=x, filters=kernel, output_shape=output_shape,
        strides=stride, padding=padding, data_format='NWC',
        dilations=dilations, name='%s_cvt' % name)

    if bias is not None:
        y = tf.math.add(y, bias, name='%s_add' % name)
    if verbose:
        print("ConvT (%s)" % name, x.shape, "->", y.shape)

    if batch_norm:
        y = layer.batch_normalization(
            x=y, trainable=trainable, name='%s_bn' % name, verbose=verbose)
    return layer.activation(x=y, activation=activation, name=name)

def conv2d(layer, x, stride, \
    filter_size=[3, 3, 16, 32], dilations=[1, 1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """2-D convolution with optional bias, batch-norm, and activation.

    Applies tf.nn.conv2d over x (NHWC layout) with the same stride on both
    spatial axes; bias is per output channel (filter_size[-1]).
    """
    kernel = layer.get_variable(
        shape=filter_size, trainable=trainable, name='%s_w' % name)
    bias = None
    if usebias:
        bias = layer.get_variable(
            shape=[filter_size[-1]], trainable=trainable, name='%s_b' % name)

    y = tf.nn.conv2d(
        input=x, filters=kernel, strides=[1, stride, stride, 1],
        padding=padding, data_format='NHWC',
        dilations=dilations, name='%s_cv' % name)

    if bias is not None:
        y = tf.math.add(y, bias, name='%s_add' % name)
    if verbose:
        print("Conv (%s)" % name, x.shape, "->", y.shape)

    if batch_norm:
        y = layer.batch_normalization(
            x=y, trainable=trainable, name='%s_bn' % name, verbose=verbose)
    return layer.activation(x=y, activation=activation, name=name)

def convt2d(layer, x, stride, output_shape, \
    filter_size=[3, 3, 16, 32], dilations=[1, 1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """Transposed 2-D convolution with optional bias, batch-norm, and activation.

    Upsamples x (NHWC layout) to output_shape via tf.nn.conv2d_transpose; the
    bias (if any) uses the output-channel slot of a transposed kernel,
    i.e. filter_size[-2].
    """
    kernel = layer.get_variable(
        shape=filter_size, trainable=trainable, name='%s_w' % name)
    bias = None
    if usebias:
        bias = layer.get_variable(
            shape=[filter_size[-2]], trainable=trainable, name='%s_b' % name)

    y = tf.nn.conv2d_transpose(
        input=x, filters=kernel, output_shape=output_shape,
        strides=[1, stride, stride, 1], padding=padding, data_format='NHWC',
        dilations=dilations, name='%s_cvt' % name)

    if bias is not None:
        y = tf.math.add(y, bias, name='%s_add' % name)
    if verbose:
        print("ConvT (%s)" % name, x.shape, "->", y.shape)

    if batch_norm:
        y = layer.batch_normalization(
            x=y, trainable=trainable, name='%s_bn' % name, verbose=verbose)
    return layer.activation(x=y, activation=activation, name=name)
110 changes: 0 additions & 110 deletions whiteboxlayer/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,116 +144,6 @@ def maxpool(self, x, ksize=2, strides=1, \
if(verbose): print("MaxPool (%s)" %(name), x.shape, "->", y.shape)
return y

def conv1d(self, x, stride, \
    filter_size=[3, 16, 32], dilations=[1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """1-D convolution followed by optional bias, batch-norm, and activation.

    Args:
        x: input tensor in NWC layout (batch, width, channel).
        stride: convolution stride along the width axis.
        filter_size: kernel shape [width, c_in, c_out].
        dilations: dilation rates passed to tf.nn.conv1d.
        padding: 'SAME' or 'VALID'.
        batch_norm: apply batch normalization after the bias add.
        trainable: whether created variables are trainable.
        activation: activation identifier forwarded to self.activation.
        usebias: add a per-output-channel bias.
        name: base name for all created variables/ops.
        verbose: print the input/output shapes.

    Returns:
        The activated output tensor.
    """
    w = self.get_variable(shape=filter_size, \
        trainable=trainable, name='%s_w' %(name))
    if(usebias): b = self.get_variable(shape=[filter_size[-1]], \
        trainable=trainable, name='%s_b' %(name))

    wx = tf.nn.conv1d(
        input=x,
        filters=w,
        stride=stride,
        padding=padding,
        data_format='NWC',
        dilations=dilations,  # BUG FIX: was hard-coded to None, silently ignoring the dilations argument
        name='%s_cv' %(name)
    )

    if(usebias): y = tf.math.add(wx, b, name='%s_add' %(name))
    else: y = wx
    if(verbose): print("Conv (%s)" %(name), x.shape, "->", y.shape)

    if(batch_norm): y = self.batch_normalization(x=y, \
        trainable=trainable, name='%s_bn' %(name), verbose=verbose)
    return self.activation(x=y, activation=activation, name=name)

def convt1d(self, x, stride, output_shape, \
    filter_size=[3, 16, 32], dilations=[1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """Transposed 1-D convolution with optional bias, batch-norm, and activation.

    Upsamples x (NWC layout) to output_shape via tf.nn.conv1d_transpose; the
    bias (if any) is sized by the output-channel slot of a transposed kernel,
    i.e. filter_size[-2].
    """
    kernel = self.get_variable(
        shape=filter_size, trainable=trainable, name='%s_w' % name)
    bias = None
    if usebias:
        bias = self.get_variable(
            shape=[filter_size[-2]], trainable=trainable, name='%s_b' % name)

    y = tf.nn.conv1d_transpose(
        input=x, filters=kernel, output_shape=output_shape,
        strides=stride, padding=padding, data_format='NWC',
        dilations=dilations, name='%s_cvt' % name)

    if bias is not None:
        y = tf.math.add(y, bias, name='%s_add' % name)
    if verbose:
        print("ConvT (%s)" % name, x.shape, "->", y.shape)

    if batch_norm:
        y = self.batch_normalization(
            x=y, trainable=trainable, name='%s_bn' % name, verbose=verbose)
    return self.activation(x=y, activation=activation, name=name)

def conv2d(self, x, stride, \
    filter_size=[3, 3, 16, 32], dilations=[1, 1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """2-D convolution with optional bias, batch-norm, and activation.

    Applies tf.nn.conv2d over x (NHWC layout) with the same stride on both
    spatial axes; bias is per output channel (filter_size[-1]).
    """
    kernel = self.get_variable(
        shape=filter_size, trainable=trainable, name='%s_w' % name)
    bias = None
    if usebias:
        bias = self.get_variable(
            shape=[filter_size[-1]], trainable=trainable, name='%s_b' % name)

    y = tf.nn.conv2d(
        input=x, filters=kernel, strides=[1, stride, stride, 1],
        padding=padding, data_format='NHWC',
        dilations=dilations, name='%s_cv' % name)

    if bias is not None:
        y = tf.math.add(y, bias, name='%s_add' % name)
    if verbose:
        print("Conv (%s)" % name, x.shape, "->", y.shape)

    if batch_norm:
        y = self.batch_normalization(
            x=y, trainable=trainable, name='%s_bn' % name, verbose=verbose)
    return self.activation(x=y, activation=activation, name=name)

def convt2d(self, x, stride, output_shape, \
    filter_size=[3, 3, 16, 32], dilations=[1, 1, 1, 1], \
    padding='SAME', batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):
    """Transposed 2-D convolution with optional bias, batch-norm, and activation.

    Upsamples x (NHWC layout) to output_shape via tf.nn.conv2d_transpose; the
    bias (if any) uses the output-channel slot of a transposed kernel,
    i.e. filter_size[-2].
    """
    kernel = self.get_variable(
        shape=filter_size, trainable=trainable, name='%s_w' % name)
    bias = None
    if usebias:
        bias = self.get_variable(
            shape=[filter_size[-2]], trainable=trainable, name='%s_b' % name)

    y = tf.nn.conv2d_transpose(
        input=x, filters=kernel, output_shape=output_shape,
        strides=[1, stride, stride, 1], padding=padding, data_format='NHWC',
        dilations=dilations, name='%s_cvt' % name)

    if bias is not None:
        y = tf.math.add(y, bias, name='%s_add' % name)
    if verbose:
        print("ConvT (%s)" % name, x.shape, "->", y.shape)

    if batch_norm:
        y = self.batch_normalization(
            x=y, trainable=trainable, name='%s_bn' % name, verbose=verbose)
    return self.activation(x=y, activation=activation, name=name)

def fully_connected(self, x, c_out, \
batch_norm=False, trainable=True, activation=None, usebias=True, name='', verbose=True):

Expand Down

0 comments on commit 22b4347

Please sign in to comment.