From a5ffa496793dc7c96ea3b1f9852381837e98e3bf Mon Sep 17 00:00:00 2001
From: Oli Larkin
Date: Wed, 27 Dec 2023 11:45:08 +0100
Subject: [PATCH 1/3] Pass in const reference to weights

And const_iterator rather than iterator
---
 NAM/convnet.cpp |  8 ++++----
 NAM/convnet.h   |  6 +++---
 NAM/dsp.cpp     |  6 +++---
 NAM/dsp.h       |  6 +++---
 NAM/lstm.cpp    |  6 +++---
 NAM/lstm.h      |  4 ++--
 NAM/wavenet.cpp | 12 ++++++------
 NAM/wavenet.h   | 10 +++++-----
 8 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/NAM/convnet.cpp b/NAM/convnet.cpp
index 1d4e3ba..bc0a392 100644
--- a/NAM/convnet.cpp
+++ b/NAM/convnet.cpp
@@ -10,7 +10,7 @@
 #include "dsp.h"
 #include "convnet.h"
 
-nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
+nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::const_iterator& weights)
 {
   // Extract from param buffer
   Eigen::VectorXf running_mean(dim);
@@ -48,7 +48,7 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c
 
 void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
                                               const bool batchnorm, const std::string activation,
-                                              std::vector<float>::iterator& weights)
+                                              std::vector<float>::const_iterator& weights)
 {
   this->_batchnorm = batchnorm;
   // HACK 2 kernel
@@ -74,7 +74,7 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
   return this->conv.get_out_channels();
 }
 
-nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
+nam::convnet::_Head::_Head(const int channels, std::vector<float>::const_iterator& weights)
 {
   this->_weight.resize(channels);
   for (int i = 0; i < channels; i++)
@@ -98,7 +98,7 @@ nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilat
 {
   this->_verify_weights(channels, dilations, batchnorm, weights.size());
   this->_blocks.resize(dilations.size());
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (size_t i = 0; i < dilations.size(); i++)
     this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
   this->_block_vals.resize(this->_blocks.size() + 1);
diff --git a/NAM/convnet.h b/NAM/convnet.h
index 310a1e5..a410506 100644
--- a/NAM/convnet.h
+++ b/NAM/convnet.h
@@ -23,7 +23,7 @@ class BatchNorm
 {
 public:
   BatchNorm(){};
-  BatchNorm(const int dim, std::vector<float>::iterator& weights);
+  BatchNorm(const int dim, std::vector<float>::const_iterator& weights);
   void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
 
 private:
@@ -41,7 +41,7 @@ class ConvNetBlock
 public:
   ConvNetBlock(){};
   void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
-                    const std::string activation, std::vector<float>::iterator& weights);
+                    const std::string activation, std::vector<float>::const_iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
   long get_out_channels() const;
   Conv1D conv;
@@ -56,7 +56,7 @@ class _Head
 {
 public:
   _Head(){};
-  _Head(const int channels, std::vector<float>::iterator& weights);
+  _Head(const int channels, std::vector<float>::const_iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
 
 private:
diff --git a/NAM/dsp.cpp b/NAM/dsp.cpp
index d068eed..68f5f37 100644
--- a/NAM/dsp.cpp
+++ b/NAM/dsp.cpp
@@ -167,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
 
 // NN modules =================================================================
 
-void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
+void nam::Conv1D::set_weights_(std::vector<float>::const_iterator& weights)
 {
   if (this->_weight.size() > 0)
   {
@@ -198,7 +198,7 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
 }
 
 void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
-                                        const int _dilation, const bool do_bias, std::vector<float>::iterator& weights)
+                                        const int _dilation, const bool do_bias, std::vector<float>::const_iterator& weights)
 {
   this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
   this->set_weights_(weights);
@@ -236,7 +236,7 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
     this->_bias.resize(out_channels);
 }
 
-void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
+void nam::Conv1x1::set_weights_(std::vector<float>::const_iterator& weights)
 {
   for (int i = 0; i < this->_weight.rows(); i++)
     for (int j = 0; j < this->_weight.cols(); j++)
diff --git a/NAM/dsp.h b/NAM/dsp.h
index c3e8ec9..737d6c3 100644
--- a/NAM/dsp.h
+++ b/NAM/dsp.h
@@ -124,11 +124,11 @@ class Conv1D
 {
 public:
   Conv1D() { this->_dilation = 1; };
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
                  const int _dilation);
   void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
-                             const bool do_bias, std::vector<float>::iterator& weights);
+                             const bool do_bias, std::vector<float>::const_iterator& weights);
   // Process from input to output
   // Rightmost indices of input go from i_start to i_end,
   // Indices on output for from j_start (to j_start + i_end - i_start)
@@ -153,7 +153,7 @@ class Conv1x1
 {
 public:
   Conv1x1(const int in_channels, const int out_channels, const bool _bias);
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // :param input: (N,Cin) or (Cin,)
   // :return: (N,Cout) or (Cout,), respectively
   Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
diff --git a/NAM/lstm.cpp b/NAM/lstm.cpp
index 38ff494..bde249f 100644
--- a/NAM/lstm.cpp
+++ b/NAM/lstm.cpp
@@ -4,7 +4,7 @@
 
 #include "lstm.h"
 
-nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights)
+nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights)
 {
   // Resize arrays
   this->_w.resize(4 * hidden_size, input_size + hidden_size);
@@ -63,12 +63,12 @@ void nam::lstm::LSTMCell::process_(const Eigen::VectorXf& x)
   }
 }
 
-nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
                       const double expected_sample_rate)
 : DSP(expected_sample_rate)
 {
   this->_input.resize(1);
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (int i = 0; i < num_layers; i++)
     this->_layers.push_back(LSTMCell(i == 0 ? input_size : hidden_size, hidden_size, it));
   this->_head_weight.resize(hidden_size);
diff --git a/NAM/lstm.h b/NAM/lstm.h
index 6b02b18..cdfada0 100644
--- a/NAM/lstm.h
+++ b/NAM/lstm.h
@@ -22,7 +22,7 @@ namespace lstm
 class LSTMCell
 {
 public:
-  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights);
+  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights);
   Eigen::VectorXf get_hidden_state() const { return this->_xh(Eigen::placeholders::lastN(this->_get_hidden_size())); };
   void process_(const Eigen::VectorXf& x);
 
@@ -50,7 +50,7 @@ class LSTMCell
 class LSTM : public DSP
 {
 public:
-  LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+  LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
        const double expected_sample_rate = -1.0);
   ~LSTM() = default;
 
diff --git a/NAM/wavenet.cpp b/NAM/wavenet.cpp
index fc96fa6..111dae0 100644
--- a/NAM/wavenet.cpp
+++ b/NAM/wavenet.cpp
@@ -12,7 +12,7 @@ nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_ch
   this->set_size_(in_channels, out_channels, kernel_size, bias, dilation);
 }
 
-void nam::wavenet::_Layer::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_Layer::set_weights_(std::vector<float>::const_iterator& weights)
 {
   this->_conv.set_weights_(weights);
   this->_input_mixin.set_weights_(weights);
@@ -133,7 +133,7 @@ void nam::wavenet::_LayerArray::set_num_frames_(const long num_frames)
     this->_layers[i].set_num_frames_(num_frames);
 }
 
-void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::const_iterator& weights)
 {
   this->_rechannel.set_weights_(weights);
   for (size_t i = 0; i < this->_layers.size(); i++)
@@ -186,7 +186,7 @@ nam::wavenet::_Head::_Head(const int input_size, const int num_layers, const int
   }
 }
 
-void nam::wavenet::_Head::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_Head::set_weights_(std::vector<float>::const_iterator& weights)
 {
   for (size_t i = 0; i < this->_layers.size(); i++)
     this->_layers[i].set_weights_(weights);
@@ -231,7 +231,7 @@ void nam::wavenet::_Head::_apply_activation_(Eigen::MatrixXf& x)
 // WaveNet ====================================================================
 
 nam::wavenet::WaveNet::WaveNet(const std::vector<LayerArrayParams>& layer_array_params,
-                               const float head_scale, const bool with_head, std::vector<float> weights,
+                               const float head_scale, const bool with_head, const std::vector<float>& weights,
                                const double expected_sample_rate)
 : DSP(expected_sample_rate)
 , _num_frames(0)
@@ -272,9 +272,9 @@ void nam::wavenet::WaveNet::finalize_(const int num_frames)
   this->_advance_buffers_(num_frames);
 }
 
-void nam::wavenet::WaveNet::set_weights_(std::vector<float>& weights)
+void nam::wavenet::WaveNet::set_weights_(const std::vector<float>& weights)
 {
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (size_t i = 0; i < this->_layer_arrays.size(); i++)
     this->_layer_arrays[i].set_weights_(it);
   // this->_head.set_params_(it);
diff --git a/NAM/wavenet.h b/NAM/wavenet.h
index 7ea94f1..b4ab962 100644
--- a/NAM/wavenet.h
+++ b/NAM/wavenet.h
@@ -30,7 +30,7 @@ class _Layer
   , _1x1(channels, channels, true)
   , _activation(activations::Activation::get_activation(activation))
   , _gated(gated){};
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // :param `input`: from previous layer
   // :param `output`: to next layer
   void process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition, Eigen::MatrixXf& head_input,
@@ -108,7 +108,7 @@ class _LayerArray
                 Eigen::MatrixXf& head_outputs // post head-rechannel
   );
   void set_num_frames_(const long num_frames);
-  void set_weights_(std::vector<float>::iterator& it);
+  void set_weights_(std::vector<float>::const_iterator& it);
 
   // "Zero-indexed" receptive field.
   // E.g. a 1x1 convolution has a z.i.r.f. of zero.
@@ -144,7 +144,7 @@ class _Head
 {
 public:
   _Head(const int input_size, const int num_layers, const int channels, const std::string activation);
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // NOTE: the head transforms the provided input by applying a nonlinearity
   // to it in-place!
   void process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs);
@@ -169,11 +169,11 @@ class WaveNet : public DSP
 {
 public:
   WaveNet(const std::vector<LayerArrayParams>& layer_array_params, const float head_scale, const bool with_head,
-          std::vector<float> weights, const double expected_sample_rate = -1.0);
+          const std::vector<float>& weights, const double expected_sample_rate = -1.0);
   ~WaveNet() = default;
 
   void finalize_(const int num_frames) override;
-  void set_weights_(std::vector<float>& weights);
+  void set_weights_(const std::vector<float>& weights);
 
 private:
   long _num_frames;

From 0833ea6760c70c9fde8f48c47d370d6222c8269a Mon Sep 17 00:00:00 2001
From: Oli Larkin
Date: Sat, 30 Dec 2023 21:11:29 +0100
Subject: [PATCH 2/3] Introduce weights_it alias

---
 NAM/convnet.cpp | 8 ++++----
 NAM/convnet.h   | 6 +++---
 NAM/dsp.cpp     | 6 +++---
 NAM/dsp.h       | 8 +++++---
 NAM/lstm.cpp    | 5 +++--
 NAM/lstm.h      | 2 +-
 NAM/wavenet.cpp | 8 ++++----
 NAM/wavenet.h   | 6 +++---
 8 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/NAM/convnet.cpp b/NAM/convnet.cpp
index bc0a392..937bf0b 100644
--- a/NAM/convnet.cpp
+++ b/NAM/convnet.cpp
@@ -10,7 +10,7 @@
 #include "dsp.h"
 #include "convnet.h"
 
-nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::const_iterator& weights)
+nam::convnet::BatchNorm::BatchNorm(const int dim, weights_it& weights)
 {
   // Extract from param buffer
   Eigen::VectorXf running_mean(dim);
@@ -48,7 +48,7 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c
 
 void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
                                               const bool batchnorm, const std::string activation,
-                                              std::vector<float>::const_iterator& weights)
+                                              weights_it& weights)
 {
   this->_batchnorm = batchnorm;
   // HACK 2 kernel
@@ -74,7 +74,7 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
   return this->conv.get_out_channels();
 }
 
-nam::convnet::_Head::_Head(const int channels, std::vector<float>::const_iterator& weights)
+nam::convnet::_Head::_Head(const int channels, weights_it& weights)
 {
   this->_weight.resize(channels);
   for (int i = 0; i < channels; i++)
@@ -98,7 +98,7 @@ nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilat
 {
   this->_verify_weights(channels, dilations, batchnorm, weights.size());
   this->_blocks.resize(dilations.size());
-  std::vector<float>::const_iterator it = weights.begin();
+  weights_it it = weights.begin();
   for (size_t i = 0; i < dilations.size(); i++)
     this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
   this->_block_vals.resize(this->_blocks.size() + 1);
diff --git a/NAM/convnet.h b/NAM/convnet.h
index a410506..02af4c8 100644
--- a/NAM/convnet.h
+++ b/NAM/convnet.h
@@ -23,7 +23,7 @@ class BatchNorm
 {
 public:
   BatchNorm(){};
-  BatchNorm(const int dim, std::vector<float>::const_iterator& weights);
+  BatchNorm(const int dim, weights_it& weights);
   void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
 
 private:
@@ -41,7 +41,7 @@ class ConvNetBlock
 public:
   ConvNetBlock(){};
   void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
-                    const std::string activation, std::vector<float>::const_iterator& weights);
+                    const std::string activation, weights_it& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
   long get_out_channels() const;
   Conv1D conv;
@@ -56,7 +56,7 @@ class _Head
 {
 public:
   _Head(){};
-  _Head(const int channels, std::vector<float>::const_iterator& weights);
+  _Head(const int channels, weights_it& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
 
 private:
diff --git a/NAM/dsp.cpp b/NAM/dsp.cpp
index 68f5f37..db0730b 100644
--- a/NAM/dsp.cpp
+++ b/NAM/dsp.cpp
@@ -167,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
 
 // NN modules =================================================================
 
-void nam::Conv1D::set_weights_(std::vector<float>::const_iterator& weights)
+void nam::Conv1D::set_weights_(weights_it& weights)
 {
   if (this->_weight.size() > 0)
   {
@@ -198,7 +198,7 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
 }
 
 void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
-                                        const int _dilation, const bool do_bias, std::vector<float>::const_iterator& weights)
+                                        const int _dilation, const bool do_bias, weights_it& weights)
 {
   this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
   this->set_weights_(weights);
@@ -236,7 +236,7 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
     this->_bias.resize(out_channels);
 }
 
-void nam::Conv1x1::set_weights_(std::vector<float>::const_iterator& weights)
+void nam::Conv1x1::set_weights_(weights_it& weights)
 {
   for (int i = 0; i < this->_weight.rows(); i++)
     for (int j = 0; j < this->_weight.cols(); j++)
diff --git a/NAM/dsp.h b/NAM/dsp.h
index 737d6c3..e74a153 100644
--- a/NAM/dsp.h
+++ b/NAM/dsp.h
@@ -23,6 +23,8 @@
 
 namespace nam
 {
+using weights_it = std::vector<float>::const_iterator;
+
 enum EArchitectures
 {
   kLinear = 0,
@@ -124,11 +126,11 @@ class Conv1D
 {
 public:
   Conv1D() { this->_dilation = 1; };
-  void set_weights_(std::vector<float>::const_iterator& weights);
+  void set_weights_(weights_it& weights);
   void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
                  const int _dilation);
   void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
-                             const bool do_bias, std::vector<float>::const_iterator& weights);
+                             const bool do_bias, weights_it& weights);
   // Process from input to output
   // Rightmost indices of input go from i_start to i_end,
   // Indices on output for from j_start (to j_start + i_end - i_start)
@@ -153,7 +155,7 @@ class Conv1x1
 {
 public:
   Conv1x1(const int in_channels, const int out_channels, const bool _bias);
-  void set_weights_(std::vector<float>::const_iterator& weights);
+  void set_weights_(weights_it& weights);
   // :param input: (N,Cin) or (Cin,)
   // :return: (N,Cout) or (Cout,), respectively
   Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
diff --git a/NAM/lstm.cpp b/NAM/lstm.cpp
index bde249f..3b55575 100644
--- a/NAM/lstm.cpp
+++ b/NAM/lstm.cpp
@@ -2,9 +2,10 @@
 #include
 #include
 
+#include "dsp.h"
 #include "lstm.h"
 
-nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights)
+nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, weights_it& weights)
 {
   // Resize arrays
   this->_w.resize(4 * hidden_size, input_size + hidden_size);
@@ -68,7 +69,7 @@ nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidd
 : DSP(expected_sample_rate)
 {
   this->_input.resize(1);
-  std::vector<float>::const_iterator it = weights.begin();
+  auto it = weights.begin();
   for (int i = 0; i < num_layers; i++)
     this->_layers.push_back(LSTMCell(i == 0 ? input_size : hidden_size, hidden_size, it));
   this->_head_weight.resize(hidden_size);
diff --git a/NAM/lstm.h b/NAM/lstm.h
index cdfada0..a432169 100644
--- a/NAM/lstm.h
+++ b/NAM/lstm.h
@@ -22,7 +22,7 @@ namespace lstm
 class LSTMCell
 {
 public:
-  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights);
+  LSTMCell(const int input_size, const int hidden_size, weights_it& weights);
   Eigen::VectorXf get_hidden_state() const { return this->_xh(Eigen::placeholders::lastN(this->_get_hidden_size())); };
   void process_(const Eigen::VectorXf& x);
 
diff --git a/NAM/wavenet.cpp b/NAM/wavenet.cpp
index 111dae0..42dd299 100644
--- a/NAM/wavenet.cpp
+++ b/NAM/wavenet.cpp
@@ -12,7 +12,7 @@ nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_ch
   this->set_size_(in_channels, out_channels, kernel_size, bias, dilation);
 }
 
-void nam::wavenet::_Layer::set_weights_(std::vector<float>::const_iterator& weights)
+void nam::wavenet::_Layer::set_weights_(weights_it& weights)
 {
   this->_conv.set_weights_(weights);
   this->_input_mixin.set_weights_(weights);
@@ -133,7 +133,7 @@ void nam::wavenet::_LayerArray::set_num_frames_(const long num_frames)
     this->_layers[i].set_num_frames_(num_frames);
 }
 
-void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::const_iterator& weights)
+void nam::wavenet::_LayerArray::set_weights_(weights_it& weights)
 {
   this->_rechannel.set_weights_(weights);
   for (size_t i = 0; i < this->_layers.size(); i++)
@@ -186,7 +186,7 @@ nam::wavenet::_Head::_Head(const int input_size, const int num_layers, const int
   }
 }
 
-void nam::wavenet::_Head::set_weights_(std::vector<float>::const_iterator& weights)
+void nam::wavenet::_Head::set_weights_(weights_it& weights)
 {
   for (size_t i = 0; i < this->_layers.size(); i++)
     this->_layers[i].set_weights_(weights);
@@ -274,7 +274,7 @@ void nam::wavenet::WaveNet::finalize_(const int num_frames)
 
 void nam::wavenet::WaveNet::set_weights_(const std::vector<float>& weights)
 {
-  std::vector<float>::const_iterator it = weights.begin();
+  weights_it it = weights.begin();
   for (size_t i = 0; i < this->_layer_arrays.size(); i++)
     this->_layer_arrays[i].set_weights_(it);
   // this->_head.set_params_(it);
diff --git a/NAM/wavenet.h b/NAM/wavenet.h
index b4ab962..bd30f03 100644
--- a/NAM/wavenet.h
+++ b/NAM/wavenet.h
@@ -30,7 +30,7 @@ class _Layer
   , _1x1(channels, channels, true)
   , _activation(activations::Activation::get_activation(activation))
   , _gated(gated){};
-  void set_weights_(std::vector<float>::const_iterator& weights);
+  void set_weights_(weights_it& weights);
   // :param `input`: from previous layer
   // :param `output`: to next layer
   void process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition, Eigen::MatrixXf& head_input,
@@ -108,7 +108,7 @@ class _LayerArray
                 Eigen::MatrixXf& head_outputs // post head-rechannel
   );
   void set_num_frames_(const long num_frames);
-  void set_weights_(std::vector<float>::const_iterator& it);
+  void set_weights_(weights_it& it);
 
   // "Zero-indexed" receptive field.
   // E.g. a 1x1 convolution has a z.i.r.f. of zero.
@@ -144,7 +144,7 @@ class _Head
 {
 public:
   _Head(const int input_size, const int num_layers, const int channels, const std::string activation);
-  void set_weights_(std::vector<float>::const_iterator& weights);
+  void set_weights_(weights_it& weights);
   // NOTE: the head transforms the provided input by applying a nonlinearity
   // to it in-place!
   void process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs);

From 78f42d048aaa9757616792427ba8cdeb712584e7 Mon Sep 17 00:00:00 2001
From: Oli Larkin
Date: Sat, 30 Dec 2023 21:55:33 +0100
Subject: [PATCH 3/3] Use Eigen::Ref<> rather than normal references

https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html#TopicUsingRefClass
---
 NAM/activations.h |  2 +-
 NAM/convnet.cpp   | 10 +++++-----
 NAM/convnet.h     |  6 +++---
 NAM/dsp.cpp       |  8 ++++----
 NAM/dsp.h         |  4 ++--
 NAM/lstm.cpp      |  2 +-
 NAM/wavenet.cpp   | 30 ++++++++++++++++++++----------
 NAM/wavenet.h     | 18 +++++++++---------
 8 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/NAM/activations.h b/NAM/activations.h
index e9afc33..51b03d9 100644
--- a/NAM/activations.h
+++ b/NAM/activations.h
@@ -45,7 +45,7 @@ class Activation
 public:
   Activation() = default;
   virtual ~Activation() = default;
-  virtual void apply(Eigen::MatrixXf& matrix) { apply(matrix.data(), matrix.rows() * matrix.cols()); }
+  virtual void apply(Eigen::Ref<Eigen::MatrixXf> matrix) { apply(matrix.data(), matrix.rows() * matrix.cols()); }
   virtual void apply(Eigen::Block<Eigen::MatrixXf> block) { apply(block.data(), block.rows() * block.cols()); }
   virtual void apply(Eigen::Block<Eigen::MatrixXf, -1, -1, true> block)
   {
diff --git a/NAM/convnet.cpp b/NAM/convnet.cpp
index 937bf0b..1122871 100644
--- a/NAM/convnet.cpp
+++ b/NAM/convnet.cpp
@@ -35,7 +35,7 @@ nam::convnet::BatchNorm::BatchNorm(const int dim, weights_it& weights)
   this->loc = _bias - this->scale.cwiseProduct(running_mean);
 }
 
-void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, const long i_end) const
+void nam::convnet::BatchNorm::process_(Eigen::Ref<Eigen::MatrixXf> x, const long i_start, const long i_end) const
 {
   // todo using colwise?
   // #speed but conv probably dominates
@@ -58,7 +58,7 @@ void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int o
   this->activation = activations::Activation::get_activation(activation);
 }
 
-void nam::convnet::ConvNetBlock::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start,
+void nam::convnet::ConvNetBlock::process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start,
                                           const long i_end) const
 {
   const long ncols = i_end - i_start;
@@ -82,7 +82,7 @@ nam::convnet::_Head::_Head(const int channels, weights_it& weights)
   this->_bias = *(weights++);
 }
 
-void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start,
+void nam::convnet::_Head::process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::VectorXf& output, const long i_start,
                                    const long i_end) const
 {
   const long length = i_end - i_start;
@@ -146,7 +146,7 @@ void nam::convnet::ConvNet::_update_buffers_(NAM_SAMPLE* input, const int num_fr
 
   const size_t buffer_size = this->_input_buffer.size();
 
-  if (this->_block_vals[0].rows() != 1 || this->_block_vals[0].cols() != buffer_size)
+  if (this->_block_vals[0].rows() != Eigen::Index(1) || this->_block_vals[0].cols() != Eigen::Index(buffer_size))
   {
     this->_block_vals[0].resize(1, buffer_size);
     this->_block_vals[0].setZero();
@@ -155,7 +155,7 @@ void nam::convnet::ConvNet::_update_buffers_(NAM_SAMPLE* input, const int num_fr
   for (size_t i = 1; i < this->_block_vals.size(); i++)
   {
     if (this->_block_vals[i].rows() == this->_blocks[i - 1].get_out_channels()
-        && this->_block_vals[i].cols() == buffer_size)
+        && this->_block_vals[i].cols() == Eigen::Index(buffer_size))
       continue; // Already has correct size
     this->_block_vals[i].resize(this->_blocks[i - 1].get_out_channels(), buffer_size);
     this->_block_vals[i].setZero();
diff --git a/NAM/convnet.h b/NAM/convnet.h
index 02af4c8..a117b43 100644
--- a/NAM/convnet.h
+++ b/NAM/convnet.h
@@ -24,7 +24,7 @@ class BatchNorm
 public:
   BatchNorm(){};
   BatchNorm(const int dim, weights_it& weights);
-  void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
+  void process_(Eigen::Ref<Eigen::MatrixXf> input, const long i_start, const long i_end) const;
 
 private:
   // TODO simplify to just ax+b
@@ -42,7 +42,7 @@ class ConvNetBlock
   ConvNetBlock(){};
   void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
                     const std::string activation, weights_it& weights);
-  void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
+  void process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long i_end) const;
   long get_out_channels() const;
   Conv1D conv;
 
@@ -57,7 +57,7 @@ class _Head
 public:
   _Head(){};
   _Head(const int channels, weights_it& weights);
-  void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
+  void process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
 
 private:
   Eigen::VectorXf _weight;
diff --git a/NAM/dsp.cpp b/NAM/dsp.cpp
index db0730b..9b27ec9 100644
--- a/NAM/dsp.cpp
+++ b/NAM/dsp.cpp
@@ -39,7 +39,7 @@ void nam::DSP::prewarm()
 void nam::DSP::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
 {
   // Default implementation is the null operation
-  for (size_t i = 0; i < num_frames; i++)
+  for (auto i = 0; i < num_frames; i++)
     output[i] = input[i];
 }
 
@@ -157,7 +157,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
   this->nam::Buffer::_update_buffers_(input, num_frames);
 
   // Main computation!
-  for (size_t i = 0; i < num_frames; i++)
+  for (auto i = 0; i < num_frames; i++)
   {
     const size_t offset = this->_input_buffer_offset - this->_weight.size() + i + 1;
     auto input = Eigen::Map<const Eigen::VectorXf>(&this->_input_buffer[offset], this->_receptive_field);
@@ -204,7 +204,7 @@ void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_cha
   this->set_weights_(weights);
 }
 
-void nam::Conv1D::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long ncols,
+void nam::Conv1D::process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long ncols,
                            const long j_start) const
 {
   // This is the clever part ;)
@@ -246,7 +246,7 @@ void nam::Conv1x1::set_weights_(weights_it& weights)
     this->_bias(i) = *(weights++);
 }
 
-Eigen::MatrixXf nam::Conv1x1::process(const Eigen::MatrixXf& input) const
+Eigen::MatrixXf nam::Conv1x1::process(const Eigen::Ref<const Eigen::MatrixXf> input) const
 {
   if (this->_do_bias)
     return (this->_weight * input).colwise() + this->_bias;
diff --git a/NAM/dsp.h b/NAM/dsp.h
index e74a153..118e782 100644
--- a/NAM/dsp.h
+++ b/NAM/dsp.h
@@ -134,7 +134,7 @@ class Conv1D
   // Process from input to output
   // Rightmost indices of input go from i_start to i_end,
   // Indices on output for from j_start (to j_start + i_end - i_start)
-  void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end,
+  void process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long i_end,
                 const long j_start) const;
   long get_in_channels() const { return this->_weight.size() > 0 ? this->_weight[0].cols() : 0; };
   long get_kernel_size() const { return this->_weight.size(); };
@@ -158,7 +158,7 @@ class Conv1x1
   void set_weights_(weights_it& weights);
   // :param input: (N,Cin) or (Cin,)
   // :return: (N,Cout) or (Cout,), respectively
-  Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
+  Eigen::MatrixXf process(const Eigen::Ref<const Eigen::MatrixXf> input) const;
 
   long get_out_channels() const { return this->_weight.rows(); };
 
diff --git a/NAM/lstm.cpp b/NAM/lstm.cpp
index 3b55575..aa0f6ad 100644
--- a/NAM/lstm.cpp
+++ b/NAM/lstm.cpp
@@ -81,7 +81,7 @@ nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidd
 
 void nam::lstm::LSTM::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
 {
-  for (size_t i = 0; i < num_frames; i++)
+  for (auto i = 0; i < num_frames; i++)
     output[i] = this->_process_sample(input[i]);
 }
 
diff --git a/NAM/wavenet.cpp b/NAM/wavenet.cpp
index 42dd299..7899c3b 100644
--- a/NAM/wavenet.cpp
+++ b/NAM/wavenet.cpp
@@ -19,8 +19,8 @@ void nam::wavenet::_Layer::set_weights_(weights_it& weights)
   this->_1x1.set_weights_(weights);
 }
 
-void nam::wavenet::_Layer::process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition,
-                                    Eigen::MatrixXf& head_input, Eigen::MatrixXf& output, const long i_start,
+void nam::wavenet::_Layer::process_(const Eigen::Ref<const Eigen::MatrixXf> input, const Eigen::Ref<const Eigen::MatrixXf> condition,
+                                    Eigen::Ref<Eigen::MatrixXf> head_input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start,
                                     const long j_start)
 {
   const long ncols = condition.cols();
@@ -102,17 +102,27 @@ void nam::wavenet::_LayerArray::prepare_for_frames_(const long num_frames)
     this->_rewind_buffers_();
 }
 
-void nam::wavenet::_LayerArray::process_(const Eigen::MatrixXf& layer_inputs, const Eigen::MatrixXf& condition,
-                                         Eigen::MatrixXf& head_inputs, Eigen::MatrixXf& layer_outputs,
-                                         Eigen::MatrixXf& head_outputs)
+void nam::wavenet::_LayerArray::process_(const Eigen::Ref<const Eigen::MatrixXf> layer_inputs, const Eigen::Ref<const Eigen::MatrixXf> condition,
+                                         Eigen::Ref<Eigen::MatrixXf> head_inputs, Eigen::Ref<Eigen::MatrixXf> layer_outputs,
+                                         Eigen::Ref<Eigen::MatrixXf> head_outputs)
 {
   this->_layer_buffers[0].middleCols(this->_buffer_start, layer_inputs.cols()) = this->_rechannel.process(layer_inputs);
   const size_t last_layer = this->_layers.size() - 1;
   for (size_t i = 0; i < this->_layers.size(); i++)
   {
-    this->_layers[i].process_(this->_layer_buffers[i], condition, head_inputs,
-                              i == last_layer ? layer_outputs : this->_layer_buffers[i + 1], this->_buffer_start,
-                              i == last_layer ? 0 : this->_buffer_start);
+    if (i == last_layer)
+    {
+      this->_layers[i].process_(this->_layer_buffers[i], condition, head_inputs,
+                                layer_outputs, this->_buffer_start,
+                                0);
+    }
+    else
+    {
+      this->_layers[i].process_(this->_layer_buffers[i], condition, head_inputs,
+                                this->_layer_buffers[i + 1], this->_buffer_start,
+                                this->_buffer_start);
+    }
+
   }
   head_outputs = this->_head_rechannel.process(head_inputs);
 }
@@ -192,7 +202,7 @@ void nam::wavenet::_Head::set_weights_(weights_it& weights)
     this->_layers[i].set_weights_(weights);
 }
 
-void nam::wavenet::_Head::process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs)
+void nam::wavenet::_Head::process_(Eigen::Ref<Eigen::MatrixXf> inputs, Eigen::Ref<Eigen::MatrixXf> outputs)
 {
   const size_t num_layers = this->_layers.size();
   this->_apply_activation_(inputs);
@@ -223,7 +233,7 @@ void nam::wavenet::_Head::set_num_frames_(const long num_frames)
   }
 }
 
-void nam::wavenet::_Head::_apply_activation_(Eigen::MatrixXf& x)
+void nam::wavenet::_Head::_apply_activation_(Eigen::Ref<Eigen::MatrixXf> x)
 {
   this->_activation->apply(x);
 }
diff --git a/NAM/wavenet.h b/NAM/wavenet.h
index bd30f03..fbaab54 100644
--- a/NAM/wavenet.h
+++ b/NAM/wavenet.h
@@ -33,8 +33,8 @@ class _Layer
   void set_weights_(weights_it& weights);
   // :param `input`: from previous layer
   // :param `output`: to next layer
-  void process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition, Eigen::MatrixXf& head_input,
-                Eigen::MatrixXf& output, const long i_start, const long j_start);
+  void process_(const Eigen::Ref<const Eigen::MatrixXf> input, const Eigen::Ref<const Eigen::MatrixXf> condition, Eigen::Ref<Eigen::MatrixXf> head_input,
+                Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long j_start);
   void set_num_frames_(const long num_frames);
   long get_channels() const { return this->_conv.get_in_channels(); };
   int get_dilation() const { return this->_conv.get_dilation(); };
@@ -101,11 +101,11 @@ class _LayerArray
   void prepare_for_frames_(const long num_frames);
 
   // All arrays are "short".
-  void process_(const Eigen::MatrixXf& layer_inputs, // Short
-                const Eigen::MatrixXf& condition, // Short
-                Eigen::MatrixXf& layer_outputs, // Short
-                Eigen::MatrixXf& head_inputs, // Sum up on this.
-                Eigen::MatrixXf& head_outputs // post head-rechannel
+  void process_(const Eigen::Ref<const Eigen::MatrixXf> layer_inputs, // Short
+                const Eigen::Ref<const Eigen::MatrixXf> condition, // Short
+                Eigen::Ref<Eigen::MatrixXf> layer_outputs, // Short
+                Eigen::Ref<Eigen::MatrixXf> head_inputs, // Sum up on this.
+                Eigen::Ref<Eigen::MatrixXf> head_outputs // post head-rechannel
   );
   void set_num_frames_(const long num_frames);
   void set_weights_(weights_it& it);
@@ -144,7 +144,7 @@ class _Head
 {
 public:
   _Head(const int input_size, const int num_layers, const int channels, const std::string activation);
   void set_weights_(weights_it& weights);
   // NOTE: the head transforms the provided input by applying a nonlinearity
   // to it in-place!
-  void process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs);
+  void process_(Eigen::Ref<Eigen::MatrixXf> inputs, Eigen::Ref<Eigen::MatrixXf> outputs);
   void set_num_frames_(const long num_frames);
 
 private:
@@ -161,7 +161,7 @@ class _Head
   std::vector<Eigen::MatrixXf> _buffers;
 
   // Apply the activation to the provided array, in-place
-  void _apply_activation_(Eigen::MatrixXf& x);
+  void _apply_activation_(Eigen::Ref<Eigen::MatrixXf> x);
 };
 
 // The main WaveNet model
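
Usage note (not part of the patches above): the sketch below illustrates the calling pattern this series enables. It is a minimal, self-contained example under stated assumptions; `weights_it` matches the alias added to NAM/dsp.h in patch 2, while `next_weight` and `apply_gain` are hypothetical stand-ins for the NAM set_weights_/process_ methods, not functions from the repository. A `const std::vector<float>&` can now feed a model through a `const_iterator` (patches 1 and 2), and an `Eigen::Ref<Eigen::MatrixXf>` parameter (patch 3) binds to a whole matrix or to a writable contiguous block such as `middleCols()` without an intermediate copy.

// Sketch only; assumes C++17 and Eigen 3.
#include <vector>
#include <Eigen/Dense>

using weights_it = std::vector<float>::const_iterator; // alias from patch 2

// Reads one weight through the const_iterator, mirroring how the constructors consume `weights`.
static float next_weight(weights_it& it)
{
  return *(it++);
}

// Eigen::Ref<Eigen::MatrixXf> accepts both full matrices and writable blocks (patch 3).
static void apply_gain(Eigen::Ref<Eigen::MatrixXf> x, const float gain)
{
  x *= gain;
}

int main()
{
  const std::vector<float> weights{0.5f, 2.0f}; // can stay const; no copy is required
  weights_it it = weights.begin();

  Eigen::MatrixXf buffer = Eigen::MatrixXf::Ones(4, 8);
  apply_gain(buffer, next_weight(it));                  // whole matrix
  apply_gain(buffer.middleCols(2, 3), next_weight(it)); // a block, bound without a temporary
  return 0;
}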