From a5ffa496793dc7c96ea3b1f9852381837e98e3bf Mon Sep 17 00:00:00 2001
From: Oli Larkin
Date: Wed, 27 Dec 2023 11:45:08 +0100
Subject: [PATCH] Pass in const reference to weights

And const_iterator rather than iterator
---
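Note for reviewers (text here, before the diffstat, is dropped by `git am`):
the two changes go together because calling begin() on a
`const std::vector<float>&` yields a `const_iterator`, so every
set_weights_() helper that walks the flat weight buffer must accept
`std::vector<float>::const_iterator&` as well. The iterator stays a
non-const *reference* on purpose: each module reads its own weights and
advances the shared cursor past them for the next module. A minimal
self-contained sketch of that pattern follows; the names DemoLayer and
demo_weights are illustrative only and do not appear in this patch.

#include <cassert>
#include <vector>

struct DemoLayer
{
  std::vector<float> weight;

  // Reads `dim` floats and advances the caller's cursor past them.
  // Taking the iterator by reference is what lets consecutive modules
  // consume consecutive slices of one flat buffer.
  void set_weights_(const int dim, std::vector<float>::const_iterator& weights)
  {
    weight.assign(weights, weights + dim);
    weights += dim;
  }
};

int main()
{
  const std::vector<float> demo_weights{0.1f, 0.2f, 0.3f, 0.4f};
  // begin() on a const vector is a const_iterator, so the buffer can
  // now be passed into the model by const reference.
  std::vector<float>::const_iterator it = demo_weights.begin();

  DemoLayer a, b;
  a.set_weights_(2, it); // consumes demo_weights[0..1]
  b.set_weights_(2, it); // consumes demo_weights[2..3]
  assert(it == demo_weights.end()); // whole buffer accounted for
  return 0;
}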
 NAM/convnet.cpp |  8 ++++----
 NAM/convnet.h   |  6 +++---
 NAM/dsp.cpp     |  6 +++---
 NAM/dsp.h       |  6 +++---
 NAM/lstm.cpp    |  6 +++---
 NAM/lstm.h      |  4 ++--
 NAM/wavenet.cpp | 12 ++++++------
 NAM/wavenet.h   | 10 +++++-----
 8 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/NAM/convnet.cpp b/NAM/convnet.cpp
index 1d4e3ba..bc0a392 100644
--- a/NAM/convnet.cpp
+++ b/NAM/convnet.cpp
@@ -10,7 +10,7 @@
 #include "dsp.h"
 #include "convnet.h"
 
-nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
+nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::const_iterator& weights)
 {
   // Extract from param buffer
   Eigen::VectorXf running_mean(dim);
@@ -48,7 +48,7 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c
 
 void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
                                               const bool batchnorm, const std::string activation,
-                                              std::vector<float>::iterator& weights)
+                                              std::vector<float>::const_iterator& weights)
 {
   this->_batchnorm = batchnorm;
   // HACK 2 kernel
@@ -74,7 +74,7 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
   return this->conv.get_out_channels();
 }
 
-nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
+nam::convnet::_Head::_Head(const int channels, std::vector<float>::const_iterator& weights)
 {
   this->_weight.resize(channels);
   for (int i = 0; i < channels; i++)
@@ -98,7 +98,7 @@ nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilat
 {
   this->_verify_weights(channels, dilations, batchnorm, weights.size());
   this->_blocks.resize(dilations.size());
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (size_t i = 0; i < dilations.size(); i++)
     this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
   this->_block_vals.resize(this->_blocks.size() + 1);
diff --git a/NAM/convnet.h b/NAM/convnet.h
index 310a1e5..a410506 100644
--- a/NAM/convnet.h
+++ b/NAM/convnet.h
@@ -23,7 +23,7 @@ class BatchNorm
 {
 public:
   BatchNorm(){};
-  BatchNorm(const int dim, std::vector<float>::iterator& weights);
+  BatchNorm(const int dim, std::vector<float>::const_iterator& weights);
   void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
 
 private:
@@ -41,7 +41,7 @@ class ConvNetBlock
 public:
   ConvNetBlock(){};
   void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
-                    const std::string activation, std::vector<float>::iterator& weights);
+                    const std::string activation, std::vector<float>::const_iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
   long get_out_channels() const;
   Conv1D conv;
@@ -56,7 +56,7 @@ class _Head
 {
 public:
   _Head(){};
-  _Head(const int channels, std::vector<float>::iterator& weights);
+  _Head(const int channels, std::vector<float>::const_iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
 
 private:
diff --git a/NAM/dsp.cpp b/NAM/dsp.cpp
index d068eed..68f5f37 100644
--- a/NAM/dsp.cpp
+++ b/NAM/dsp.cpp
@@ -167,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
 
 // NN modules =================================================================
 
-void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
+void nam::Conv1D::set_weights_(std::vector<float>::const_iterator& weights)
 {
   if (this->_weight.size() > 0)
   {
@@ -198,7 +198,7 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
 }
 
 void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
-                                        const int _dilation, const bool do_bias, std::vector<float>::iterator& weights)
+                                        const int _dilation, const bool do_bias, std::vector<float>::const_iterator& weights)
 {
   this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
   this->set_weights_(weights);
@@ -236,7 +236,7 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
   this->_bias.resize(out_channels);
 }
 
-void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
+void nam::Conv1x1::set_weights_(std::vector<float>::const_iterator& weights)
 {
   for (int i = 0; i < this->_weight.rows(); i++)
     for (int j = 0; j < this->_weight.cols(); j++)
diff --git a/NAM/dsp.h b/NAM/dsp.h
index c3e8ec9..737d6c3 100644
--- a/NAM/dsp.h
+++ b/NAM/dsp.h
@@ -124,11 +124,11 @@ class Conv1D
 {
 public:
   Conv1D() { this->_dilation = 1; };
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
                  const int _dilation);
   void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
-                             const bool do_bias, std::vector<float>::iterator& weights);
+                             const bool do_bias, std::vector<float>::const_iterator& weights);
   // Process from input to output
   //  Rightmost indices of input go from i_start to i_end,
   //  Indices on output for from j_start (to j_start + i_end - i_start)
@@ -153,7 +153,7 @@ class Conv1x1
 {
 public:
   Conv1x1(const int in_channels, const int out_channels, const bool _bias);
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // :param input: (N,Cin) or (Cin,)
   // :return: (N,Cout) or (Cout,), respectively
   Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
diff --git a/NAM/lstm.cpp b/NAM/lstm.cpp
index 38ff494..bde249f 100644
--- a/NAM/lstm.cpp
+++ b/NAM/lstm.cpp
@@ -4,7 +4,7 @@
 
 #include "lstm.h"
 
-nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights)
+nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights)
 {
   // Resize arrays
   this->_w.resize(4 * hidden_size, input_size + hidden_size);
@@ -63,12 +63,12 @@ void nam::lstm::LSTMCell::process_(const Eigen::VectorXf& x)
   }
 }
 
-nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
                       const double expected_sample_rate)
 : DSP(expected_sample_rate)
 {
   this->_input.resize(1);
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (int i = 0; i < num_layers; i++)
     this->_layers.push_back(LSTMCell(i == 0 ? input_size : hidden_size, hidden_size, it));
   this->_head_weight.resize(hidden_size);
diff --git a/NAM/lstm.h b/NAM/lstm.h
index 6b02b18..cdfada0 100644
--- a/NAM/lstm.h
+++ b/NAM/lstm.h
@@ -22,7 +22,7 @@ namespace lstm
 class LSTMCell
 {
 public:
-  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights);
+  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights);
   Eigen::VectorXf get_hidden_state() const { return this->_xh(Eigen::placeholders::lastN(this->_get_hidden_size())); };
   void process_(const Eigen::VectorXf& x);
 
@@ -50,7 +50,7 @@ class LSTMCell
 class LSTM : public DSP
 {
 public:
-  LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+  LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
        const double expected_sample_rate = -1.0);
   ~LSTM() = default;
 
diff --git a/NAM/wavenet.cpp b/NAM/wavenet.cpp
index fc96fa6..111dae0 100644
--- a/NAM/wavenet.cpp
+++ b/NAM/wavenet.cpp
@@ -12,7 +12,7 @@ nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_ch
   this->set_size_(in_channels, out_channels, kernel_size, bias, dilation);
 }
 
-void nam::wavenet::_Layer::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_Layer::set_weights_(std::vector<float>::const_iterator& weights)
 {
   this->_conv.set_weights_(weights);
   this->_input_mixin.set_weights_(weights);
@@ -133,7 +133,7 @@ void nam::wavenet::_LayerArray::set_num_frames_(const long num_frames)
     this->_layers[i].set_num_frames_(num_frames);
 }
 
-void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::const_iterator& weights)
 {
   this->_rechannel.set_weights_(weights);
   for (size_t i = 0; i < this->_layers.size(); i++)
@@ -186,7 +186,7 @@ nam::wavenet::_Head::_Head(const int input_size, const int num_layers, const int
   }
 }
 
-void nam::wavenet::_Head::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_Head::set_weights_(std::vector<float>::const_iterator& weights)
 {
   for (size_t i = 0; i < this->_layers.size(); i++)
     this->_layers[i].set_weights_(weights);
@@ -231,7 +231,7 @@ void nam::wavenet::_Head::_apply_activation_(Eigen::MatrixXf& x)
 // WaveNet ====================================================================
 
 nam::wavenet::WaveNet::WaveNet(const std::vector<LayerArrayParams>& layer_array_params,
-                               const float head_scale, const bool with_head, std::vector<float> weights,
+                               const float head_scale, const bool with_head, const std::vector<float>& weights,
                                const double expected_sample_rate)
 : DSP(expected_sample_rate)
 , _num_frames(0)
@@ -272,9 +272,9 @@ void nam::wavenet::WaveNet::finalize_(const int num_frames)
   this->_advance_buffers_(num_frames);
 }
 
-void nam::wavenet::WaveNet::set_weights_(std::vector<float>& weights)
+void nam::wavenet::WaveNet::set_weights_(const std::vector<float>& weights)
 {
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (size_t i = 0; i < this->_layer_arrays.size(); i++)
     this->_layer_arrays[i].set_weights_(it);
   // this->_head.set_params_(it);
diff --git a/NAM/wavenet.h b/NAM/wavenet.h
index 7ea94f1..b4ab962 100644
--- a/NAM/wavenet.h
+++ b/NAM/wavenet.h
@@ -30,7 +30,7 @@ class _Layer
   , _1x1(channels, channels, true)
   , _activation(activations::Activation::get_activation(activation))
   , _gated(gated){};
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // :param `input`: from previous layer
   // :param `output`: to next layer
   void process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition, Eigen::MatrixXf& head_input,
@@ -108,7 +108,7 @@ class _LayerArray
                 Eigen::MatrixXf& head_outputs // post head-rechannel
   );
   void set_num_frames_(const long num_frames);
-  void set_weights_(std::vector<float>::iterator& it);
+  void set_weights_(std::vector<float>::const_iterator& it);
 
   // "Zero-indexed" receptive field.
   // E.g. a 1x1 convolution has a z.i.r.f. of zero.
@@ -144,7 +144,7 @@ class _Head
 {
 public:
   _Head(const int input_size, const int num_layers, const int channels, const std::string activation);
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // NOTE: the head transforms the provided input by applying a nonlinearity
   // to it in-place!
   void process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs);
@@ -169,11 +169,11 @@ class WaveNet : public DSP
 {
 public:
   WaveNet(const std::vector<LayerArrayParams>& layer_array_params, const float head_scale, const bool with_head,
-          std::vector<float> weights, const double expected_sample_rate = -1.0);
+          const std::vector<float>& weights, const double expected_sample_rate = -1.0);
   ~WaveNet() = default;
 
   void finalize_(const int num_frames) override;
-  void set_weights_(std::vector<float>& weights);
+  void set_weights_(const std::vector<float>& weights);
 
 private:
   long _num_frames;