
Commit d581d11

Pass in const reference to weights
And const_iterator rather than iterator
olilarkin committed Dec 30, 2023
1 parent 74a07ce commit d581d11
Showing 8 changed files with 31 additions and 31 deletions.
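The change is mechanical, but the pattern it serves is worth spelling out: every module reads its parameters from one shared weight buffer through a cursor that each loader advances past the weights it consumed. Since the buffer is now passed by const reference, that cursor must be a std::vector<float>::const_iterator, and it is passed by reference so the advance is visible to the next loader. A minimal standalone sketch of the idea, with hypothetical names (load_block is not from the repository):

#include <cassert>
#include <vector>

// Hypothetical loader in the style of this commit: consume n weights from
// the shared buffer and leave the cursor at the next module's parameters.
// const_iterator because the underlying vector is const; the iterator is
// taken by reference so the caller sees how far the read has progressed.
static std::vector<float> load_block(const int n, std::vector<float>::const_iterator& weights)
{
  std::vector<float> params(n);
  for (int i = 0; i < n; i++)
    params[i] = *(weights++);
  return params;
}

int main()
{
  const std::vector<float> weights{0.0f, 1.0f, 2.0f, 3.0f, 4.0f};
  std::vector<float>::const_iterator it = weights.begin();
  std::vector<float> conv = load_block(2, it); // takes {0, 1}
  std::vector<float> head = load_block(3, it); // takes {2, 3, 4}
  assert(conv.size() == 2 && head.size() == 3);
  assert(it == weights.end()); // every weight consumed exactly once
  return 0;
}

In the diff below, the same cursor is threaded from the ConvNet, LSTM, and WaveNet constructors down through each block's set_weights_ into Conv1D and Conv1x1.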
10 changes: 5 additions & 5 deletions NAM/convnet.cpp
@@ -10,7 +10,7 @@
#include "dsp.h"
#include "convnet.h"

- nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
+ nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::const_iterator& weights)
{
// Extract from param buffer
Eigen::VectorXf running_mean(dim);
@@ -48,7 +48,7 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c

void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
const bool batchnorm, const std::string activation,
- std::vector<float>::iterator& weights)
+ std::vector<float>::const_iterator& weights)
{
this->_batchnorm = batchnorm;
// HACK 2 kernel
@@ -74,7 +74,7 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
return this->conv.get_out_channels();
}

- nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
+ nam::convnet::_Head::_Head(const int channels, std::vector<float>::const_iterator& weights)
{
this->_weight.resize(channels);
for (int i = 0; i < channels; i++)
@@ -92,13 +92,13 @@ void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf
}

nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm,
- const std::string activation, std::vector<float>& weights,
+ const std::string activation, const std::vector<float>& weights,
const double expected_sample_rate)
: Buffer(*std::max_element(dilations.begin(), dilations.end()), expected_sample_rate)
{
this->_verify_weights(channels, dilations, batchnorm, weights.size());
this->_blocks.resize(dilations.size());
- std::vector<float>::iterator it = weights.begin();
+ std::vector<float>::const_iterator it = weights.begin();
for (size_t i = 0; i < dilations.size(); i++)
this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
this->_block_vals.resize(this->_blocks.size() + 1);
8 changes: 4 additions & 4 deletions NAM/convnet.h
@@ -23,7 +23,7 @@ class BatchNorm
{
public:
BatchNorm(){};
- BatchNorm(const int dim, std::vector<float>::iterator& weights);
+ BatchNorm(const int dim, std::vector<float>::const_iterator& weights);
void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;

private:
@@ -41,7 +41,7 @@ class ConvNetBlock
public:
ConvNetBlock(){};
void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
- const std::string activation, std::vector<float>::iterator& weights);
+ const std::string activation, std::vector<float>::const_iterator& weights);
void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
long get_out_channels() const;
Conv1D conv;
@@ -56,7 +56,7 @@ class _Head
{
public:
_Head(){};
- _Head(const int channels, std::vector<float>::iterator& weights);
+ _Head(const int channels, std::vector<float>::const_iterator& weights);
void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;

private:
@@ -68,7 +68,7 @@ class ConvNet : public Buffer
{
public:
ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm, const std::string activation,
- std::vector<float>& weights, const double expected_sample_rate = -1.0);
+ const std::vector<float>& weights, const double expected_sample_rate = -1.0);
~ConvNet() = default;

protected:
6 changes: 3 additions & 3 deletions NAM/dsp.cpp
@@ -167,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f

// NN modules =================================================================

- void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
+ void nam::Conv1D::set_weights_(std::vector<float>::const_iterator& weights)
{
if (this->_weight.size() > 0)
{
@@ -198,7 +198,7 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
}

void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
- const int _dilation, const bool do_bias, std::vector<float>::iterator& weights)
+ const int _dilation, const bool do_bias, std::vector<float>::const_iterator& weights)
{
this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
this->set_weights_(weights);
@@ -236,7 +236,7 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
this->_bias.resize(out_channels);
}

- void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
+ void nam::Conv1x1::set_weights_(std::vector<float>::const_iterator& weights)
{
for (int i = 0; i < this->_weight.rows(); i++)
for (int j = 0; j < this->_weight.cols(); j++)
6 changes: 3 additions & 3 deletions NAM/dsp.h
@@ -124,11 +124,11 @@ class Conv1D
{
public:
Conv1D() { this->_dilation = 1; };
- void set_weights_(std::vector<float>::iterator& weights);
+ void set_weights_(std::vector<float>::const_iterator& weights);
void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
const int _dilation);
void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
- const bool do_bias, std::vector<float>::iterator& weights);
+ const bool do_bias, std::vector<float>::const_iterator& weights);
// Process from input to output
// Rightmost indices of input go from i_start to i_end,
// Indices on output go from j_start (to j_start + i_end - i_start)
@@ -153,7 +153,7 @@ class Conv1x1
{
public:
Conv1x1(const int in_channels, const int out_channels, const bool _bias);
- void set_weights_(std::vector<float>::iterator& weights);
+ void set_weights_(std::vector<float>::const_iterator& weights);
// :param input: (N,Cin) or (Cin,)
// :return: (N,Cout) or (Cout,), respectively
Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
6 changes: 3 additions & 3 deletions NAM/lstm.cpp
@@ -4,7 +4,7 @@

#include "lstm.h"

- nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights)
+ nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights)
{
// Resize arrays
this->_w.resize(4 * hidden_size, input_size + hidden_size);
@@ -63,12 +63,12 @@ void nam::lstm::LSTMCell::process_(const Eigen::VectorXf& x)
}
}

- nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+ nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
const double expected_sample_rate)
: DSP(expected_sample_rate)
{
this->_input.resize(1);
- std::vector<float>::iterator it = weights.begin();
+ std::vector<float>::const_iterator it = weights.begin();
for (int i = 0; i < num_layers; i++)
this->_layers.push_back(LSTMCell(i == 0 ? input_size : hidden_size, hidden_size, it));
this->_head_weight.resize(hidden_size);
4 changes: 2 additions & 2 deletions NAM/lstm.h
@@ -22,7 +22,7 @@ namespace lstm
class LSTMCell
{
public:
- LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights);
+ LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights);
Eigen::VectorXf get_hidden_state() const { return this->_xh(Eigen::placeholders::lastN(this->_get_hidden_size())); };
void process_(const Eigen::VectorXf& x);

@@ -50,7 +50,7 @@ class LSTMCell
class LSTM : public DSP
{
public:
- LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+ LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
const double expected_sample_rate = -1.0);
~LSTM() = default;

12 changes: 6 additions & 6 deletions NAM/wavenet.cpp
@@ -12,7 +12,7 @@ nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_ch
this->set_size_(in_channels, out_channels, kernel_size, bias, dilation);
}

- void nam::wavenet::_Layer::set_weights_(std::vector<float>::iterator& weights)
+ void nam::wavenet::_Layer::set_weights_(std::vector<float>::const_iterator& weights)
{
this->_conv.set_weights_(weights);
this->_input_mixin.set_weights_(weights);
@@ -133,7 +133,7 @@ void nam::wavenet::_LayerArray::set_num_frames_(const long num_frames)
this->_layers[i].set_num_frames_(num_frames);
}

- void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::iterator& weights)
+ void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::const_iterator& weights)
{
this->_rechannel.set_weights_(weights);
for (size_t i = 0; i < this->_layers.size(); i++)
@@ -186,7 +186,7 @@ nam::wavenet::_Head::_Head(const int input_size, const int num_layers, const int
}
}

- void nam::wavenet::_Head::set_weights_(std::vector<float>::iterator& weights)
+ void nam::wavenet::_Head::set_weights_(std::vector<float>::const_iterator& weights)
{
for (size_t i = 0; i < this->_layers.size(); i++)
this->_layers[i].set_weights_(weights);
@@ -231,7 +231,7 @@ void nam::wavenet::_Head::_apply_activation_(Eigen::MatrixXf& x)
// WaveNet ====================================================================

nam::wavenet::WaveNet::WaveNet(const std::vector<nam::wavenet::LayerArrayParams>& layer_array_params,
- const float head_scale, const bool with_head, std::vector<float> weights,
+ const float head_scale, const bool with_head, const std::vector<float>& weights,
const double expected_sample_rate)
: DSP(expected_sample_rate)
, _num_frames(0)
@@ -272,9 +272,9 @@ void nam::wavenet::WaveNet::finalize_(const int num_frames)
this->_advance_buffers_(num_frames);
}

- void nam::wavenet::WaveNet::set_weights_(std::vector<float>& weights)
+ void nam::wavenet::WaveNet::set_weights_(const std::vector<float>& weights)
{
- std::vector<float>::iterator it = weights.begin();
+ std::vector<float>::const_iterator it = weights.begin();
for (size_t i = 0; i < this->_layer_arrays.size(); i++)
this->_layer_arrays[i].set_weights_(it);
// this->_head.set_params_(it);
10 changes: 5 additions & 5 deletions NAM/wavenet.h
@@ -30,7 +30,7 @@ class _Layer
, _1x1(channels, channels, true)
, _activation(activations::Activation::get_activation(activation))
, _gated(gated){};
- void set_weights_(std::vector<float>::iterator& weights);
+ void set_weights_(std::vector<float>::const_iterator& weights);
// :param `input`: from previous layer
// :param `output`: to next layer
void process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition, Eigen::MatrixXf& head_input,
@@ -108,7 +108,7 @@ class _LayerArray
Eigen::MatrixXf& head_outputs // post head-rechannel
);
void set_num_frames_(const long num_frames);
- void set_weights_(std::vector<float>::iterator& it);
+ void set_weights_(std::vector<float>::const_iterator& it);

// "Zero-indexed" receptive field.
// E.g. a 1x1 convolution has a z.i.r.f. of zero.
@@ -144,7 +144,7 @@ class _Head
{
public:
_Head(const int input_size, const int num_layers, const int channels, const std::string activation);
- void set_weights_(std::vector<float>::iterator& weights);
+ void set_weights_(std::vector<float>::const_iterator& weights);
// NOTE: the head transforms the provided input by applying a nonlinearity
// to it in-place!
void process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs);
@@ -169,11 +169,11 @@ class WaveNet : public DSP
{
public:
WaveNet(const std::vector<LayerArrayParams>& layer_array_params, const float head_scale, const bool with_head,
- std::vector<float> weights, const double expected_sample_rate = -1.0);
+ const std::vector<float>& weights, const double expected_sample_rate = -1.0);
~WaveNet() = default;

void finalize_(const int num_frames) override;
- void set_weights_(std::vector<float>& weights);
+ void set_weights_(const std::vector<float>& weights);

private:
long _num_frames;
