Eigen ref #98

Draft: wants to merge 3 commits into base: main

2 changes: 1 addition & 1 deletion NAM/activations.h
@@ -45,7 +45,7 @@ class Activation
public:
Activation() = default;
virtual ~Activation() = default;
- virtual void apply(Eigen::MatrixXf& matrix) { apply(matrix.data(), matrix.rows() * matrix.cols()); }
+ virtual void apply(Eigen::Ref<Eigen::MatrixXf> matrix) { apply(matrix.data(), matrix.rows() * matrix.cols()); }
virtual void apply(Eigen::Block<Eigen::MatrixXf> block) { apply(block.data(), block.rows() * block.cols()); }
virtual void apply(Eigen::Block<Eigen::MatrixXf, -1, -1, true> block)
{
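Note on the overload above: a writable `Eigen::Ref<Eigen::MatrixXf>` binds to a full `MatrixXf` and to contiguous column blocks without copying, so the same `apply()` entry point serves both kinds of callers. A minimal sketch of that binding behavior (illustrative only, not part of this diff):

```cpp
#include <Eigen/Dense>

// Scales its argument in place; any writable, unit-inner-stride expression can bind here.
void scale2(Eigen::Ref<Eigen::MatrixXf> x)
{
  x *= 2.0f;
}

int main()
{
  Eigen::MatrixXf m = Eigen::MatrixXf::Ones(4, 8);
  scale2(m);                  // whole matrix, no copy
  scale2(m.middleCols(2, 3)); // block of full columns, still no copy
  return 0;
}
```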
18 changes: 9 additions & 9 deletions NAM/convnet.cpp
@@ -10,7 +10,7 @@
#include "dsp.h"
#include "convnet.h"

- nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
+ nam::convnet::BatchNorm::BatchNorm(const int dim, weights_it& weights)
{
// Extract from param buffer
Eigen::VectorXf running_mean(dim);
@@ -35,7 +35,7 @@ nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator&
this->loc = _bias - this->scale.cwiseProduct(running_mean);
}

- void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, const long i_end) const
+ void nam::convnet::BatchNorm::process_(Eigen::Ref<Eigen::MatrixXf> x, const long i_start, const long i_end) const
{
// todo using colwise?
// #speed but conv probably dominates
@@ -48,7 +48,7 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c

void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
const bool batchnorm, const std::string activation,
- std::vector<float>::iterator& weights)
+ weights_it& weights)
{
this->_batchnorm = batchnorm;
// HACK 2 kernel
@@ -58,7 +58,7 @@ void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int o
this->activation = activations::Activation::get_activation(activation);
}

- void nam::convnet::ConvNetBlock::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start,
+ void nam::convnet::ConvNetBlock::process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start,
const long i_end) const
{
const long ncols = i_end - i_start;
@@ -74,15 +74,15 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
return this->conv.get_out_channels();
}

- nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
+ nam::convnet::_Head::_Head(const int channels, weights_it& weights)
{
this->_weight.resize(channels);
for (int i = 0; i < channels; i++)
this->_weight[i] = *(weights++);
this->_bias = *(weights++);
}

- void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start,
+ void nam::convnet::_Head::process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::VectorXf& output, const long i_start,
const long i_end) const
{
const long length = i_end - i_start;
@@ -98,7 +98,7 @@ nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilat
{
this->_verify_weights(channels, dilations, batchnorm, weights.size());
this->_blocks.resize(dilations.size());
- std::vector<float>::iterator it = weights.begin();
+ weights_it it = weights.begin();
for (size_t i = 0; i < dilations.size(); i++)
this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
this->_block_vals.resize(this->_blocks.size() + 1);
@@ -146,7 +146,7 @@ void nam::convnet::ConvNet::_update_buffers_(NAM_SAMPLE* input, const int num_fr

const size_t buffer_size = this->_input_buffer.size();

- if (this->_block_vals[0].rows() != 1 || this->_block_vals[0].cols() != buffer_size)
+ if (this->_block_vals[0].rows() != Eigen::Index(1) || this->_block_vals[0].cols() != Eigen::Index(buffer_size))
{
this->_block_vals[0].resize(1, buffer_size);
this->_block_vals[0].setZero();
@@ -155,7 +155,7 @@
for (size_t i = 1; i < this->_block_vals.size(); i++)
{
if (this->_block_vals[i].rows() == this->_blocks[i - 1].get_out_channels()
- && this->_block_vals[i].cols() == buffer_size)
+ && this->_block_vals[i].cols() == Eigen::Index(buffer_size))
continue; // Already has correct size
this->_block_vals[i].resize(this->_blocks[i - 1].get_out_channels(), buffer_size);
this->_block_vals[i].setZero();
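The `Eigen::Index(...)` casts above address a signed/unsigned mismatch: `rows()` and `cols()` return `Eigen::Index` (signed, `std::ptrdiff_t` by default), while `std::vector::size()` returns an unsigned `size_t`. A small sketch of the same comparison pattern, with illustrative names:

```cpp
#include <Eigen/Dense>
#include <vector>

// True if `m` already has one row per channel and one column per buffered sample.
bool has_expected_shape(const Eigen::MatrixXf& m, const std::vector<float>& buffer, const long channels)
{
  // m.cols() is Eigen::Index (signed); buffer.size() is size_t (unsigned).
  // Casting the unsigned value keeps the comparison signed-to-signed and warning-free.
  return m.rows() == Eigen::Index(channels) && m.cols() == Eigen::Index(buffer.size());
}
```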
12 changes: 6 additions & 6 deletions NAM/convnet.h
@@ -23,8 +23,8 @@ class BatchNorm
{
public:
BatchNorm(){};
- BatchNorm(const int dim, std::vector<float>::iterator& weights);
- void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
+ BatchNorm(const int dim, weights_it& weights);
+ void process_(Eigen::Ref<Eigen::MatrixXf> input, const long i_start, const long i_end) const;

private:
// TODO simplify to just ax+b
@@ -41,8 +41,8 @@ class ConvNetBlock
public:
ConvNetBlock(){};
void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
- const std::string activation, std::vector<float>::iterator& weights);
- void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
+ const std::string activation, weights_it& weights);
+ void process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long i_end) const;
long get_out_channels() const;
Conv1D conv;

@@ -56,8 +56,8 @@ class _Head
{
public:
_Head(){};
- _Head(const int channels, std::vector<float>::iterator& weights);
- void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
+ _Head(const int channels, weights_it& weights);
+ void process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::VectorXf& output, const long i_start, const long i_end) const;

private:
Eigen::VectorXf _weight;
14 changes: 7 additions & 7 deletions NAM/dsp.cpp
@@ -39,7 +39,7 @@ void nam::DSP::prewarm()
void nam::DSP::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
{
// Default implementation is the null operation
- for (size_t i = 0; i < num_frames; i++)
+ for (auto i = 0; i < num_frames; i++)
output[i] = input[i];
}
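On the loop-index change just above: `num_frames` is a signed `int`, so comparing a `size_t` counter against it is a signed/unsigned mismatch, while `auto i = 0` deduces `int`. A tiny stand-alone sketch of the same idea:

```cpp
// Copies n samples; `auto i = 0` deduces int, matching the signed count n.
void copy_frames(const float* in, float* out, const int n)
{
  for (auto i = 0; i < n; i++)
    out[i] = in[i];
}
```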

Expand Down Expand Up @@ -157,7 +157,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
this->nam::Buffer::_update_buffers_(input, num_frames);

// Main computation!
- for (size_t i = 0; i < num_frames; i++)
+ for (auto i = 0; i < num_frames; i++)
{
const size_t offset = this->_input_buffer_offset - this->_weight.size() + i + 1;
auto input = Eigen::Map<const Eigen::VectorXf>(&this->_input_buffer[offset], this->_receptive_field);
@@ -167,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f

// NN modules =================================================================

- void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
+ void nam::Conv1D::set_weights_(weights_it& weights)
{
if (this->_weight.size() > 0)
{
@@ -198,13 +198,13 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
}

void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
- const int _dilation, const bool do_bias, std::vector<float>::iterator& weights)
+ const int _dilation, const bool do_bias, weights_it& weights)
{
this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
this->set_weights_(weights);
}

- void nam::Conv1D::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long ncols,
+ void nam::Conv1D::process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long ncols,
const long j_start) const
{
// This is the clever part ;)
Expand Down Expand Up @@ -236,7 +236,7 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
this->_bias.resize(out_channels);
}

- void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
+ void nam::Conv1x1::set_weights_(weights_it& weights)
{
for (int i = 0; i < this->_weight.rows(); i++)
for (int j = 0; j < this->_weight.cols(); j++)
@@ -246,7 +246,7 @@ void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
this->_bias(i) = *(weights++);
}

- Eigen::MatrixXf nam::Conv1x1::process(const Eigen::MatrixXf& input) const
+ Eigen::MatrixXf nam::Conv1x1::process(const Eigen::Ref<const Eigen::MatrixXf> input) const
{
if (this->_do_bias)
return (this->_weight * input).colwise() + this->_bias;
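For the read-only parameters in this file, `Eigen::Ref<const Eigen::MatrixXf>` acts as a view: it binds to a `MatrixXf` or a compatible block without copying, and if handed a general expression it evaluates that expression into a temporary held inside the `Ref`. A minimal sketch, not tied to the classes in this diff:

```cpp
#include <Eigen/Dense>

// Sums whatever matrix-like argument is passed in, without taking ownership.
float sum_all(const Eigen::Ref<const Eigen::MatrixXf> x)
{
  return x.sum();
}

int main()
{
  Eigen::MatrixXf a = Eigen::MatrixXf::Random(4, 4);
  float s1 = sum_all(a);                  // no copy
  float s2 = sum_all(a.middleCols(1, 2)); // block view, no copy
  float s3 = sum_all(a * 2.0f);           // expression: evaluated into a temporary first
  (void)s1; (void)s2; (void)s3;
  return 0;
}
```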
12 changes: 7 additions & 5 deletions NAM/dsp.h
@@ -23,6 +23,8 @@

namespace nam
{
+ using weights_it = std::vector<float>::const_iterator;
+
enum EArchitectures
{
kLinear = 0,
@@ -124,15 +126,15 @@ class Conv1D
{
public:
Conv1D() { this->_dilation = 1; };
- void set_weights_(std::vector<float>::iterator& weights);
+ void set_weights_(weights_it& weights);
void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
const int _dilation);
void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
- const bool do_bias, std::vector<float>::iterator& weights);
+ const bool do_bias, weights_it& weights);
// Process from input to output
// Rightmost indices of input go from i_start to i_end,
// Indices on output go from j_start (to j_start + i_end - i_start)
- void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end,
+ void process_(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start, const long i_end,
const long j_start) const;
long get_in_channels() const { return this->_weight.size() > 0 ? this->_weight[0].cols() : 0; };
long get_kernel_size() const { return this->_weight.size(); };
@@ -153,10 +155,10 @@ class Conv1x1
{
public:
Conv1x1(const int in_channels, const int out_channels, const bool _bias);
- void set_weights_(std::vector<float>::iterator& weights);
+ void set_weights_(weights_it& weights);
// :param input: (N,Cin) or (Cin,)
// :return: (N,Cout) or (Cout,), respectively
- Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
+ Eigen::MatrixXf process(const Eigen::Ref<const Eigen::MatrixXf> input) const;

long get_out_channels() const { return this->_weight.rows(); };

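The new `weights_it` alias is a `const_iterator`, which is what lets the loaders elsewhere in this PR take `const std::vector<float>&`: `begin()` on a const vector returns a `const_iterator`, and each consumer just reads values and advances the shared iterator. A sketch of that consumption pattern, with hypothetical names:

```cpp
#include <vector>

using weights_it = std::vector<float>::const_iterator;

// Hypothetical layer that pulls its parameters off the front of a shared weight
// buffer, advancing the caller's iterator as it consumes values.
struct ToyLayer
{
  float scale = 1.0f;
  float bias = 0.0f;

  void set_weights_(weights_it& weights)
  {
    scale = *(weights++);
    bias = *(weights++);
  }
};

void load(const std::vector<float>& weights)
{
  weights_it it = weights.begin(); // const vector -> const_iterator, matching weights_it
  ToyLayer a, b;
  a.set_weights_(it);
  b.set_weights_(it); // continues where the previous layer stopped
}
```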
9 changes: 5 additions & 4 deletions NAM/lstm.cpp
@@ -2,9 +2,10 @@
#include <string>
#include <vector>

+ #include "dsp.h"
#include "lstm.h"

- nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights)
+ nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, weights_it& weights)
{
// Resize arrays
this->_w.resize(4 * hidden_size, input_size + hidden_size);
@@ -63,12 +64,12 @@ void nam::lstm::LSTMCell::process_(const Eigen::VectorXf& x)
}
}

- nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+ nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
const double expected_sample_rate)
: DSP(expected_sample_rate)
{
this->_input.resize(1);
- std::vector<float>::iterator it = weights.begin();
+ auto it = weights.begin();
for (int i = 0; i < num_layers; i++)
this->_layers.push_back(LSTMCell(i == 0 ? input_size : hidden_size, hidden_size, it));
this->_head_weight.resize(hidden_size);
@@ -80,7 +81,7 @@ nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidd

void nam::lstm::LSTM::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
{
- for (size_t i = 0; i < num_frames; i++)
+ for (auto i = 0; i < num_frames; i++)
output[i] = this->_process_sample(input[i]);
}

4 changes: 2 additions & 2 deletions NAM/lstm.h
@@ -22,7 +22,7 @@ namespace lstm
class LSTMCell
{
public:
- LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights);
+ LSTMCell(const int input_size, const int hidden_size, weights_it& weights);
Eigen::VectorXf get_hidden_state() const { return this->_xh(Eigen::placeholders::lastN(this->_get_hidden_size())); };
void process_(const Eigen::VectorXf& x);

@@ -50,7 +50,7 @@ class LSTMCell
class LSTM : public DSP
{
public:
- LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+ LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
const double expected_sample_rate = -1.0);
~LSTM() = default;

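With the constructor now taking `const std::vector<float>&`, a caller can keep its weights in a const buffer. A hypothetical usage sketch (the loader and the layer sizes are made up for illustration; the number of weights must match what the network expects):

```cpp
#include <vector>
#include "NAM/lstm.h"

// Placeholder for however the flattened weights are actually obtained.
std::vector<float> load_weights_somehow();

void build_model()
{
  const std::vector<float> weights = load_weights_somehow();
  // num_layers=1, input_size=1, hidden_size=16 are illustrative values only.
  nam::lstm::LSTM model(1, 1, 16, weights, 48000.0);
}
```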
42 changes: 26 additions & 16 deletions NAM/wavenet.cpp
@@ -12,15 +12,15 @@ nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_ch
this->set_size_(in_channels, out_channels, kernel_size, bias, dilation);
}

- void nam::wavenet::_Layer::set_weights_(std::vector<float>::iterator& weights)
+ void nam::wavenet::_Layer::set_weights_(weights_it& weights)
{
this->_conv.set_weights_(weights);
this->_input_mixin.set_weights_(weights);
this->_1x1.set_weights_(weights);
}

- void nam::wavenet::_Layer::process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition,
- Eigen::MatrixXf& head_input, Eigen::MatrixXf& output, const long i_start,
+ void nam::wavenet::_Layer::process_(const Eigen::Ref<const Eigen::MatrixXf> input, const Eigen::Ref<const Eigen::MatrixXf> condition,
+ Eigen::Ref<Eigen::MatrixXf> head_input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start,
const long j_start)
{
const long ncols = condition.cols();
@@ -102,17 +102,27 @@ void nam::wavenet::_LayerArray::prepare_for_frames_(const long num_frames)
this->_rewind_buffers_();
}

- void nam::wavenet::_LayerArray::process_(const Eigen::MatrixXf& layer_inputs, const Eigen::MatrixXf& condition,
- Eigen::MatrixXf& head_inputs, Eigen::MatrixXf& layer_outputs,
- Eigen::MatrixXf& head_outputs)
+ void nam::wavenet::_LayerArray::process_(const Eigen::Ref<const Eigen::MatrixXf> layer_inputs, const Eigen::Ref<const Eigen::MatrixXf> condition,
+ Eigen::Ref<Eigen::MatrixXf> head_inputs, Eigen::Ref<Eigen::MatrixXf> layer_outputs,
+ Eigen::Ref<Eigen::MatrixXf> head_outputs)
{
this->_layer_buffers[0].middleCols(this->_buffer_start, layer_inputs.cols()) = this->_rechannel.process(layer_inputs);
const size_t last_layer = this->_layers.size() - 1;
for (size_t i = 0; i < this->_layers.size(); i++)
{
- this->_layers[i].process_(this->_layer_buffers[i], condition, head_inputs,
- i == last_layer ? layer_outputs : this->_layer_buffers[i + 1], this->_buffer_start,
- i == last_layer ? 0 : this->_buffer_start);
+ if (i == last_layer)
+ {
+ this->_layers[i].process_(this->_layer_buffers[i], condition, head_inputs,
+ layer_outputs, this->_buffer_start,
+ 0);
+ }
+ else
+ {
+ this->_layers[i].process_(this->_layer_buffers[i], condition, head_inputs,
+ this->_layer_buffers[i + 1], this->_buffer_start,
+ this->_buffer_start);
+ }
+
}
head_outputs = this->_head_rechannel.process(head_inputs);
}
@@ -133,7 +143,7 @@ void nam::wavenet::_LayerArray::set_num_frames_(const long num_frames)
this->_layers[i].set_num_frames_(num_frames);
}

- void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::iterator& weights)
+ void nam::wavenet::_LayerArray::set_weights_(weights_it& weights)
{
this->_rechannel.set_weights_(weights);
for (size_t i = 0; i < this->_layers.size(); i++)
@@ -186,13 +196,13 @@ nam::wavenet::_Head::_Head(const int input_size, const int num_layers, const int
}
}

- void nam::wavenet::_Head::set_weights_(std::vector<float>::iterator& weights)
+ void nam::wavenet::_Head::set_weights_(weights_it& weights)
{
for (size_t i = 0; i < this->_layers.size(); i++)
this->_layers[i].set_weights_(weights);
}

- void nam::wavenet::_Head::process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs)
+ void nam::wavenet::_Head::process_(Eigen::Ref<Eigen::MatrixXf> inputs, Eigen::Ref<Eigen::MatrixXf> outputs)
{
const size_t num_layers = this->_layers.size();
this->_apply_activation_(inputs);
@@ -223,15 +233,15 @@ void nam::wavenet::_Head::set_num_frames_(const long num_frames)
}
}

- void nam::wavenet::_Head::_apply_activation_(Eigen::MatrixXf& x)
+ void nam::wavenet::_Head::_apply_activation_(Eigen::Ref<Eigen::MatrixXf> x)
{
this->_activation->apply(x);
}

// WaveNet ====================================================================

nam::wavenet::WaveNet::WaveNet(const std::vector<nam::wavenet::LayerArrayParams>& layer_array_params,
- const float head_scale, const bool with_head, std::vector<float> weights,
+ const float head_scale, const bool with_head, const std::vector<float>& weights,
const double expected_sample_rate)
: DSP(expected_sample_rate)
, _num_frames(0)
@@ -272,9 +282,9 @@ void nam::wavenet::WaveNet::finalize_(const int num_frames)
this->_advance_buffers_(num_frames);
}

- void nam::wavenet::WaveNet::set_weights_(std::vector<float>& weights)
+ void nam::wavenet::WaveNet::set_weights_(const std::vector<float>& weights)
{
- std::vector<float>::iterator it = weights.begin();
+ weights_it it = weights.begin();
for (size_t i = 0; i < this->_layer_arrays.size(); i++)
this->_layer_arrays[i].set_weights_(it);
// this->_head.set_params_(it);