25 changes: 18 additions & 7 deletions NAM/gating_activations.h
@@ -37,7 +37,13 @@ class GatingActivation
, gating_activation(gating_act)
, num_channels(input_channels)
{
assert(num_channels > 0);
if (num_channels <= 0)
{
throw std::invalid_argument("GatingActivation: number of input channels must be positive");
}
// Initialize input buffer with correct size
// Note: current code copies column-by-column so we only need (num_channels, 1)
input_buffer.resize(num_channels, 1);
}

~GatingActivation() = default;
@@ -47,7 +53,8 @@ class GatingActivation
* @param input Input matrix with shape (input_channels + gating_channels) x num_samples
* @param output Output matrix with shape input_channels x num_samples
*/
void apply(Eigen::MatrixXf& input, Eigen::MatrixXf& output)
template<typename InputDerived, typename OutputDerived>
void apply(const Eigen::MatrixBase<InputDerived>& input, Eigen::MatrixBase<OutputDerived>& output)
{
// Validate input dimensions (assert for real-time performance)
const int total_channels = 2 * num_channels;
@@ -59,6 +66,9 @@ class GatingActivation
const int num_samples = input.cols();
for (int i = 0; i < num_samples; i++)
{
// Store pre-activation input values in buffer to avoid overwriting issues
input_buffer = input.block(0, i, num_channels, 1);

// Apply activation to input channels
Eigen::MatrixXf input_block = input.block(0, i, num_channels, 1);
input_activation->apply(input_block);
@@ -71,6 +81,7 @@
// For wavenet compatibility, we assume one-to-one mapping
output.block(0, i, num_channels, 1) = input_block.array() * gating_block.array();
}

}

/**
@@ -87,6 +98,7 @@
activations::Activation* input_activation;
activations::Activation* gating_activation;
int num_channels;
Eigen::MatrixXf input_buffer;
};

class BlendingActivation
@@ -103,10 +115,8 @@ class BlendingActivation
, blending_activation(blend_act)
, num_channels(input_channels)
{
if (num_channels <= 0)
{
throw std::invalid_argument("BlendingActivation: number of input channels must be positive");
}
assert(num_channels > 0);

// Initialize input buffer with correct size
// Note: current code copies column-by-column so we only need (num_channels, 1)
input_buffer.resize(num_channels, 1);
@@ -119,7 +129,8 @@
* @param input Input matrix with shape (input_channels + blend_channels) x num_samples
* @param output Output matrix with shape input_channels x num_samples
*/
void apply(Eigen::MatrixXf& input, Eigen::MatrixXf& output)
template<typename InputDerived, typename OutputDerived>
void apply(const Eigen::MatrixBase<InputDerived>& input, Eigen::MatrixBase<OutputDerived>& output)
{
// Validate input dimensions (assert for real-time performance)
const int total_channels = num_channels * 2; // 2*channels in, channels out
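A note on the templated apply() above: because the output parameter is a non-const Eigen::MatrixBase<OutputDerived>&, a temporary block expression cannot bind to it; callers have to name their blocks first, which is exactly what the wavenet.cpp changes below do. A minimal usage sketch (illustrative only; it assumes "Tanh" and "Sigmoid" are registered with activations::Activation::get_activation):

#include <Eigen/Dense>
#include "gating_activations.h"

// Sketch: run a (2 * bottleneck) x num_frames buffer through GatingActivation,
// writing the gated result into the top rows, as the WaveNet layer does.
void demo(Eigen::MatrixXf& z, int bottleneck, int num_frames)
{
  gating_activations::GatingActivation gate(
    activations::Activation::get_activation("Tanh"),    // input activation (assumed registered)
    activations::Activation::get_activation("Sigmoid"), // gating activation
    bottleneck);

  // Name the blocks first; temporaries cannot bind to the non-const output reference.
  auto input_block = z.leftCols(num_frames);                      // all 2 * bottleneck rows
  auto output_block = z.topRows(bottleneck).leftCols(num_frames); // result lands here
  gate.apply(input_block, output_block);
}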
89 changes: 65 additions & 24 deletions NAM/wavenet.cpp
@@ -58,29 +58,33 @@ void nam::wavenet::_Layer::Process(const Eigen::MatrixXf& input, const Eigen::Ma
this->_conv.GetOutput().leftCols(num_frames) + _input_mixin.GetOutput().leftCols(num_frames);

// Step 2 & 3: activation and 1x1
if (!this->_gated)
if (this->_gating_mode == GatingMode::NONE)
{
this->_activation->apply(this->_z.leftCols(num_frames));
_1x1.process_(_z, num_frames);
}
else
else if (this->_gating_mode == GatingMode::GATED)
{
// CAREFUL: .topRows() and .bottomRows() won't be memory-contiguous for a column-major matrix (Issue 125). Need to
// do this column-wise:
for (int i = 0; i < num_frames; i++)
{
this->_activation->apply(this->_z.block(0, i, bottleneck, 1));
// TODO Need to support other activation functions here instead of hardcoded sigmoid
activations::Activation::get_activation("Sigmoid")->apply(this->_z.block(bottleneck, i, bottleneck, 1));
}
this->_z.block(0, 0, bottleneck, num_frames).array() *=
this->_z.block(bottleneck, 0, bottleneck, num_frames).array();
_1x1.process_(_z.topRows(bottleneck), num_frames); // Might not be RT safe
// Use the GatingActivation class
// Extract the blocks first to avoid temporary reference issues
auto input_block = this->_z.leftCols(num_frames);
auto output_block = this->_z.topRows(bottleneck).leftCols(num_frames);
Owner commented:

This is a bit tricky to me... so the bottom rows are for the gate/blend op, and the top rows are both the main activation's input and the gating activation's output.

I'll make a note of this.
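Concretely, the layout being described (derived from the surrounding diff; _conv produces 2 * bottleneck channels when gating is enabled):

// Per column of _z when gating/blending is active (illustrative):
//   rows [0, bottleneck)            -> main activation input; the gated/blended
//                                      result is written back over these rows
//   rows [bottleneck, 2*bottleneck) -> gate/blend channel input
// The _1x1 that follows then consumes only _z.topRows(bottleneck).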
this->_gating_activation->apply(input_block, output_block);
_1x1.process_(this->_z.topRows(bottleneck), num_frames);
}
else if (this->_gating_mode == GatingMode::BLENDED)
{
// Use the BlendingActivation class
// Extract the blocks first to avoid temporary reference issues
auto input_block = this->_z.leftCols(num_frames);
auto output_block = this->_z.topRows(bottleneck).leftCols(num_frames);
this->_blending_activation->apply(input_block, output_block);
_1x1.process_(this->_z.topRows(bottleneck), num_frames);
}

if (this->_head1x1)
{
if (!this->_gated)
if (this->_gating_mode == GatingMode::NONE)
this->_head1x1->process_(this->_z.leftCols(num_frames), num_frames);
else
this->_head1x1->process(this->_z.topRows(bottleneck).leftCols(num_frames), num_frames);
@@ -89,7 +93,7 @@
else
{
// Store output to head (skip connection: activated conv output)
if (!this->_gated)
if (this->_gating_mode == GatingMode::NONE)
this->_output_head.leftCols(num_frames).noalias() = this->_z.leftCols(num_frames);
else
this->_output_head.leftCols(num_frames).noalias() = this->_z.topRows(bottleneck).leftCols(num_frames);
@@ -105,15 +109,16 @@ void nam::wavenet::_Layer::Process(const Eigen::MatrixXf& input, const Eigen::Ma
nam::wavenet::_LayerArray::_LayerArray(const int input_size, const int condition_size, const int head_size,
const int channels, const int bottleneck, const int kernel_size,
const std::vector<int>& dilations, const std::string activation,
const bool gated, const bool head_bias, const int groups_input,
const int groups_1x1, const Head1x1Params& head1x1_params)
const GatingMode gating_mode, const bool head_bias, const int groups_input,
const int groups_1x1, const Head1x1Params& head1x1_params,
const std::string& gating_activation, const std::string& blending_activation)
: _rechannel(input_size, channels, false)
, _head_rechannel(bottleneck, head_size, head_bias)
, _bottleneck(bottleneck)
{
for (size_t i = 0; i < dilations.size(); i++)
this->_layers.push_back(_Layer(condition_size, channels, bottleneck, kernel_size, dilations[i], activation, gated,
groups_input, groups_1x1, head1x1_params));
this->_layers.push_back(_Layer(condition_size, channels, bottleneck, kernel_size, dilations[i], activation, gating_mode,
groups_input, groups_1x1, head1x1_params, gating_activation, blending_activation));
}

void nam::wavenet::_LayerArray::SetMaxBufferSize(const int maxBufferSize)
Expand Down Expand Up @@ -263,9 +268,10 @@ nam::wavenet::WaveNet::WaveNet(const int in_channels,
this->_layer_arrays.push_back(nam::wavenet::_LayerArray(
layer_array_params[i].input_size, layer_array_params[i].condition_size, layer_array_params[i].head_size,
layer_array_params[i].channels, layer_array_params[i].bottleneck, layer_array_params[i].kernel_size,
layer_array_params[i].dilations, layer_array_params[i].activation, layer_array_params[i].gated,
layer_array_params[i].dilations, layer_array_params[i].activation, layer_array_params[i].gating_mode,
layer_array_params[i].head_bias, layer_array_params[i].groups_input, layer_array_params[i].groups_1x1,
layer_array_params[i].head1x1_params));
layer_array_params[i].head1x1_params, layer_array_params[i].gating_activation,
layer_array_params[i].blending_activation));
if (i > 0)
if (layer_array_params[i].channels != layer_array_params[i - 1].head_size)
{
@@ -468,7 +474,41 @@ std::unique_ptr<nam::DSP> nam::wavenet::Factory(const nlohmann::json& config, st
const int kernel_size = layer_config["kernel_size"];
const auto dilations = layer_config["dilations"];
const std::string activation = layer_config["activation"].get<std::string>();
const bool gated = layer_config["gated"];
// Parse gating mode - support both old "gated" boolean and new "gating_mode" string
GatingMode gating_mode = GatingMode::NONE;
std::string gating_activation = "Sigmoid";
std::string blending_activation = "Sigmoid";

if (layer_config.find("gating_mode") != layer_config.end())
{
std::string gating_mode_str = layer_config["gating_mode"].get<std::string>();
if (gating_mode_str == "gated")
gating_mode = GatingMode::GATED;
else if (gating_mode_str == "blended")
gating_mode = GatingMode::BLENDED;
else if (gating_mode_str == "none")
gating_mode = GatingMode::NONE;
else
throw std::runtime_error("Invalid gating_mode: " + gating_mode_str);

// Parse configurable activations if present
if (layer_config.find("gating_activation") != layer_config.end())
{
gating_activation = layer_config["gating_activation"].get<std::string>();
}
if (layer_config.find("blending_activation") != layer_config.end())
{
blending_activation = layer_config["blending_activation"].get<std::string>();
}
}
else if (layer_config.find("gated") != layer_config.end())
{
// Backward compatibility: convert old "gated" boolean to new enum
bool gated = layer_config["gated"];
gating_mode = gated ? GatingMode::GATED : GatingMode::NONE;
}
// If neither is present, default to NONE

const bool head_bias = layer_config["head_bias"];

// Parse head1x1 parameters
Expand All @@ -478,8 +518,9 @@ std::unique_ptr<nam::DSP> nam::wavenet::Factory(const nlohmann::json& config, st
nam::wavenet::Head1x1Params head1x1_params(head1x1_active, head1x1_out_channels, head1x1_groups);

layer_array_params.push_back(nam::wavenet::LayerArrayParams(input_size, condition_size, head_size, channels,
bottleneck, kernel_size, dilations, activation, gated,
head_bias, groups, groups_1x1, head1x1_params));
bottleneck, kernel_size, dilations, activation, gating_mode,
head_bias, groups, groups_1x1, head1x1_params,
gating_activation, blending_activation));
}
const bool with_head = !config["head"].is_null();
const float head_scale = config["head_scale"];
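For context, a hypothetical layer_config fragment that exercises the new keys parsed by the Factory above (only the gating-related keys and their accepted values come from the parsing code; the other values are placeholders):

// Illustrative config only; mirrors the keys read in Factory above.
nlohmann::json layer_config = {
  {"kernel_size", 3},
  {"dilations", {1, 2, 4, 8}},
  {"activation", "Tanh"},
  {"gating_mode", "blended"},         // "gated", "blended", or "none"
  {"blending_activation", "Sigmoid"}, // optional; defaults to "Sigmoid"
  {"head_bias", false}
};

Older files that instead carry {"gated", true} still load through the backward-compatibility branch as GatingMode::GATED with the default "Sigmoid" gating activation.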
98 changes: 87 additions & 11 deletions NAM/wavenet.h
@@ -9,11 +9,26 @@

#include "dsp.h"
#include "conv1d.h"
#include "gating_activations.h"

namespace nam
{
namespace wavenet
{

// Gating mode for WaveNet layers
enum class GatingMode
{
NONE, // No gating or blending
GATED, // Traditional gating (element-wise multiplication)
BLENDED // Blending (weighted average)
};

// Helper function for backward compatibility with boolean gated parameter
inline GatingMode gating_mode_from_bool(bool gated)
{
return gated ? GatingMode::GATED : GatingMode::NONE;
}
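As a reading aid, the per-sample math behind the two nontrivial modes (the GATED line restates what gating_activations.h implements above; the BLENDED line is an assumption, since BlendingActivation's apply body is collapsed in this diff):

// x = first `bottleneck` channels, g = second `bottleneck` channels:
//   GATED:   out = act(x) .* gate_act(g)
//   BLENDED: b = blend_act(g);  out = b .* act(x) + (1 - b) .* x   (assumed convex blend)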
// Parameters for head1x1 configuration
struct Head1x1Params
{
@@ -32,21 +47,50 @@
class _Layer
{
public:
// New constructor with GatingMode enum and configurable activations
_Layer(const int condition_size, const int channels, const int bottleneck, const int kernel_size, const int dilation,
const std::string activation, const bool gated, const int groups_input, const int groups_1x1,
const Head1x1Params& head1x1_params)
: _conv(channels, gated ? 2 * bottleneck : bottleneck, kernel_size, true, dilation)
, _input_mixin(condition_size, gated ? 2 * bottleneck : bottleneck, false)
const std::string activation, const GatingMode gating_mode, const int groups_input, const int groups_1x1,
const Head1x1Params& head1x1_params, const std::string& gating_activation = "Sigmoid",
const std::string& blending_activation = "Sigmoid")
: _conv(channels, (gating_mode != GatingMode::NONE) ? 2 * bottleneck : bottleneck, kernel_size, true, dilation)
, _input_mixin(condition_size, (gating_mode != GatingMode::NONE) ? 2 * bottleneck : bottleneck, false)
, _1x1(bottleneck, channels, groups_1x1)
, _activation(activations::Activation::get_activation(activation)) // needs to support activations with parameters
, _gated(gated)
, _gating_mode(gating_mode)
, _bottleneck(bottleneck)
{
if (head1x1_params.active)
{
_head1x1 = std::make_unique<Conv1x1>(bottleneck, head1x1_params.out_channels, true, head1x1_params.groups);
}

// Initialize gating/blending activation if needed
if (gating_mode == GatingMode::GATED)
{
_gating_activation = std::make_unique<gating_activations::GatingActivation>(
_activation,
activations::Activation::get_activation(gating_activation),
bottleneck
);
}
else if (gating_mode == GatingMode::BLENDED)
{
_blending_activation = std::make_unique<gating_activations::BlendingActivation>(
_activation,
activations::Activation::get_activation(blending_activation),
bottleneck
);
}
};

// Backward compatibility constructor with boolean gated parameter
_Layer(const int condition_size, const int channels, const int bottleneck, const int kernel_size, const int dilation,
const std::string activation, const bool gated, const int groups_input, const int groups_1x1,
const Head1x1Params& head1x1_params)
: _Layer(condition_size, channels, bottleneck, kernel_size, dilation, activation,
gating_mode_from_bool(gated), groups_input, groups_1x1, head1x1_params, "Sigmoid", "Sigmoid")
{
}

// Resize all arrays to be able to process `maxBufferSize` frames.
void SetMaxBufferSize(const int maxBufferSize);
@@ -97,17 +141,22 @@ class _Layer
Eigen::MatrixXf _output_head;

activations::Activation* _activation;
const bool _gated;
const GatingMode _gating_mode;
const int _bottleneck; // Internal channel count (not doubled when gated)

// Gating/blending activation objects
std::unique_ptr<gating_activations::GatingActivation> _gating_activation;
std::unique_ptr<gating_activations::BlendingActivation> _blending_activation;
};

class LayerArrayParams
{
public:
LayerArrayParams(const int input_size_, const int condition_size_, const int head_size_, const int channels_,
const int bottleneck_, const int kernel_size_, const std::vector<int>&& dilations_,
const std::string activation_, const bool gated_, const bool head_bias_, const int groups_input,
const int groups_1x1_, const Head1x1Params& head1x1_params_)
const std::string activation_, const GatingMode gating_mode_, const bool head_bias_,
const int groups_input, const int groups_1x1_, const Head1x1Params& head1x1_params_,
const std::string& gating_activation_ = "Sigmoid", const std::string& blending_activation_ = "Sigmoid")
: input_size(input_size_)
, condition_size(condition_size_)
, head_size(head_size_)
@@ -116,11 +165,13 @@
, kernel_size(kernel_size_)
, dilations(std::move(dilations_))
, activation(activation_)
, gated(gated_)
, gating_mode(gating_mode_)
, head_bias(head_bias_)
, groups_input(groups_input)
, groups_1x1(groups_1x1_)
, head1x1_params(head1x1_params_)
, gating_activation(gating_activation_)
, blending_activation(blending_activation_)
{
}

@@ -132,21 +183,46 @@
const int kernel_size;
std::vector<int> dilations;
const std::string activation;
const bool gated;
const GatingMode gating_mode;
const bool head_bias;
const int groups_input;
const int groups_1x1;
const Head1x1Params head1x1_params;
const std::string gating_activation;
const std::string blending_activation;

// Backward compatibility constructor with boolean gated parameter
LayerArrayParams(const int input_size_, const int condition_size_, const int head_size_, const int channels_,
Owner commented:

I'm gonna refactor this so that all of the backward compatibility is in the factory, then all the constructors below that can be nice & fresh.

const int bottleneck_, const int kernel_size_, const std::vector<int>&& dilations_,
const std::string activation_, const bool gated_, const bool head_bias_, const int groups_input,
const int groups_1x1_, const Head1x1Params& head1x1_params_)
: LayerArrayParams(input_size_, condition_size_, head_size_, channels_, bottleneck_, kernel_size_,
std::move(dilations_), activation_, gating_mode_from_bool(gated_), head_bias_, groups_input,
groups_1x1_, head1x1_params_, "Sigmoid", "Sigmoid")
{
}
};
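Until that factory refactor lands, both LayerArrayParams entry points coexist; a sketch of equivalent constructions (all parameter values are placeholders):

// The legacy boolean form delegates to the GatingMode form with "Sigmoid" defaults.
nam::wavenet::Head1x1Params head1x1_params(false, 16, 1);
nam::wavenet::LayerArrayParams legacy(1, 1, 8, 16, 16, 3, {1, 2, 4}, "Tanh",
                                      /*gated_=*/true, /*head_bias_=*/true, 1, 1, head1x1_params);
nam::wavenet::LayerArrayParams fresh(1, 1, 8, 16, 16, 3, {1, 2, 4}, "Tanh",
                                     nam::wavenet::GatingMode::GATED, /*head_bias_=*/true,
                                     1, 1, head1x1_params, "Sigmoid", "Sigmoid");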

// An array of layers with the same channels, kernel sizes, activations.
class _LayerArray
{
public:
// New constructor with GatingMode enum and configurable activations
_LayerArray(const int input_size, const int condition_size, const int head_size, const int channels,
const int bottleneck, const int kernel_size, const std::vector<int>& dilations,
const std::string activation, const GatingMode gating_mode, const bool head_bias, const int groups_input,
const int groups_1x1, const Head1x1Params& head1x1_params, const std::string& gating_activation = "Sigmoid",
const std::string& blending_activation = "Sigmoid");

// Backward compatibility constructor with boolean gated parameter
_LayerArray(const int input_size, const int condition_size, const int head_size, const int channels,
const int bottleneck, const int kernel_size, const std::vector<int>& dilations,
const std::string activation, const bool gated, const bool head_bias, const int groups_input,
const int groups_1x1, const Head1x1Params& head1x1_params);
const int groups_1x1, const Head1x1Params& head1x1_params)
: _LayerArray(input_size, condition_size, head_size, channels, bottleneck, kernel_size, dilations,
activation, gating_mode_from_bool(gated), head_bias, groups_input, groups_1x1, head1x1_params)
{
}

void SetMaxBufferSize(const int maxBufferSize);

Expand Down
Loading