Skip to content

Commit 76be336

Browse files
committed
Add test for Layer::Process() with grouped convolution
- Implemented test_layer_grouped_process_realtime_safe() to verify that the Layer::Process() method with grouped convolution does not allocate or free memory during execution.
- Updated run_tests.cpp to include the new test case for comprehensive coverage of grouped convolution functionality.
1 parent ddce333 commit 76be336

File tree

3 files changed

+104
-1
lines changed

3 files changed

+104
-1
lines changed

NAM/wavenet.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,8 @@ nam::wavenet::_LayerArray::_LayerArray(const int input_size, const int condition
7979
, _head_rechannel(channels, head_size, head_bias)
8080
{
8181
for (size_t i = 0; i < dilations.size(); i++)
82-
this->_layers.push_back(_Layer(condition_size, channels, kernel_size, dilations[i], activation, gated, groups_input));
82+
this->_layers.push_back(
83+
_Layer(condition_size, channels, kernel_size, dilations[i], activation, gated, groups_input));
8384
}
8485

8586
void nam::wavenet::_LayerArray::SetMaxBufferSize(const int maxBufferSize)

tools/run_tests.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@ int main()
102102
test_wavenet::test_conv1d_grouped_process_realtime_safe();
103103
test_wavenet::test_conv1d_grouped_dilated_process_realtime_safe();
104104
test_wavenet::test_layer_process_realtime_safe();
105+
test_wavenet::test_layer_grouped_process_realtime_safe();
105106
test_wavenet::test_layer_array_process_realtime_safe();
106107
test_wavenet::test_process_realtime_safe();
107108

tools/test/test_wavenet/test_real_time_safe.cpp

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -475,6 +475,107 @@ void test_layer_process_realtime_safe()
475475
}
476476
}
477477

478+
// Test that Layer::Process() method with grouped convolution (groups_input > 1) does not allocate or free memory
479+
void test_layer_grouped_process_realtime_safe()
480+
{
481+
// Setup: Create a Layer with grouped convolution
482+
const int condition_size = 1;
483+
const int channels = 4; // Must be divisible by groups_input
484+
const int kernel_size = 2;
485+
const int dilation = 1;
486+
const std::string activation = "ReLU";
487+
const bool gated = false;
488+
const int groups_input = 2; // groups_input > 1
489+
490+
auto layer = nam::wavenet::_Layer(condition_size, channels, kernel_size, dilation, activation, gated, groups_input);
491+
492+
// Set weights for grouped convolution
493+
// With groups_input=2, channels=4: each group has 2 in_channels and 2 out_channels
494+
// Conv weights: for each group g, for each kernel position k, for each (out_ch, in_ch)
495+
// Group 0: processes channels 0-1, Group 1: processes channels 2-3
496+
std::vector<float> weights;
497+
// Conv weights: 2 groups, kernel_size=2, 2 out_channels per group, 2 in_channels per group
498+
// Group 0, kernel[0]: identity for channels 0-1
499+
weights.push_back(1.0f); // out_ch=0, in_ch=0
500+
weights.push_back(0.0f); // out_ch=0, in_ch=1
501+
weights.push_back(0.0f); // out_ch=1, in_ch=0
502+
weights.push_back(1.0f); // out_ch=1, in_ch=1
503+
// Group 0, kernel[1]: identity
504+
weights.push_back(1.0f);
505+
weights.push_back(0.0f);
506+
weights.push_back(0.0f);
507+
weights.push_back(1.0f);
508+
// Group 1, kernel[0]: identity for channels 2-3
509+
weights.push_back(1.0f); // out_ch=2, in_ch=2
510+
weights.push_back(0.0f); // out_ch=2, in_ch=3
511+
weights.push_back(0.0f); // out_ch=3, in_ch=2
512+
weights.push_back(1.0f); // out_ch=3, in_ch=3
513+
// Group 1, kernel[1]: identity
514+
weights.push_back(1.0f);
515+
weights.push_back(0.0f);
516+
weights.push_back(0.0f);
517+
weights.push_back(1.0f);
518+
// Conv bias: 4 values (one per output channel)
519+
weights.push_back(0.0f);
520+
weights.push_back(0.0f);
521+
weights.push_back(0.0f);
522+
weights.push_back(0.0f);
523+
// Input mixin: (channels, condition_size) = (4, 1)
524+
weights.push_back(1.0f);
525+
weights.push_back(1.0f);
526+
weights.push_back(1.0f);
527+
weights.push_back(1.0f);
528+
// 1x1: (channels, channels) = (4, 4) weights + (4,) bias
529+
// Identity matrix
530+
for (int i = 0; i < 4; i++)
531+
{
532+
for (int j = 0; j < 4; j++)
533+
{
534+
weights.push_back((i == j) ? 1.0f : 0.0f);
535+
}
536+
}
537+
// 1x1 bias: zeros
538+
weights.push_back(0.0f);
539+
weights.push_back(0.0f);
540+
weights.push_back(0.0f);
541+
weights.push_back(0.0f);
542+
543+
auto it = weights.begin();
544+
layer.set_weights_(it);
545+
546+
const int maxBufferSize = 256;
547+
layer.SetMaxBufferSize(maxBufferSize);
548+
549+
// Test with several different buffer sizes
550+
std::vector<int> buffer_sizes{1, 8, 16, 32, 64, 128, 256};
551+
552+
for (int buffer_size : buffer_sizes)
553+
{
554+
// Prepare input/condition matrices (allocate before tracking)
555+
Eigen::MatrixXf input(channels, buffer_size);
556+
Eigen::MatrixXf condition(condition_size, buffer_size);
557+
input.setConstant(0.5f);
558+
condition.setConstant(0.5f);
559+
560+
std::string test_name =
561+
"Layer Process (groups_input=" + std::to_string(groups_input) + ") - Buffer size " + std::to_string(buffer_size);
562+
run_allocation_test_no_allocations(
563+
nullptr, // No setup needed
564+
[&]() {
565+
// Call Process() - this should not allocate or free
566+
layer.Process(input, condition, buffer_size);
567+
},
568+
nullptr, // No teardown needed
569+
test_name.c_str());
570+
571+
// Verify output is valid
572+
auto output = layer.GetOutputNextLayer().leftCols(buffer_size);
573+
assert(output.rows() == channels && output.cols() == buffer_size);
574+
assert(std::isfinite(output(0, 0)));
575+
assert(std::isfinite(output(channels - 1, buffer_size - 1)));
576+
}
577+
}
578+
478579
// Test that LayerArray::Process() method does not allocate or free memory
479580
void test_layer_array_process_realtime_safe()
480581
{

0 commit comments

Comments
 (0)