-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathver0.cu
More file actions
executable file
·161 lines (127 loc) · 4.15 KB
/
ver0.cu
File metadata and controls
executable file
·161 lines (127 loc) · 4.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <functional>
#include <iostream>
#include <vector>
#define NUM_THREADS_PER_DIM 32
using namespace std;
// Naive dense matrix multiply: C = A * B for square n x n row-major
// int matrices, one output element per thread.
// Expected launch: 2-D grid of 2-D blocks covering at least n x n threads.
// The bounds guard below makes the kernel safe when the grid overshoots n
// (e.g. a ceil-divided grid where n is not a multiple of the block dim);
// the original wrote out of bounds in that case.
__global__ void MatrixMul(const int* a, const int* b, int* c, int n) {
    // Each thread's global row and column index.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: threads outside the matrix do nothing.
    if (row >= n || col >= n) return;
    // Dot product of row `row` of A with column `col` of B.
    int tmp = 0;
    for (int k = 0; k < n; k++) {
        tmp += a[row * n + k] * b[k * n + col];
    }
    c[row * n + col] = tmp;
}
// CPU reference check for the GPU result.
// Recomputes A * B on the host (triple loop) and compares every element
// against `c`. Prints the first mismatch found (expected value, actual
// value, row, column) and stops; prints "All Good!" when everything matches.
void Validate(std::vector<int>& a, std::vector<int>& b, std::vector<int>& c, int n) {
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            // Host-side dot product for element (row, col).
            int expected = 0;
            for (int k = 0; k < n; ++k) {
                expected += a[row * n + k] * b[k * n + col];
            }
            if (expected != c[row * n + col]) {
                std::cout << "Mismatch at " << expected << " " << c[row * n + col]
                          << " " << row << " " << col << std::endl;
                return;
            }
        }
    }
    std::cout << "All Good!" << std::endl;
}
// Fills `a` with pseudo-random values in [0, 128).
// Bug fix: the original reseeded with srand(time(NULL)) on EVERY call, so
// two calls within the same second (exactly how main() initializes both
// input matrices) produced identical contents. Seed only once per process.
void Initialize(std::vector<int>& a) {
    static bool seeded = false;
    if (!seeded) {
        srand((unsigned int)time(NULL));
        seeded = true;
    }
    const int max_val = 1 << 7;  // 128
    for (auto it = a.begin(); it != a.end(); ++it) {
        *it = rand() % max_val;  // rand() already returns int; no cast needed
    }
}
// Writes the n x n row-major matrix `a` to stdout: one row per line,
// each element followed by a single space.
void PrintMatrix(std::vector<int>& a, int n) {
    for (int r = 0; r < n; ++r) {
        const int base = r * n;  // start of row r in the flat buffer
        for (int c = 0; c < n; ++c) {
            std::cout << a[base + c] << " ";
        }
        std::cout << std::endl;
    }
}
// Entry point. Usage: ./ver0 <matrix_size>
// Multiplies two random n x n int matrices on the GPU and reports the
// host->device transfer, kernel compute, and device->host transfer times
// via CUDA events. Compile with -DVALIDATE to cross-check on the CPU.
int main(int argc, char const* argv[]) {
    if (argc != 2) {
        cout << "Usage: ./ver0 <matrix_size> \n\nmatrix_size: Positive Integer" << endl;
        return 1;
    }
    int n = atoi(argv[1]);
    // atoi() returns 0 for non-numeric input; also reject negatives —
    // the original launched a zero-block grid and "validated" garbage.
    if (n <= 0) {
        cout << "Usage: ./ver0 <matrix_size> \n\nmatrix_size: Positive Integer" << endl;
        return 1;
    }
    // The grid launches exactly n / NUM_THREADS_PER_DIM blocks per
    // dimension, so a remainder would silently skip rows/columns (and
    // n < NUM_THREADS_PER_DIM would launch zero blocks). Fail loudly
    // instead of producing a partially computed matrix.
    if (n % NUM_THREADS_PER_DIM != 0) {
        cout << "matrix_size must be a multiple of " << NUM_THREADS_PER_DIM << endl;
        return 1;
    }
    // Size (in bytes) of each matrix; widen BEFORE multiplying so large n
    // does not overflow int.
    size_t bytes = (size_t)n * (size_t)n * sizeof(int);
    // Host matrices (row-major).
    vector<int> h_a(n * n);
    vector<int> h_b(n * n);
    vector<int> h_c(n * n);
    Initialize(h_a);
    Initialize(h_b);
    // Device buffers.
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    float gpu_data_transfer_time_ms, gpu_compute_time_ms, gpu_rev_data_transfer_time_ms;
    // Events used to time each phase.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // --- Host -> device transfer ---
    cudaEventRecord(start, 0);
    cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_data_transfer_time_ms, start, stop);
    cout << "Host to Device data transfer time: " << gpu_data_transfer_time_ms << "ms" << endl;
    // --- Kernel ---
    int NUM_BLOCKS_PER_DIM = n / NUM_THREADS_PER_DIM;  // exact: divisibility checked above
    dim3 threadsPerBlock(NUM_THREADS_PER_DIM, NUM_THREADS_PER_DIM);
    dim3 numBlocks(NUM_BLOCKS_PER_DIM, NUM_BLOCKS_PER_DIM);
    cudaEventRecord(start, 0);
    MatrixMul<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c, n);
    // Kernel launches do not return errors; surface bad launch configs here.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        cout << "Kernel launch failed: " << cudaGetErrorString(launch_err) << endl;
        return 1;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);  // also waits for the kernel to finish
    cudaEventElapsedTime(&gpu_compute_time_ms, start, stop);
    cout << "GPU compute time: " << gpu_compute_time_ms << "ms" << endl;
    // --- Device -> host transfer ---
    cudaEventRecord(start, 0);
    cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_rev_data_transfer_time_ms, start, stop);
    cout << "Device to Host data transfer time: " << gpu_rev_data_transfer_time_ms << "ms" << endl;
    // Optional CPU cross-check of the GPU result.
#ifdef VALIDATE
    Validate(h_a, h_b, h_c, n);
#endif
    // PrintMatrix(h_c, n);
    // Release events (the original leaked them) and device memory.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}