// Multithreading tests from CS 152 Lab 5.
// Source: riscv-tests.git / mt / ae_matmul / matmul_mi.c
//**************************************************************************
// Multi-threaded Matrix Multiply benchmark
//--------------------------------------------------------------------------
// TA     : Christopher Celio
// Student:
//
//
// This benchmark multiplies two 2-D arrays together and writes the results to
// a third vector. The input data (and reference data) should be generated
// using the matmul_gendata.pl perl script and dumped to a file named
// dataset.h.


// print out arrays, etc.
//#define DEBUG

//--------------------------------------------------------------------------
// Includes

#include <string.h>
#include <stdlib.h>
#include <stdio.h>


//--------------------------------------------------------------------------
// Input/Reference Data

typedef float data_t;
#include "dataset.h"


//--------------------------------------------------------------------------
// Basic Utilities and Multi-thread Support

__thread unsigned long coreid;
unsigned long ncores;

#include "util.h"

40 #define stringify_1(s) #s
41 #define stringify(s) stringify_1(s)
42 #define stats(code) do { \
43 unsigned long _c = -rdcycle(), _i = -rdinstret(); \
44 code; \
45 _c += rdcycle(), _i += rdinstret(); \
46 if (coreid == 0) \
47 printf("%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
48 stringify(code), _c, _c/DIM_SIZE/DIM_SIZE/DIM_SIZE, 10*_c/DIM_SIZE/DIM_SIZE/DIM_SIZE%10, _c/_i, 10*_c/_i%10); \
49 } while(0)


//--------------------------------------------------------------------------
// Helper functions

55 void printArray( char name[], int n, data_t arr[] )
56 {
57 int i;
58 if (coreid != 0)
59 return;
60
61 printf( " %10s :", name );
62 for ( i = 0; i < n; i++ )
63 printf( " %3ld ", (long) arr[i] );
64 printf( "\n" );
65 }
67 void __attribute__((noinline)) verify(size_t n, const data_t* test, const data_t* correct)
68 {
69 if (coreid != 0)
70 return;
71
72 size_t i;
73 for (i = 0; i < n; i++)
74 {
75 if (test[i] != correct[i])
76 {
77 printf("FAILED test[%d]= %3ld, correct[%d]= %3ld\n",
78 i, (long)test[i], i, (long)correct[i]);
79 exit(-1);
80 }
81 }
82
83 return;
84 }

//--------------------------------------------------------------------------
// matmul function

// single-thread, naive version
90 void __attribute__((noinline)) matmul_naive(const int lda, const data_t A[], const data_t B[], data_t C[] )
91 {
92 int i, j, k;
93
94 if (coreid > 0)
95 return;
96
97 for ( i = 0; i < lda; i++ )
98 for ( j = 0; j < lda; j++ )
99 {
100 for ( k = 0; k < lda; k++ )
101 {
102 C[i + j*lda] += A[j*lda + k] * B[k*lda + i];
103 }
104 }
105
106 }


109 void __attribute__((noinline)) matmul(const int lda, const data_t A[], const data_t B[], data_t C[] )
110 {
111
112 data_t a1;
113 data_t a2;
114 data_t a3;
115 data_t a4;
116 data_t a5;
117 data_t a6;
118 data_t a7;
119 data_t a8;
120 data_t *b1;
121 data_t *b2;
122 data_t *b3;
123 data_t *b4;
124 data_t *b5;
125 data_t *b6;
126 data_t *b7;
127 data_t *b8;
128 data_t c1;
129 data_t c2;
130 data_t c3;
131 data_t c4;
132 data_t c5;
133 data_t c6;
134 data_t c7;
135 data_t c8;
136 int i, j, k;
137 int start, end;
138 static data_t BB[1024];
139
140
141 //transpose B
142 if (coreid == 0 | coreid == 1 ) {
143 for ( k = 0; k < lda; k++) {
144 for ( i = coreid*(lda/2); i < (coreid+1)*(lda/2); i++ ) {
145 BB[i*lda + k] = B[k*lda + i];
146 }
147 }
148 }
149 barrier();
150
151 for ( int x = 0; x < ncores; x++) {
152 //split the i values into two chunks so the threads don't interfere on the B loads
153 //this could be generalized if needed, but I won't bother since it would be tricky
154 //and we already know the size and numthreads
155 start = coreid == x ? 0 : 16;
156 end = coreid == x ? 16 : 32;
157 for ( i = start; i < end; i+=8 ) {
158 for ( j = coreid*(lda/ncores); j < (coreid+1)*(lda/ncores); j++ ) {
159 c1=0;c2=0;c3=0;c4=0;c5=0;c6=0;c7=0;c8=0;
160 b1 = &BB[(i+0)*lda];
161 b2 = &BB[(i+1)*lda];
162 b3 = &BB[(i+2)*lda];
163 b4 = &BB[(i+3)*lda];
164 b5 = &BB[(i+4)*lda];
165 b6 = &BB[(i+5)*lda];
166 b7 = &BB[(i+6)*lda];
167 b8 = &BB[(i+7)*lda];
168
169 for ( k = 0; k < lda; k+=8 ) {
170 a1 = A[j*lda + k+0];
171 a2 = A[j*lda + k+1];
172 a3 = A[j*lda + k+2];
173 a4 = A[j*lda + k+3];
174 a5 = A[j*lda + k+4];
175 a6 = A[j*lda + k+5];
176 a7 = A[j*lda + k+6];
177 a8 = A[j*lda + k+7];
178
179 c1 += a1 * b1[k+0];
180 c1 += a2 * b1[k+1];
181 c1 += a3 * b1[k+2];
182 c1 += a4 * b1[k+3];
183 c1 += a5 * b1[k+4];
184 c1 += a6 * b1[k+5];
185 c1 += a7 * b1[k+6];
186 c1 += a8 * b1[k+7];
187
188 c2 += a1 * b2[k+0];
189 c2 += a2 * b2[k+1];
190 c2 += a3 * b2[k+2];
191 c2 += a4 * b2[k+3];
192 c2 += a5 * b2[k+4];
193 c2 += a6 * b2[k+5];
194 c2 += a7 * b2[k+6];
195 c2 += a8 * b2[k+7];
196
197 c3 += a1 * b3[k+0];
198 c3 += a2 * b3[k+1];
199 c3 += a3 * b3[k+2];
200 c3 += a4 * b3[k+3];
201 c3 += a5 * b3[k+4];
202 c3 += a6 * b3[k+5];
203 c3 += a7 * b3[k+6];
204 c3 += a8 * b3[k+7];
205
206 c4 += a1 * b4[k+0];
207 c4 += a2 * b4[k+1];
208 c4 += a3 * b4[k+2];
209 c4 += a4 * b4[k+3];
210 c4 += a5 * b4[k+4];
211 c4 += a6 * b4[k+5];
212 c4 += a7 * b4[k+6];
213 c4 += a8 * b4[k+7];
214
215 c5 += a1 * b5[k+0];
216 c5 += a2 * b5[k+1];
217 c5 += a3 * b5[k+2];
218 c5 += a4 * b5[k+3];
219 c5 += a5 * b5[k+4];
220 c5 += a6 * b5[k+5];
221 c5 += a7 * b5[k+6];
222 c5 += a8 * b5[k+7];
223
224 c6 += a1 * b6[k+0];
225 c6 += a2 * b6[k+1];
226 c6 += a3 * b6[k+2];
227 c6 += a4 * b6[k+3];
228 c6 += a5 * b6[k+4];
229 c6 += a6 * b6[k+5];
230 c6 += a7 * b6[k+6];
231 c6 += a8 * b6[k+7];
232
233 c7 += a1 * b7[k+0];
234 c7 += a2 * b7[k+1];
235 c7 += a3 * b7[k+2];
236 c7 += a4 * b7[k+3];
237 c7 += a5 * b7[k+4];
238 c7 += a6 * b7[k+5];
239 c7 += a7 * b7[k+6];
240 c7 += a8 * b7[k+7];
241
242 c8 += a1 * b8[k+0];
243 c8 += a2 * b8[k+1];
244 c8 += a3 * b8[k+2];
245 c8 += a4 * b8[k+3];
246 c8 += a5 * b8[k+4];
247 c8 += a6 * b8[k+5];
248 c8 += a7 * b8[k+6];
249 c8 += a8 * b8[k+7];
250 }
251 C[i+0 + j*lda] += c1;
252 C[i+1 + j*lda] += c2;
253 C[i+2 + j*lda] += c3;
254 C[i+3 + j*lda] += c4;
255 C[i+4 + j*lda] += c5;
256 C[i+5 + j*lda] += c6;
257 C[i+6 + j*lda] += c7;
258 C[i+7 + j*lda] += c8;
259 }
260 }
261 }
262 }

//--------------------------------------------------------------------------
// Main
//
// all threads start executing thread_entry(). Use their "coreid" to
// differentiate between threads (each thread is running on a separate core).

270 void thread_entry(int cid, int nc)
271 {
272 coreid = cid;
273 ncores = nc;
274
275 // static allocates data in the binary, which is visible to both threads
276 static data_t results_data[ARRAY_SIZE];
277
278 /*
279 // Execute the provided, naive matmul
280 barrier();
281 stats(matmul_naive(DIM_SIZE, input1_data, input2_data, results_data); barrier());
282
283
284 // verify
285 verify(ARRAY_SIZE, results_data, verify_data);
286
287 // clear results from the first trial
288 size_t i;
289 if (coreid == 0)
290 for (i=0; i < ARRAY_SIZE; i++)
291 results_data[i] = 0;
292 barrier();
293 */
294
295
296 // Execute your faster matmul
297 barrier();
298 stats(matmul(DIM_SIZE, input1_data, input2_data, results_data); barrier());
299
300 #ifdef DEBUG
301 printArray("results:", ARRAY_SIZE, results_data);
302 printArray("verify :", ARRAY_SIZE, verify_data);
303 #endif
304
305 // verify
306 verify(ARRAY_SIZE, results_data, verify_data);
307 barrier();
308
309 exit(0);
310 }