// bc_matmul.c — multi-threaded matrix multiply benchmark (riscv-tests: mt/bc_matmul)
1 //**************************************************************************
2 // Multi-threaded Matrix Multiply benchmark
3 //--------------------------------------------------------------------------
4 // TA : Christopher Celio
5 // Student:
6 //
7 //
8 // This benchmark multiplies two 2-D arrays together and writes the results to
9 // a third vector. The input data (and reference data) should be generated
10 // using the matmul_gendata.pl perl script and dumped to a file named
11 // dataset.h.
12
13
14 // print out arrays, etc.
15 //#define DEBUG
16
17 //--------------------------------------------------------------------------
18 // Includes
19
20 #include <string.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
/* Register- and cache-blocking parameters for the optimized matmul. */
#define REG_I 8      /* register-tile width: elements of B row / C columns */
#define REG_J 2      /* register-tile height: rows of A / C */
//#define BLOCK_I 32
#define BLOCK_J 16   /* cache-block size in the j (output row) dimension */
#define BLOCK_K 16   /* cache-block size in the k (reduction) dimension */
#define LDA 32       /* matrix leading dimension the fast path is tuned for */
#define NCORES 2     /* core count the fast path is tuned for */
/* Arguments and expansion fully parenthesized so operator precedence in
   expanded arguments (e.g. MIN(end, jj + BLOCK_J)) can never bite. */
#define MIN(X,Y) (((X) < (Y)) ? (X) : (Y))
32
33 //--------------------------------------------------------------------------
34 // Input/Reference Data
35
36 typedef float data_t;
37 #include "dataset.h"
38
39
40 //--------------------------------------------------------------------------
41 // Basic Utilities and Multi-thread Support
42
43 __thread unsigned long coreid;
44 unsigned long ncores;
45
46 #include "util.h"
47
#define stringify_1(s) #s
#define stringify(s) stringify_1(s)
/* Time `code` with the RISC-V cycle and retired-instruction counters.
   The negate-then-add idiom (_c = -rdcycle(); ...; _c += rdcycle())
   yields the elapsed counts.  Core 0 prints: total cycles, cycles per
   inner-loop iteration (DIM_SIZE^3 multiply-adds), and CPI, each shown
   with one fixed-point decimal digit via the 10*x/y%10 trick. */
#define stats(code) do { \
    unsigned long _c = -rdcycle(), _i = -rdinstret(); \
    code; \
    _c += rdcycle(), _i += rdinstret(); \
    if (coreid == 0) \
      printf("%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
        stringify(code), _c, _c/DIM_SIZE/DIM_SIZE/DIM_SIZE, 10*_c/DIM_SIZE/DIM_SIZE/DIM_SIZE%10, _c/_i, 10*_c/_i%10); \
  } while(0)
58
59
60 //--------------------------------------------------------------------------
61 // Helper functions
62
63 void printArrayMT( char name[], int n, data_t arr[] )
64 {
65 int i;
66 if (coreid != 0)
67 return;
68
69 printf( " %10s :", name );
70 for ( i = 0; i < n; i++ )
71 printf( " %3ld ", (long) arr[i] );
72 printf( "\n" );
73 }
74
75 void __attribute__((noinline)) verifyMT(size_t n, const data_t* test, const data_t* correct)
76 {
77 if (coreid != 0)
78 return;
79
80 size_t i;
81 for (i = 0; i < n; i++)
82 {
83 if (test[i] != correct[i])
84 {
85 printf("FAILED test[%d]= %3ld, correct[%d]= %3ld\n",
86 i, (long)test[i], i, (long)correct[i]);
87 exit(-1);
88 }
89 }
90
91 return;
92 }
93
94 //--------------------------------------------------------------------------
95 // matmul function
96
97 // single-thread, naive version
98 void __attribute__((noinline)) matmul_naive(const int lda, const data_t A[], const data_t B[], data_t C[] )
99 {
100 int i, j, k;
101
102 if (coreid > 0)
103 return;
104
105 for ( i = 0; i < lda; i++ )
106 for ( j = 0; j < lda; j++ )
107 {
108 for ( k = 0; k < lda; k++ )
109 {
110 C[i + j*lda] += A[j*lda + k] * B[k*lda + i];
111 }
112 }
113
114 }
115
116
117
118 void __attribute__((noinline)) matmul(const int lda, const data_t A[], const data_t B[], data_t C[] )
119 {
120
121 // ***************************** //
122 // **** ADD YOUR CODE HERE ***** //
123 // ***************************** //
124 //
125 // feel free to make a separate function for MI and MSI versions.
126
127 int i, j, k, ri, rj, ii, jj, kk;
128 data_t *Aj, *Cj, *Bi;
129 data_t c[REG_I][REG_J], a[REG_J], b[REG_I];
130 size_t start = coreid * (LDA / NCORES), end = (coreid == NCORES - 1 ? LDA : (coreid + 1) * (LDA / NCORES));
131
132 /* if (coreid > 0) { */
133 /* return; */
134 /* } */
135 /* start = 0, end = lda; */
136 if (ncores == NCORES && lda == LDA) {
137 for (jj = start; jj < end; jj += BLOCK_J)
138 for (kk = 0; kk < LDA; kk += BLOCK_K)
139 //for (ii = 0; ii < LDA; ii += BLOCK_I)
140 for (j = jj; j < MIN(end, jj + BLOCK_J); j += REG_J) {
141 Aj = A + j*LDA;
142 Cj = C + j*LDA;
143 for (i = 0; i < LDA; i += REG_I) {
144 /* Load C in register blocks. */
145 Bi = B + i;
146 for (ri = 0; ri < REG_I; ri++) {
147 for (rj = 0; rj < REG_J; rj++) {
148 c[ri][rj] = Cj[i + ri + ( rj)*LDA];
149 }
150 }
151
152
153 for (k = kk; k < MIN(LDA, kk + BLOCK_K); k++) {
154 /* Load a,b in register blocks. */
155 /* for (rj = 0; rj < REG_J; rj++) {
156 a[rj] = A[(j + rj)*LDA + k];
157 }*/
158 /* for (ri = 0; ri < REG_I; ri++) { */
159 /* b[ri] = Bi[k*LDA + ri]; */
160 /* } */
161 /* /\* Compute C in register blocks. *\/ */
162 /* for (rj = 0; rj < REG_J; rj++) { */
163 /* a[rj] = Aj[( rj)*LDA + k]; */
164 /* for (ri = 0; ri < REG_I; ri++) { */
165 /* c[ri][rj] += a[rj] * b[ri]; */
166 /* } */
167 /* } */
168 a[0] = Aj[k];
169 a[1] = Aj[k + LDA];
170 b[0] = Bi[k*LDA];
171 b[1] = Bi[k*LDA + 1];
172 b[2] = Bi[k*LDA + 2];
173 b[3] = Bi[k*LDA + 3];
174 b[4] = Bi[k*LDA + 4];
175 b[5] = Bi[k*LDA + 5];
176 b[6] = Bi[k*LDA + 6];
177 b[7] = Bi[k*LDA + 7];
178
179
180 c[0][0] += b[0] * a[0];
181 c[0][1] += b[0] * a[1];
182 c[1][0] += b[1] * a[0];
183 c[1][1] += b[1] * a[1];
184 c[2][0] += b[2] * a[0];
185 c[2][1] += b[2] * a[1];
186 c[3][0] += b[3] * a[0];
187 c[3][1] += b[3] * a[1];
188 c[4][0] += b[4] * a[0];
189 c[4][1] += b[4] * a[1];
190 c[5][0] += b[5] * a[0];
191 c[5][1] += b[5] * a[1];
192 c[6][0] += b[6] * a[0];
193 c[6][1] += b[6] * a[1];
194 c[7][0] += b[7] * a[0];
195 c[7][1] += b[7] * a[1];
196
197
198 /* c[0][0] += b[0] * a[0]; */
199 /* c[1][1] += b[1] * a[1]; */
200 /* c[2][0] += b[2] * a[0]; */
201 /* c[3][1] += b[3] * a[1]; */
202 /* c[4][0] += b[4] * a[0]; */
203 /* c[5][1] += b[5] * a[1]; */
204 /* c[6][0] += b[6] * a[0]; */
205 /* c[7][1] += b[7] * a[1]; */
206 /* c[0][0] += b[0] * a[0]; */
207 /* c[1][1] += b[1] * a[1]; */
208 /* c[2][0] += b[2] * a[0]; */
209 /* c[3][1] += b[3] * a[1]; */
210 /* c[4][0] += b[4] * a[0]; */
211 /* c[5][1] += b[5] * a[1]; */
212 /* c[6][0] += b[6] * a[0]; */
213 /* c[7][1] += b[7] * a[1]; */
214
215 }
216
217 /* store C in register blocks. */
218 for (ri = 0; ri < REG_I; ri++) {
219 for (rj = 0; rj < REG_J; rj++) {
220 Cj[i + ri + (rj)*LDA] = c[ri][rj];
221 }
222 }
223 }
224
225
226
227
228 }
229 /* We only care about performance for 32x32 matrices and 2 cores. Otherwise just naive mat_mul */
230 } else {
231 if (coreid > 0)
232 return;
233
234 for ( i = 0; i < lda; i++ )
235 for ( j = 0; j < lda; j++ )
236 for ( k = 0; k < lda; k++ )
237 C[i + j*lda] += A[j*lda + k] * B[k*lda + i];
238 }
239 }
240
241 //--------------------------------------------------------------------------
242 // Main
243 //
244 // all threads start executing thread_entry(). Use their "coreid" to
245 // differentiate between threads (each thread is running on a separate core).
246
// All threads start executing thread_entry(); each runs on its own core.
// `cid` identifies the thread and is stashed in the thread-local `coreid`;
// `nc` is the total core count, stored in the shared `ncores`.
void thread_entry(int cid, int nc)
{
   coreid = cid;
   ncores = nc;

   // static allocates data in the binary, which is visible to both threads
   static data_t results_data[ARRAY_SIZE];


   // /* // Execute the provided, naive matmul */
   // barrier(nc);
   // stats(matmul_naive(DIM_SIZE, input1_data, input2_data, results_data); barrier(nc));
   //
   //
   // // verify
   // verifyMT(ARRAY_SIZE, results_data, verify_data);
   //
   // // clear results from the first trial
   // size_t i;
   // if (coreid == 0)
   //    for (i=0; i < ARRAY_SIZE; i++)
   //       results_data[i] = 0;
   // barrier(nc);


   // Execute your faster matmul.  The barrier before stats() lines both
   // cores up at the start of the timed region; the barrier inside the
   // timed expression makes the measurement cover the slowest core.
   barrier(nc);
   stats(matmul(DIM_SIZE, input1_data, input2_data, results_data); barrier(nc));

#ifdef DEBUG
   printArrayMT("results:", ARRAY_SIZE, results_data);
   printArrayMT("verify :", ARRAY_SIZE, verify_data);
#endif

   // verify (core 0 checks; verifyMT calls exit(-1) on any mismatch)
   verifyMT(ARRAY_SIZE, results_data, verify_data);
   barrier(nc);

   exit(0);
}
287