// Multithreading tests from CS 152 Lab 5.
// From riscv-tests.git: mt/bf_matmul/bf_matmul.c
1 //**************************************************************************
2 // Multi-threaded Matrix Multiply benchmark
3 //--------------------------------------------------------------------------
4 // TA : Christopher Celio
5 // Student:
6 //
7 //
8 // This benchmark multiplies two 2-D arrays together and writes the results to
9 // a third vector. The input data (and reference data) should be generated
10 // using the matmul_gendata.pl perl script and dumped to a file named
11 // dataset.h.
12
13
14 // print out arrays, etc.
15 //#define DEBUG
16
17 //--------------------------------------------------------------------------
18 // Includes
19
20 #include <string.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24
25 //--------------------------------------------------------------------------
26 // Input/Reference Data
27
28 typedef float data_t;
29 #include "dataset.h"
30
31
32 //--------------------------------------------------------------------------
33 // Basic Utilities and Multi-thread Support
34
35 __thread unsigned long coreid;
36 unsigned long ncores;
37
38 #include "util.h"
39
40 #define stringify_1(s) #s
41 #define stringify(s) stringify_1(s)
42 #define stats(code) do { \
43 unsigned long _c = -rdcycle(), _i = -rdinstret(); \
44 code; \
45 _c += rdcycle(), _i += rdinstret(); \
46 if (coreid == 0) \
47 printf("%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
48 stringify(code), _c, _c/DIM_SIZE/DIM_SIZE/DIM_SIZE, 10*_c/DIM_SIZE/DIM_SIZE/DIM_SIZE%10, _c/_i, 10*_c/_i%10); \
49 } while(0)
50
51
52 //--------------------------------------------------------------------------
53 // Helper functions
54
55 void printArray( char name[], int n, data_t arr[] )
56 {
57 int i;
58 if (coreid != 0)
59 return;
60
61 printf( " %10s :", name );
62 for ( i = 0; i < n; i++ )
63 printf( " %3ld ", (long) arr[i] );
64 printf( "\n" );
65 }
66
67 void __attribute__((noinline)) verify(size_t n, const data_t* test, const data_t* correct)
68 {
69 if (coreid != 0)
70 return;
71
72 size_t i;
73 for (i = 0; i < n; i++)
74 {
75 if (test[i] != correct[i])
76 {
77 printf("FAILED test[%d]= %3ld, correct[%d]= %3ld\n",
78 i, (long)test[i], i, (long)correct[i]);
79 exit(-1);
80 }
81 }
82
83 return;
84 }
85
86 //--------------------------------------------------------------------------
87 // matmul function
88
89 // single-thread, naive version
90 void __attribute__((noinline)) matmul_naive(const int lda, const data_t A[], const data_t B[], data_t C[] )
91 {
92 int i, j, k;
93
94 if (coreid > 0)
95 return;
96
97 for ( i = 0; i < lda; i++ )
98 for ( j = 0; j < lda; j++ )
99 {
100 for ( k = 0; k < lda; k++ )
101 {
102 C[i + j*lda] += A[j*lda + k] * B[k*lda + i];
103 }
104 }
105
106 }
107
108
109
110 void __attribute__((noinline)) matmul(const int lda, const data_t A[], const data_t B[], data_t C[] )
111 {
112
113 // ***************************** //
114 // **** ADD YOUR CODE HERE ***** //
115 // ***************************** //
116 //
117 // feel free to make a separate function for MI and MSI versions.
118 int j, k, i;
119 data_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
120 data_t temp8, temp9, temp10, temp11, temp12, temp13, temp14, temp15;
121 if(coreid == 0) {
122 for(j = 0; j < 32; j++) {
123 temp0 = 0; //C[j*lda];
124 temp1 = 0; //C[1 + j*lda];
125 temp2 = 0; //C[2 + j*lda];
126 temp3 = 0; //C[3 + j*lda];
127 temp4 = 0; //C[4 + j*lda];
128 temp5 = 0; //C[5 + j*lda];
129 temp6 = 0; //C[6 + j*lda];
130 temp7 = 0; //C[7 + j*lda];
131 temp8 = 0; //C[8 + j*lda];
132 temp9 = 0; //C[9 + j*lda];
133 temp10 = 0; //C[10 + j*lda];
134 temp11 = 0; //C[11 + j*lda];
135 temp12 = 0; //C[12 + j*lda];
136 temp13 = 0; //C[13 + j*lda];
137 temp14 = 0; //C[14 + j*lda];
138 temp15 = 0; //C[15 + j*lda];
139 for(k = 0; k < 32; k++) {
140 temp0 += A[j*lda + k] * B[k*lda];
141 temp1 += A[j*lda + k] * B[1+k*lda];
142 temp2 += A[j*lda + k] * B[2+k*lda];
143 temp3 += A[j*lda + k] * B[3+k*lda];
144 temp4 += A[j*lda + k] * B[4+k*lda];
145 temp5 += A[j*lda + k] * B[5+k*lda];
146 temp6 += A[j*lda + k] * B[6+k*lda];
147 temp7 += A[j*lda + k] * B[7+k*lda];
148 temp8 += A[j*lda + k] * B[8+k*lda];
149 temp9 += A[j*lda + k] * B[9+k*lda];
150 temp10 += A[j*lda + k] * B[10+k*lda];
151 temp11 += A[j*lda + k] * B[11+k*lda];
152 temp12 += A[j*lda + k] * B[12+k*lda];
153 temp13 += A[j*lda + k] * B[13+k*lda];
154 temp14 += A[j*lda + k] * B[14+k*lda];
155 temp15 += A[j*lda + k] * B[15+k*lda];
156 }
157 C[j*lda] = temp0;
158 C[1 + j*lda] = temp1;
159 C[2 + j*lda] = temp2;
160 C[3 + j*lda] = temp3;
161 C[4 + j*lda] = temp4;
162 C[5 + j*lda] = temp5;
163 C[6 + j*lda] = temp6;
164 C[7 + j*lda] = temp7;
165 C[8 + j*lda] = temp8;
166 C[9 + j*lda] = temp9;
167 C[10 + j*lda] = temp10;
168 C[11 + j*lda] = temp11;
169 C[12 + j*lda] = temp12;
170 C[13 + j*lda] = temp13;
171 C[14 + j*lda] = temp14;
172 C[15 + j*lda] = temp15;
173 }
174 }
175
176 else {
177 for(j = 0; j < 32; j++) {
178 temp0 = 0; //C[16+j*lda];
179 temp1 = 0; //C[17+j*lda];
180 temp2 = 0; //C[18+j*lda];
181 temp3 = 0; //C[19+j*lda];
182 temp4 = 0; //C[20+j*lda];
183 temp5 = 0; //C[21+j*lda];
184 temp6 = 0; //C[22+j*lda];
185 temp7 = 0; //C[23+j*lda];
186 temp8 = 0; //C[24+j*lda];
187 temp9 = 0; //C[25+j*lda];
188 temp10 = 0; //C[26+j*lda];
189 temp11 = 0; //C[27+j*lda];
190 temp12 = 0; //C[28+j*lda];
191 temp13 = 0; //C[29+j*lda];
192 temp14 = 0; //C[30+j*lda];
193 temp15 = 0; //C[31+j*lda];
194 for(k = 0; k < 32; k++) {
195 temp0 += A[j*lda + k] * B[16+k*lda];
196 temp1 += A[j*lda + k] * B[17+k*lda];
197 temp2 += A[j*lda + k] * B[18+k*lda];
198 temp3 += A[j*lda + k] * B[19+k*lda];
199 temp4 += A[j*lda + k] * B[20+k*lda];
200 temp5 += A[j*lda + k] * B[21+k*lda];
201 temp6 += A[j*lda + k] * B[22+k*lda];
202 temp7 += A[j*lda + k] * B[23+k*lda];
203 temp8 += A[j*lda + k] * B[24+k*lda];
204 temp9 += A[j*lda + k] * B[25+k*lda];
205 temp10 += A[j*lda + k] * B[26+k*lda];
206 temp11 += A[j*lda + k] * B[27+k*lda];
207 temp12 += A[j*lda + k] * B[28+k*lda];
208 temp13 += A[j*lda + k] * B[29+k*lda];
209 temp14 += A[j*lda + k] * B[30+k*lda];
210 temp15 += A[j*lda + k] * B[31+k*lda];
211 }
212 C[16 + j*lda] = temp0;
213 C[17 + j*lda] = temp1;
214 C[18 + j*lda] = temp2;
215 C[19 + j*lda] = temp3;
216 C[20 + j*lda] = temp4;
217 C[21 + j*lda] = temp5;
218 C[22 + j*lda] = temp6;
219 C[23 + j*lda] = temp7;
220 C[24 + j*lda] = temp8;
221 C[25 + j*lda] = temp9;
222 C[26 + j*lda] = temp10;
223 C[27 + j*lda] = temp11;
224 C[28 + j*lda] = temp12;
225 C[29 + j*lda] = temp13;
226 C[30 + j*lda] = temp14;
227 C[31 + j*lda] = temp15;
228 }
229 }
230
231 }
232
233 //--------------------------------------------------------------------------
234 // Main
235 //
236 // all threads start executing thread_entry(). Use their "coreid" to
237 // differentiate between threads (each thread is running on a separate core).
238
239 void thread_entry(int cid, int nc)
240 {
241 coreid = cid;
242 ncores = nc;
243
244 // static allocates data in the binary, which is visible to both threads
245 static data_t results_data[ARRAY_SIZE];
246
247
248 // // Execute the provided, naive matmul
249 // barrier();
250 // stats(matmul_naive(DIM_SIZE, input1_data, input2_data, results_data); barrier());
251 //
252 //
253 // // verify
254 // verify(ARRAY_SIZE, results_data, verify_data);
255 //
256 // // clear results from the first trial
257 // size_t i;
258 // if (coreid == 0)
259 // for (i=0; i < ARRAY_SIZE; i++)
260 // results_data[i] = 0;
261 // barrier();
262
263
264 // Execute your faster matmul
265 barrier();
266 stats(matmul(DIM_SIZE, input1_data, input2_data, results_data); barrier());
267
268 #ifdef DEBUG
269 printArray("results:", ARRAY_SIZE, results_data);
270 printArray("verify :", ARRAY_SIZE, verify_data);
271 #endif
272
273 // verify
274 verify(ARRAY_SIZE, results_data, verify_data);
275 barrier();
276
277 exit(0);
278 }
279