//**************************************************************************
// Multi-threaded Matrix Multiply benchmark
//--------------------------------------------------------------------------
// TA     : Christopher Celio
// Student:
//
//
// This benchmark multiplies two 2-D arrays together and writes the results to
// a third matrix. The input data (and reference data) should be generated
// using the matmul_gendata.pl perl script and dumped to a file named
// dataset.h.


// print out arrays, etc.
//#define DEBUG

//--------------------------------------------------------------------------
// Includes

#include <string.h>
#include <stdlib.h>
#include <stdio.h>


//--------------------------------------------------------------------------
// Input/Reference Data

typedef float data_t;
#include "dataset.h"
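// Note: dataset.h is generated by matmul_gendata.pl. Based on how it is used
// below, it is expected to provide at least DIM_SIZE, ARRAY_SIZE
// (= DIM_SIZE*DIM_SIZE), and the data_t arrays input1_data[], input2_data[],
// and verify_data[]. A sketch of the assumed layout (values are an assumption;
// the measurements recorded later in this file used DIM_SIZE = 32):
//
//   #define DIM_SIZE   32
//   #define ARRAY_SIZE (DIM_SIZE * DIM_SIZE)
//   static data_t input1_data[ARRAY_SIZE] = { ... };
//   static data_t input2_data[ARRAY_SIZE] = { ... };
//   static data_t verify_data[ARRAY_SIZE] = { ... };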


//--------------------------------------------------------------------------
// Basic Utilities and Multi-thread Support

__thread unsigned long coreid;
unsigned long ncores;

#include "util.h"
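// Note: util.h is part of the riscv-tests mt harness. The code below assumes
// it (or the surrounding environment) provides roughly the following; exact
// signatures may differ:
//
//   void barrier(int ncores);        // all-core synchronization point
//   unsigned long rdcycle(void);     // read the cycle counter
//   unsigned long rdinstret(void);   // read the retired-instruction counter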

#define stringify_1(s) #s
#define stringify(s) stringify_1(s)
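// stats(code): run `code` once while measuring elapsed cycles (_c) and retired
// instructions (_i) via rdcycle()/rdinstret(); core 0 then prints total cycles,
// cycles per inner-loop iteration (total / DIM_SIZE^3), and CPI, each to one
// decimal place using integer arithmetic (the bare-metal printf is assumed not
// to support floating point). It is invoked below as
// stats(matmul(DIM_SIZE, ...); barrier(nc)), so the final barrier is included
// in the measurement.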
#define stats(code) do { \
    unsigned long _c = -rdcycle(), _i = -rdinstret(); \
    code; \
    _c += rdcycle(), _i += rdinstret(); \
    if (coreid == 0) \
      printf("%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
             stringify(code), _c, _c/DIM_SIZE/DIM_SIZE/DIM_SIZE, 10*_c/DIM_SIZE/DIM_SIZE/DIM_SIZE%10, _c/_i, 10*_c/_i%10); \
  } while(0)


//--------------------------------------------------------------------------
// Helper functions

void printArrayMT( char name[], int n, data_t arr[] )
{
  int i;
  if (coreid != 0)
    return;

  printf( " %10s :", name );
  for ( i = 0; i < n; i++ )
    printf( " %3ld ", (long) arr[i] );
  printf( "\n" );
}

void __attribute__((noinline)) verifyMT(size_t n, const data_t* test, const data_t* correct)
{
  if (coreid != 0)
    return;

  size_t i;
  for (i = 0; i < n; i++)
  {
    if (test[i] != correct[i])
    {
      // cast the size_t index to long so it matches the %ld conversions
      printf("FAILED test[%ld]= %3ld, correct[%ld]= %3ld\n",
             (long)i, (long)test[i], (long)i, (long)correct[i]);
      exit(-1);
    }
  }

  return;
}

//--------------------------------------------------------------------------
// matmul function

// single-thread, naive version
void __attribute__((noinline)) matmul_naive(const int lda, const data_t A[], const data_t B[], data_t C[] )
{
  int i, j, k;

  if (coreid > 0)
    return;

  for ( i = 0; i < lda; i++ )
    for ( j = 0; j < lda; j++ )
    {
      for ( k = 0; k < lda; k++ )
      {
        C[i + j*lda] += A[j*lda + k] * B[k*lda + i];
      }
    }

}
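// Note on the naive kernel: in the innermost k loop, A[j*lda + k] is read with
// unit stride, but B[k*lda + i] strides through memory by lda elements, and
// C[i + j*lda] is re-read and re-written on every k iteration. The optimized
// version below addresses both points by transposing B once and accumulating
// each output element in a local variable.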



void __attribute__((noinline)) matmul(const int lda, const data_t A[], const data_t B[], data_t C[] )
{

  // ***************************** //
  // **** ADD YOUR CODE HERE ***** //
  // ***************************** //
  //
  // feel free to make a separate function for MI and MSI versions.

  int i, j, k, n, m;

  // Per-thread accumulators (the original kept c1 and c2 as globals; they are
  // declared locally here so each core gets its own copy instead of sharing a
  // single pair of accumulators between cores).
  data_t c1, c2;

  // Scratch storage for B transposed. The original relied on a definition
  // elsewhere; an ARRAY_SIZE-element static array is assumed to be large
  // enough, since lda == DIM_SIZE at the call site. It is shared by all cores,
  // but every core writes identical values before the barrier below.
  static data_t bTranspose[ARRAY_SIZE];


  //matmul_naive(32, input1_data, input2_data, results_data); barrier(nc): 957424 cycles, 29.2 cycles/iter, 3.6 CPI
  //matmul(32, input1_data, input2_data, results_data); barrier(nc): 340408 cycles, 10.3 cycles/iter, 1.8 CPI

  // Transpose B so the innermost loop below reads both A and bTranspose with
  // unit stride instead of striding through B by lda.
  for (n = 0; n < lda; n += 1) {
    for (m = 0; m < lda; m += 1) {
      bTranspose[lda*m + n] = B[lda*n + m];
    }
  }
  barrier(ncores);

  // Interleave rows across cores: core `coreid` computes rows j and j+2 for
  // j = coreid, coreid + 2*ncores, ... (this indexing assumes ncores == 2 and
  // lda a multiple of 2*ncores). Handling two rows per iteration reuses each
  // bTranspose row (a column of B) for two output elements.
  for ( j = coreid; j < lda; j += 2*ncores ) {
    for ( i = 0; i < lda; i += 1 ){
      c1 = 0;
      c2 = 0;
      for ( k = 0; k < lda; k += 1 ) {
        c1 += A[j * lda + k] * bTranspose[i*lda + k];
        c2 += A[(j+2) * lda + k] * bTranspose[i*lda + k];

        //barrier(nc);
      }

      C[i + j * lda] = c1;
      C[i + (j+2) * lda] = c2;
      barrier(ncores);
    }
    //barrier(nc);
  }




  // Earlier versions, kept (commented out) together with their measured results:

  //matmul_naive(32, input1_data, input2_data, results_data); barrier(nc): 983609 cycles, 30.0 cycles/iter, 3.7 CPI
  //matmul(32, input1_data, input2_data, results_data); barrier(nc): 389942 cycles, 11.9 cycles/iter, 2.5 CPI

  /*
  for ( j = coreid; j < lda; j += 2*ncores ) {
    for ( i = 0; i < lda; i += 1 ){
      c1 = 0; //global vars c1, c2
      c2 = 0;
      for ( k = 0; k < lda; k += 1 ) {
        c1 += A[j * lda + k] * B[k*lda + i];
        c2 += A[(j+2) * lda + k] * B[k*lda + i];

        //barrier(nc);
      }

      C[i + j * lda] = c1;
      C[i + (j+2) * lda] = c2;
      barrier(nc);
    }
    //barrier(nc);
  }
  */

  // matmul_naive(32, input1_data, input2_data, results_data); barrier(nc): 973781 cycles, 29.7 cycles/iter, 3.7 CPI
  // matmul(32, input1_data, input2_data, results_data); barrier(nc): 461066 cycles, 14.0 cycles/iter, 3.5 CPI
  // for ( k = 0; k < lda; k += 1 ) {
  //   for ( j = coreid; j < lda; j += 2*ncores ) {
  //     for ( i = 0; i < lda; i += 1 ){
  //       C[i + j * lda] += A[j * lda + k] * B[k*lda + i];
  //       C[i + (j+2) * lda] += A[(j+2) * lda + k] * B[k*lda + i];
  //       //barrier(nc);
  //     }
  //     barrier(nc);
  //   }
  //   //barrier(nc);
  // }


  // matmul_naive(32, input1_data, input2_data, results_data); barrier(nc): 965136 cycles, 29.4 cycles/iter, 3.7 CPI
  // matmul(32, input1_data, input2_data, results_data); barrier(nc): 513779 cycles, 15.6 cycles/iter, 3.2 CPI

  // for ( j = coreid; j < lda; j += 2*ncores ) {
  //   for ( i = 0; i < lda; i += 1 ){
  //     for ( k = 0; k < lda; k += 1 ) {
  //       C[i + j * lda] += A[j * lda + k] * B[k*lda + i];
  //       C[i + (j+2) * lda] += A[(j+2) * lda + k] * B[k*lda + i];

  //       //barrier(nc);
  //     }
  //     barrier(nc);
  //   }
  //   //barrier(nc);
  // }


  // matmul_naive(32, input1_data, input2_data, results_data); barrier(nc): 937892 cycles, 28.6 cycles/iter, 3.6 CPI
  // matmul(32, input1_data, input2_data, results_data); barrier(nc): 576478 cycles, 17.5 cycles/iter, 3.5 CPI

  // for ( i = 0; i < lda; i += 1 ){
  //   for ( j = coreid; j < lda; j += 2*ncores ) {
  //     for ( k = 0; k < lda; k += 1 ) {
  //       C[i + j * lda] += A[j * lda + k] * B[k*lda + i];
  //       C[i + (j+2) * lda] += A[(j+2) * lda + k] * B[k*lda + i];

  //       //barrier(nc);
  //     }
  //     barrier(nc);
  //   }
  //   //barrier(nc);
  // }

  //for ( i = coreid; i < lda; i += ncores ){
  //  for ( j = coreid; j < lda; j += ncores ) {
  //    for ( k = coreid; k < lda; k += ncores ) {
  //      C[i + j*lda] += A[j*lda + k] * B[k*lda + i];
  //    }
  //barrier(nc);
  //  }
  //}
}
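
// Purely as an illustrative sketch (not part of the original benchmark, and
// disabled here): the row interleaving in matmul() above bakes in ncores == 2
// by taking rows j and j+2 per iteration. The same two-rows-per-step idea can
// cover all rows for any core count (and any even lda) by giving each core a
// contiguous pair of rows per step. `Bt` is assumed to hold B transposed.
#if 0
static void matmul_pairs_sketch(const int lda, const data_t A[],
                                const data_t Bt[], data_t C[])
{
  int i, j, k;
  data_t c1, c2;

  // core `coreid` takes row pairs (j, j+1) for j = 2*coreid, 2*(coreid + ncores), ...
  for (j = 2*coreid; j < lda; j += 2*ncores) {
    for (i = 0; i < lda; i++) {
      c1 = 0;
      c2 = 0;
      for (k = 0; k < lda; k++) {
        c1 += A[j*lda + k]     * Bt[i*lda + k];
        c2 += A[(j+1)*lda + k] * Bt[i*lda + k];
      }
      C[i + j*lda]     = c1;
      C[i + (j+1)*lda] = c2;
    }
  }
}
#endif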

//--------------------------------------------------------------------------
// Main
//
// All threads start executing thread_entry(). Use their "coreid" to
// differentiate between threads (each thread runs on a separate core).

void thread_entry(int cid, int nc)
{
  coreid = cid;
  ncores = nc;

  // static allocates data in the binary, which is visible to both threads
  static data_t results_data[ARRAY_SIZE];


//  // Execute the provided, naive matmul
//  barrier(nc);
//  stats(matmul_naive(DIM_SIZE, input1_data, input2_data, results_data); barrier(nc));
//
//
//  // verify
//  verifyMT(ARRAY_SIZE, results_data, verify_data);
//
//  // clear results from the first trial
//  size_t i;
//  if (coreid == 0)
//    for (i=0; i < ARRAY_SIZE; i++)
//      results_data[i] = 0;
//  barrier(nc);


  // Execute your faster matmul
  barrier(nc);
  stats(matmul(DIM_SIZE, input1_data, input2_data, results_data); barrier(nc));

#ifdef DEBUG
  printArrayMT("results:", ARRAY_SIZE, results_data);
  printArrayMT("verify :", ARRAY_SIZE, verify_data);
#endif

  // verify
  verifyMT(ARRAY_SIZE, results_data, verify_data);
  barrier(nc);

  exit(0);
}