//**************************************************************************
// Multi-threaded Matrix Multiply benchmark
//--------------------------------------------------------------------------
// TA : Christopher Celio
//
// This benchmark multiplies two 2-D arrays together and writes the results to
// a third array. The input data (and reference data) should be generated
// using the matmul_gendata.pl perl script and dumped to a file; printf is
// used to print out arrays, etc.
//--------------------------------------------------------------------------

//--------------------------------------------------------------------------
// Input/Reference Data

//--------------------------------------------------------------------------
// Basic Utilities and Multi-thread Support
// Per-thread (one thread per core) identifier, set at thread startup;
// used to divide work between cores and to gate single-core actions.
__thread unsigned long coreid;
// stringify(s) expands its argument and converts the expansion to a string
// literal (the two-level #/indirection idiom so macro arguments are expanded
// before stringification).
#define stringify_1(s) #s
#define stringify(s) stringify_1(s)

// stats(code): execute `code`, measuring elapsed cycles and retired
// instructions with the RISC-V rdcycle/rdinstret counters, then print the
// total cycle count, cycles per inner-loop iteration (DIM_SIZE^3 multiply-
// accumulates), and CPI — each to one decimal place using integer math only.
// NOTE(review): the extracted source was missing the `code;` line and the
// closing `} while(0)`, so the macro timed nothing and was unterminated;
// both are restored here.
#define stats(code) do { \
    unsigned long _c = -rdcycle(), _i = -rdinstret(); \
    code; \
    _c += rdcycle(), _i += rdinstret(); \
    printf("%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
           stringify(code), _c, _c/DIM_SIZE/DIM_SIZE/DIM_SIZE, 10*_c/DIM_SIZE/DIM_SIZE/DIM_SIZE%10, _c/_i, 10*_c/_i%10); \
  } while(0)
//--------------------------------------------------------------------------
// Helper functions
55 void printArray( char name
[], int n
, data_t arr
[] )
61 printf( " %10s :", name
);
62 for ( i
= 0; i
< n
; i
++ )
63 printf( " %3ld ", (long) arr
[i
] );
67 void __attribute__((noinline
)) verify(size_t n
, const data_t
* test
, const data_t
* correct
)
73 for (i
= 0; i
< n
; i
++)
75 if (test
[i
] != correct
[i
])
77 printf("FAILED test[%d]= %3ld, correct[%d]= %3ld\n",
78 i
, (long)test
[i
], i
, (long)correct
[i
]);
//--------------------------------------------------------------------------
// matmul functions
89 // single-thread, naive version
90 void __attribute__((noinline
)) matmul_naive(const int lda
, const data_t A
[], const data_t B
[], data_t C
[] )
97 for ( i
= 0; i
< lda
; i
++ )
98 for ( j
= 0; j
< lda
; j
++ )
100 for ( k
= 0; k
< lda
; k
++ )
102 C
[i
+ j
*lda
] += A
[j
*lda
+ k
] * B
[k
*lda
+ i
];
109 void __attribute__((noinline
)) matmul(const int lda
, const data_t A
[], const data_t B
[], data_t C
[] )
112 data_t temp0
, temp1
, temp2
, temp3
, temp4
, temp5
, temp6
, temp7
;
113 data_t temp8
, temp9
, temp10
, temp11
, temp12
, temp13
, temp14
, temp15
;
117 for(j
= 0; j
< 32; j
++) {
119 temp1
= C
[1 + j
*lda
];
120 temp2
= C
[2 + j
*lda
];
121 temp3
= C
[3 + j
*lda
];
122 temp4
= C
[4 + j
*lda
];
123 temp5
= C
[5 + j
*lda
];
124 temp6
= C
[6 + j
*lda
];
125 temp7
= C
[7 + j
*lda
];
126 temp8
= C
[8 + j
*lda
];
127 temp9
= C
[9 + j
*lda
];
128 temp10
= C
[10 + j
*lda
];
129 temp11
= C
[11 + j
*lda
];
130 temp12
= C
[12 + j
*lda
];
131 temp13
= C
[13 + j
*lda
];
132 temp14
= C
[14 + j
*lda
];
133 temp15
= C
[15 + j
*lda
];
134 for(k
= 0; k
< 32; k
++) {
135 temp0
+= A
[j
*lda
+ k
] * B
[k
*lda
];
136 temp1
+= A
[j
*lda
+ k
] * B
[1+k
*lda
];
137 temp2
+= A
[j
*lda
+ k
] * B
[2+k
*lda
];
138 temp3
+= A
[j
*lda
+ k
] * B
[3+k
*lda
];
139 temp4
+= A
[j
*lda
+ k
] * B
[4+k
*lda
];
140 temp5
+= A
[j
*lda
+ k
] * B
[5+k
*lda
];
141 temp6
+= A
[j
*lda
+ k
] * B
[6+k
*lda
];
142 temp7
+= A
[j
*lda
+ k
] * B
[7+k
*lda
];
143 temp8
+= A
[j
*lda
+ k
] * B
[8+k
*lda
];
144 temp9
+= A
[j
*lda
+ k
] * B
[9+k
*lda
];
145 temp10
+= A
[j
*lda
+ k
] * B
[10+k
*lda
];
146 temp11
+= A
[j
*lda
+ k
] * B
[11+k
*lda
];
147 temp12
+= A
[j
*lda
+ k
] * B
[12+k
*lda
];
148 temp13
+= A
[j
*lda
+ k
] * B
[13+k
*lda
];
149 temp14
+= A
[j
*lda
+ k
] * B
[14+k
*lda
];
150 temp15
+= A
[j
*lda
+ k
] * B
[15+k
*lda
];
153 C
[1 + j
*lda
] = temp1
;
154 C
[2 + j
*lda
] = temp2
;
155 C
[3 + j
*lda
] = temp3
;
156 C
[4 + j
*lda
] = temp4
;
157 C
[5 + j
*lda
] = temp5
;
158 C
[6 + j
*lda
] = temp6
;
159 C
[7 + j
*lda
] = temp7
;
160 C
[8 + j
*lda
] = temp8
;
161 C
[9 + j
*lda
] = temp9
;
162 C
[10 + j
*lda
] = temp10
;
163 C
[11 + j
*lda
] = temp11
;
164 C
[12 + j
*lda
] = temp12
;
165 C
[13 + j
*lda
] = temp13
;
166 C
[14 + j
*lda
] = temp14
;
167 C
[15 + j
*lda
] = temp15
;
172 for(j
= 0; j
< 32; j
++) {
173 temp0
= C
[16 + j
*lda
];
174 temp1
= C
[17 + j
*lda
];
175 temp2
= C
[18 + j
*lda
];
176 temp3
= C
[19 + j
*lda
];
177 temp4
= C
[20 + j
*lda
];
178 temp5
= C
[21 + j
*lda
];
179 temp6
= C
[22 + j
*lda
];
180 temp7
= C
[23 + j
*lda
];
181 temp8
= C
[24 + j
*lda
];
182 temp9
= C
[25 + j
*lda
];
183 temp10
= C
[26 + j
*lda
];
184 temp11
= C
[27 + j
*lda
];
185 temp12
= C
[28 + j
*lda
];
186 temp13
= C
[29 + j
*lda
];
187 temp14
= C
[30 + j
*lda
];
188 temp15
= C
[31 + j
*lda
];
189 for(k
= 0; k
< 32; k
++) {
190 temp0
+= A
[j
*lda
+ k
] * B
[16 + k
*lda
];
191 temp1
+= A
[j
*lda
+ k
] * B
[17 + k
*lda
];
192 temp2
+= A
[j
*lda
+ k
] * B
[18 + k
*lda
];
193 temp3
+= A
[j
*lda
+ k
] * B
[19 + k
*lda
];
194 temp4
+= A
[j
*lda
+ k
] * B
[20 + k
*lda
];
195 temp5
+= A
[j
*lda
+ k
] * B
[21 + k
*lda
];
196 temp6
+= A
[j
*lda
+ k
] * B
[22 + k
*lda
];
197 temp7
+= A
[j
*lda
+ k
] * B
[23 + k
*lda
];
198 temp8
+= A
[j
*lda
+ k
] * B
[24 + k
*lda
];
199 temp9
+= A
[j
*lda
+ k
] * B
[25 + k
*lda
];
200 temp10
+= A
[j
*lda
+ k
] * B
[26 + k
*lda
];
201 temp11
+= A
[j
*lda
+ k
] * B
[27 + k
*lda
];
202 temp12
+= A
[j
*lda
+ k
] * B
[28 + k
*lda
];
203 temp13
+= A
[j
*lda
+ k
] * B
[29 + k
*lda
];
204 temp14
+= A
[j
*lda
+ k
] * B
[30 + k
*lda
];
205 temp15
+= A
[j
*lda
+ k
] * B
[31 + k
*lda
];
207 C
[16 + j
*lda
] = temp0
;
208 C
[17 + j
*lda
] = temp1
;
209 C
[18 + j
*lda
] = temp2
;
210 C
[19 + j
*lda
] = temp3
;
211 C
[20 + j
*lda
] = temp4
;
212 C
[21 + j
*lda
] = temp5
;
213 C
[22 + j
*lda
] = temp6
;
214 C
[23 + j
*lda
] = temp7
;
215 C
[24 + j
*lda
] = temp8
;
216 C
[25 + j
*lda
] = temp9
;
217 C
[26 + j
*lda
] = temp10
;
218 C
[27 + j
*lda
] = temp11
;
219 C
[28 + j
*lda
] = temp12
;
220 C
[29 + j
*lda
] = temp13
;
221 C
[30 + j
*lda
] = temp14
;
222 C
[31 + j
*lda
] = temp15
;
//--------------------------------------------------------------------------
// Main
//
// All threads start executing thread_entry(). Use their "coreid" to
// differentiate between threads (each thread is running on a separate core).
233 void thread_entry(int cid
, int nc
)
238 // static allocates data in the binary, which is visible to both threads
239 static data_t results_data
[ARRAY_SIZE
];
242 // // Execute the provided, naive matmul
244 // stats(matmul_naive(DIM_SIZE, input1_data, input2_data, results_data); barrier());
248 // verify(ARRAY_SIZE, results_data, verify_data);
250 // // clear results from the first trial
253 // for (i=0; i < ARRAY_SIZE; i++)
254 // results_data[i] = 0;
258 // Execute your faster matmul
260 stats(matmul(DIM_SIZE
, input1_data
, input2_data
, results_data
); barrier());
263 printArray("results:", ARRAY_SIZE
, results_data
);
264 printArray("verify :", ARRAY_SIZE
, verify_data
);
268 verify(ARRAY_SIZE
, results_data
, verify_data
);