1 //**************************************************************************
2 // Multi-threaded Matrix Multiply benchmark
3 //--------------------------------------------------------------------------
4 // TA : Christopher Celio
8 // This benchmark multiplies two 2-D matrices together and writes the results to
9 // a third matrix. The input data (and reference data) should be generated
10 // using the matmul_gendata.pl perl script and dumped to a file named
14 // print out arrays, etc.
17 //--------------------------------------------------------------------------
25 //--------------------------------------------------------------------------
26 // Input/Reference Data
32 //--------------------------------------------------------------------------
33 // Basic Utilities and Multi-thread Support
35 __thread
unsigned long coreid
;
40 #define stringify_1(s) #s
41 #define stringify(s) stringify_1(s)
42 #define stats(code) do { \
43 unsigned long _c = -rdcycle(), _i = -rdinstret(); \
45 _c += rdcycle(), _i += rdinstret(); \
47 printf("%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
48 stringify(code), _c, _c/DIM_SIZE/DIM_SIZE/DIM_SIZE, 10*_c/DIM_SIZE/DIM_SIZE/DIM_SIZE%10, _c/_i, 10*_c/_i%10); \
52 //--------------------------------------------------------------------------
55 void printArray( char name
[], int n
, data_t arr
[] )
61 printf( " %10s :", name
);
62 for ( i
= 0; i
< n
; i
++ )
63 printf( " %3ld ", (long) arr
[i
] );
67 void __attribute__((noinline
)) verify(size_t n
, const data_t
* test
, const data_t
* correct
)
73 for (i
= 0; i
< n
; i
++)
75 if (test
[i
] != correct
[i
])
77 printf("FAILED test[%d]= %3ld, correct[%d]= %3ld\n",
78 i
, (long)test
[i
], i
, (long)correct
[i
]);
86 //--------------------------------------------------------------------------
89 // single-thread, naive version
90 void __attribute__((noinline
)) matmul_naive(const int lda
, const data_t A
[], const data_t B
[], data_t C
[] )
97 for ( i
= 0; i
< lda
; i
++ )
98 for ( j
= 0; j
< lda
; j
++ )
100 for ( k
= 0; k
< lda
; k
++ )
102 C
[i
+ j
*lda
] += A
[j
*lda
+ k
] * B
[k
*lda
+ i
];
108 void __attribute__((noinline
)) matmul(const int lda
, const data_t A
[], const data_t B
[], data_t C
[] )
110 static __thread
int i
, j
, k
;
111 static __thread data_t tempA0
, tempA1
, tempA2
, tempA3
, tempA4
, tempA5
, tempA6
, tempA7
;
112 static __thread data_t tempC0
, tempC1
, tempC2
, tempC3
, tempC4
, tempC5
, tempC6
, tempC7
, tempC8
, tempC9
, tempC10
, tempC11
, tempC12
, tempC13
, tempC14
, tempC15
;
114 static __thread
int start
, end
, jStride
, jToRow
, jToCol
;
115 static data_t A1
[1024], B1
[1024];;
118 end
= (coreid
+1) << 9;
122 for (j
=start
; j
< end
; j
+=jStride
) {
133 for ( i
=0; i
< lda
; i
+=2 ) {
134 tempA0
= A
[i
+ jToRow
];
135 tempA1
= A
[i
+1 + jToRow
];
136 tempC0
+= tempA0
* B
[(jToCol
) + (i
<<5)];
137 tempC1
+= tempA0
* B
[(jToCol
+1 ) + (i
<<5)];
138 tempC2
+= tempA0
* B
[(jToCol
+2 ) + (i
<<5)];
139 tempC3
+= tempA0
* B
[(jToCol
+3 ) + (i
<<5)];
140 tempC4
+= tempA0
* B
[(jToCol
+4 ) + (i
<<5)];
141 tempC5
+= tempA0
* B
[(jToCol
+5 ) + (i
<<5)];
142 tempC6
+= tempA0
* B
[(jToCol
+6 ) + (i
<<5)];
143 tempC7
+= tempA0
* B
[(jToCol
+7 ) + (i
<<5)];
144 tempC0
+= tempA1
* B
[(jToCol
) + ((i
+1)<<5)];
145 tempC1
+= tempA1
* B
[(jToCol
+1 ) + ((i
+1)<<5)];
146 tempC2
+= tempA1
* B
[(jToCol
+2 ) + ((i
+1)<<5)];
147 tempC3
+= tempA1
* B
[(jToCol
+3 ) + ((i
+1)<<5)];
148 tempC4
+= tempA1
* B
[(jToCol
+4 ) + ((i
+1)<<5)];
149 tempC5
+= tempA1
* B
[(jToCol
+5 ) + ((i
+1)<<5)];
150 tempC6
+= tempA1
* B
[(jToCol
+6 ) + ((i
+1)<<5)];
151 tempC7
+= tempA1
* B
[(jToCol
+7 ) + ((i
+1)<<5)];
164 for (i
= 0; i
< 1024; i
++) {
168 for (j
=start
; j
< end
; j
+=jStride
) {
179 for ( i
=0; i
< lda
; i
+=2 ) {
180 tempA0
= A1
[i
+ jToRow
];
181 tempA1
= A1
[i
+1 + jToRow
];
182 tempC0
+= tempA0
* B1
[(jToCol
) + (i
<<5)];
183 tempC1
+= tempA0
* B1
[(jToCol
+1 ) + (i
<<5)];
184 tempC2
+= tempA0
* B1
[(jToCol
+2 ) + (i
<<5)];
185 tempC3
+= tempA0
* B1
[(jToCol
+3 ) + (i
<<5)];
186 tempC4
+= tempA0
* B1
[(jToCol
+4 ) + (i
<<5)];
187 tempC5
+= tempA0
* B1
[(jToCol
+5 ) + (i
<<5)];
188 tempC6
+= tempA0
* B1
[(jToCol
+6 ) + (i
<<5)];
189 tempC7
+= tempA0
* B1
[(jToCol
+7 ) + (i
<<5)];
190 tempC0
+= tempA1
* B1
[(jToCol
) + ((i
+1)<<5)];
191 tempC1
+= tempA1
* B1
[(jToCol
+1 ) + ((i
+1)<<5)];
192 tempC2
+= tempA1
* B1
[(jToCol
+2 ) + ((i
+1)<<5)];
193 tempC3
+= tempA1
* B1
[(jToCol
+3 ) + ((i
+1)<<5)];
194 tempC4
+= tempA1
* B1
[(jToCol
+4 ) + ((i
+1)<<5)];
195 tempC5
+= tempA1
* B1
[(jToCol
+5 ) + ((i
+1)<<5)];
196 tempC6
+= tempA1
* B1
[(jToCol
+6 ) + ((i
+1)<<5)];
197 tempC7
+= tempA1
* B1
[(jToCol
+7 ) + ((i
+1)<<5)];
211 //--------------------------------------------------------------------------
214 // all threads start executing thread_entry(). Use their "coreid" to
215 // differentiate between threads (each thread is running on a separate core).
217 void thread_entry(int cid
, int nc
)
222 // static allocates data in the binary, which is visible to both threads
223 static data_t results_data
[ARRAY_SIZE
];
226 //// Execute the provided, naive matmul
228 //stats(matmul_naive(DIM_SIZE, input1_data, input2_data, results_data); barrier());
232 //verify(ARRAY_SIZE, results_data, verify_data);
234 //// clear results from the first trial
237 // for (i=0; i < ARRAY_SIZE; i++)
238 // results_data[i] = 0;
242 // Execute your faster matmul
244 stats(matmul(DIM_SIZE
, input1_data
, input2_data
, results_data
); barrier());
247 printArray("results:", ARRAY_SIZE
, results_data
);
248 printArray("verify :", ARRAY_SIZE
, verify_data
);
252 verify(ARRAY_SIZE
, results_data
, verify_data
);