radeonsi: really wait for the second EOP event and not the first one
[mesa.git] / src/gallium/drivers/radeonsi/si_perfcounter.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Nicolai Hähnle <nicolai.haehnle@amd.com>
 *
 */

#include "radeon/r600_cs.h"
#include "radeon/r600_query.h"
#include "radeon/r600_pipe_common.h"
#include "util/u_memory.h"

#include "si_pipe.h"
#include "sid.h"

enum si_pc_reg_layout {
        /* All secondary selector dwords follow as one block after the primary
         * selector dwords for the counters that have secondary selectors.
         */
        SI_PC_MULTI_BLOCK = 0,

        /* Each secondary selector dword follows immediately after the
         * corresponding primary.
         */
        SI_PC_MULTI_ALTERNATE = 1,

        /* All secondary selector dwords follow as one block after all primary
         * selector dwords.
         */
        SI_PC_MULTI_TAIL = 2,

        /* Free-form arrangement of selector registers. */
        SI_PC_MULTI_CUSTOM = 3,

        SI_PC_MULTI_MASK = 3,

        /* Registers are laid out in decreasing rather than increasing order. */
        SI_PC_REG_REVERSE = 4,

        SI_PC_FAKE = 8,
};
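/* Describes one family of hardware counter registers. The emit functions
 * below interpret the fields as follows:
 *  - select0/counter0_lo: first *_PERFCOUNTER*_SELECT resp. *_PERFCOUNTER*_LO
 *    register, when the registers are laid out contiguously
 *  - select/counters: explicit register lists for blocks with an irregular
 *    register layout
 *  - num_multi: number of counters that also take a secondary (SELECT1) dword
 *  - num_prelude: number of leading dwords written as zero before the
 *    selectors (e.g. CB's FILTER register)
 *  - layout: combination of si_pc_reg_layout flags
 */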
struct si_pc_block_base {
        const char *name;
        unsigned num_counters;
        unsigned flags;

        unsigned select_or;
        unsigned select0;
        unsigned counter0_lo;
        unsigned *select;
        unsigned *counters;
        unsigned num_multi;
        unsigned num_prelude;
        unsigned layout;
};

struct si_pc_block {
        struct si_pc_block_base *b;
        unsigned selectors;
        unsigned instances;
};

/* The order is chosen to be compatible with GPUPerfStudio's hardcoding of
 * performance counter group IDs.
 */
static const char * const si_pc_shader_type_suffixes[] = {
        "", "_ES", "_GS", "_VS", "_PS", "_LS", "_HS", "_CS"
};
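/* Shader stage masks for SQ_PERFCOUNTER_CTRL; entry 0 (0x7f) enables all
 * stages at once.
 */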
static const unsigned si_pc_shader_type_bits[] = {
        0x7f,
        S_036780_ES_EN(1),
        S_036780_GS_EN(1),
        S_036780_VS_EN(1),
        S_036780_PS_EN(1),
        S_036780_LS_EN(1),
        S_036780_HS_EN(1),
        S_036780_CS_EN(1),
};

static struct si_pc_block_base cik_CB = {
        .name = "CB",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_SE | R600_PC_BLOCK_INSTANCE_GROUPS,

        .select0 = R_037000_CB_PERFCOUNTER_FILTER,
        .counter0_lo = R_035018_CB_PERFCOUNTER0_LO,
        .num_multi = 1,
        .num_prelude = 1,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static unsigned cik_CPC_select[] = {
        R_036024_CPC_PERFCOUNTER0_SELECT,
        R_036010_CPC_PERFCOUNTER0_SELECT1,
        R_03600C_CPC_PERFCOUNTER1_SELECT,
};
static struct si_pc_block_base cik_CPC = {
        .name = "CPC",
        .num_counters = 2,

        .select = cik_CPC_select,
        .counter0_lo = R_034018_CPC_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_CUSTOM | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPF = {
        .name = "CPF",
        .num_counters = 2,

        .select0 = R_03601C_CPF_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034028_CPF_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPG = {
        .name = "CPG",
        .num_counters = 2,

        .select0 = R_036008_CPG_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034008_CPG_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_DB = {
        .name = "DB",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_SE | R600_PC_BLOCK_INSTANCE_GROUPS,

        .select0 = R_037100_DB_PERFCOUNTER0_SELECT,
        .counter0_lo = R_035100_DB_PERFCOUNTER0_LO,
        .num_multi = 3, /* really only 2, but there's a gap between registers */
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_GDS = {
        .name = "GDS",
        .num_counters = 4,

        .select0 = R_036A00_GDS_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034A00_GDS_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_TAIL,
};

static unsigned cik_GRBM_counters[] = {
        R_034100_GRBM_PERFCOUNTER0_LO,
        R_03410C_GRBM_PERFCOUNTER1_LO,
};
static struct si_pc_block_base cik_GRBM = {
        .name = "GRBM",
        .num_counters = 2,

        .select0 = R_036100_GRBM_PERFCOUNTER0_SELECT,
        .counters = cik_GRBM_counters,
};

static struct si_pc_block_base cik_GRBMSE = {
        .name = "GRBMSE",
        .num_counters = 4,

        .select0 = R_036108_GRBM_SE0_PERFCOUNTER_SELECT,
        .counter0_lo = R_034114_GRBM_SE0_PERFCOUNTER_LO,
};

static struct si_pc_block_base cik_IA = {
        .name = "IA",
        .num_counters = 4,

        .select0 = R_036210_IA_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034220_IA_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_PA_SC = {
        .name = "PA_SC",
        .num_counters = 8,
        .flags = R600_PC_BLOCK_SE,

        .select0 = R_036500_PA_SC_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034500_PA_SC_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_ALTERNATE,
};

/* According to docs, PA_SU counters are only 48 bits wide. */
static struct si_pc_block_base cik_PA_SU = {
        .name = "PA_SU",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_SE,

        .select0 = R_036400_PA_SU_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034400_PA_SU_PERFCOUNTER0_LO,
        .num_multi = 2,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_SPI = {
        .name = "SPI",
        .num_counters = 6,
        .flags = R600_PC_BLOCK_SE,

        .select0 = R_036600_SPI_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034604_SPI_PERFCOUNTER0_LO,
        .num_multi = 4,
        .layout = SI_PC_MULTI_BLOCK,
};

static struct si_pc_block_base cik_SQ = {
        .name = "SQ",
        .num_counters = 16,
        .flags = R600_PC_BLOCK_SE | R600_PC_BLOCK_SHADER,

        .select0 = R_036700_SQ_PERFCOUNTER0_SELECT,
        .select_or = S_036700_SQC_BANK_MASK(15) |
                     S_036700_SQC_CLIENT_MASK(15) |
                     S_036700_SIMD_MASK(15),
        .counter0_lo = R_034700_SQ_PERFCOUNTER0_LO,
};

static struct si_pc_block_base cik_SX = {
        .name = "SX",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_SE,

        .select0 = R_036900_SX_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034900_SX_PERFCOUNTER0_LO,
        .num_multi = 2,
        .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_TA = {
        .name = "TA",
        .num_counters = 2,
        .flags = R600_PC_BLOCK_SE | R600_PC_BLOCK_INSTANCE_GROUPS | R600_PC_BLOCK_SHADER_WINDOWED,

        .select0 = R_036B00_TA_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034B00_TA_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TD = {
        .name = "TD",
        .num_counters = 2,
        .flags = R600_PC_BLOCK_SE | R600_PC_BLOCK_INSTANCE_GROUPS | R600_PC_BLOCK_SHADER_WINDOWED,

        .select0 = R_036C00_TD_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034C00_TD_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCA = {
        .name = "TCA",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_INSTANCE_GROUPS,

        .select0 = R_036E40_TCA_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034E40_TCA_PERFCOUNTER0_LO,
        .num_multi = 2,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCC = {
        .name = "TCC",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_INSTANCE_GROUPS,

        .select0 = R_036E00_TCC_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034E00_TCC_PERFCOUNTER0_LO,
        .num_multi = 2,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCP = {
        .name = "TCP",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_SE | R600_PC_BLOCK_INSTANCE_GROUPS | R600_PC_BLOCK_SHADER_WINDOWED,

        .select0 = R_036D00_TCP_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034D00_TCP_PERFCOUNTER0_LO,
        .num_multi = 2,
        .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_VGT = {
        .name = "VGT",
        .num_counters = 4,
        .flags = R600_PC_BLOCK_SE,

        .select0 = R_036230_VGT_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034240_VGT_PERFCOUNTER0_LO,
        .num_multi = 1,
        .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_WD = {
        .name = "WD",
        .num_counters = 4,

        .select0 = R_036200_WD_PERFCOUNTER0_SELECT,
        .counter0_lo = R_034200_WD_PERFCOUNTER0_LO,
};
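/* The MC and SRBM blocks are not programmed or read here; they are exposed as
 * "fake" blocks whose results always read back as zero (see the SI_PC_FAKE
 * handling below).
 */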
static struct si_pc_block_base cik_MC = {
        .name = "MC",
        .num_counters = 4,

        .layout = SI_PC_FAKE,
};

static struct si_pc_block_base cik_SRBM = {
        .name = "SRBM",
        .num_counters = 2,

        .layout = SI_PC_FAKE,
};

/* Both the number of instances and the number of selectors vary between chips
 * of the same class. We only differentiate by class here and simply expose the
 * maximum number over all chips in a class.
 *
 * Unfortunately, GPUPerfStudio uses the order of performance counter groups
 * blindly once it believes it has identified the hardware, so the order of
 * blocks here matters.
 */
static struct si_pc_block groups_CIK[] = {
        { &cik_CB, 226, 4 },
        { &cik_CPF, 17 },
        { &cik_DB, 257, 4 },
        { &cik_GRBM, 34 },
        { &cik_GRBMSE, 15 },
        { &cik_PA_SU, 153 },
        { &cik_PA_SC, 395 },
        { &cik_SPI, 186 },
        { &cik_SQ, 252 },
        { &cik_SX, 32 },
        { &cik_TA, 111, 11 },
        { &cik_TCA, 39, 2 },
        { &cik_TCC, 160, 16 },
        { &cik_TD, 55, 11 },
        { &cik_TCP, 154, 11 },
        { &cik_GDS, 121 },
        { &cik_VGT, 140 },
        { &cik_IA, 22 },
        { &cik_MC, 22 },
        { &cik_SRBM, 19 },
        { &cik_WD, 22 },
        { &cik_CPG, 46 },
        { &cik_CPC, 22 },
};

static struct si_pc_block groups_VI[] = {
        { &cik_CB, 396, 4 },
        { &cik_CPF, 19 },
        { &cik_DB, 257, 4 },
        { &cik_GRBM, 34 },
        { &cik_GRBMSE, 15 },
        { &cik_PA_SU, 153 },
        { &cik_PA_SC, 397 },
        { &cik_SPI, 197 },
        { &cik_SQ, 273 },
        { &cik_SX, 34 },
        { &cik_TA, 119, 16 },
        { &cik_TCA, 35, 2 },
        { &cik_TCC, 192, 16 },
        { &cik_TD, 55, 16 },
        { &cik_TCP, 180, 16 },
        { &cik_GDS, 121 },
        { &cik_VGT, 147 },
        { &cik_IA, 24 },
        { &cik_MC, 22 },
        { &cik_SRBM, 27 },
        { &cik_WD, 37 },
        { &cik_CPG, 48 },
        { &cik_CPC, 24 },
};
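/* Compute the number of command stream dwords needed to program "count"
 * selectors of this block (*num_select_dw) and to read the results back
 * (*num_read_dw); each counter is read with one 6-dword COPY_DATA packet.
 */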
static void si_pc_get_size(struct r600_perfcounter_block *group,
                           unsigned count, unsigned *selectors,
                           unsigned *num_select_dw, unsigned *num_read_dw)
{
        struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
        struct si_pc_block_base *regs = sigroup->b;
        unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;

        if (regs->layout & SI_PC_FAKE) {
                *num_select_dw = 0;
        } else if (layout_multi == SI_PC_MULTI_BLOCK) {
                if (count < regs->num_multi)
                        *num_select_dw = 2 * (count + 2) + regs->num_prelude;
                else
                        *num_select_dw = 2 + count + regs->num_multi + regs->num_prelude;
        } else if (layout_multi == SI_PC_MULTI_TAIL) {
                *num_select_dw = 4 + count + MIN2(count, regs->num_multi) + regs->num_prelude;
        } else if (layout_multi == SI_PC_MULTI_CUSTOM) {
                assert(regs->num_prelude == 0);
                *num_select_dw = 3 * (count + MIN2(count, regs->num_multi));
        } else {
                assert(layout_multi == SI_PC_MULTI_ALTERNATE);

                *num_select_dw = 2 + count + MIN2(count, regs->num_multi) + regs->num_prelude;
        }

        *num_read_dw = 6 * count;
}
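/* Select which shader engine and block instance the following perfcounter
 * register accesses apply to, by programming GRBM_GFX_INDEX. Negative
 * se/instance values select broadcast mode.
 */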
static void si_pc_emit_instance(struct r600_common_context *ctx,
                                int se, int instance)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;
        unsigned value = S_030800_SH_BROADCAST_WRITES(1);

        if (se >= 0) {
                value |= S_030800_SE_INDEX(se);
        } else {
                value |= S_030800_SE_BROADCAST_WRITES(1);
        }

        if (instance >= 0) {
                value |= S_030800_INSTANCE_INDEX(instance);
        } else {
                value |= S_030800_INSTANCE_BROADCAST_WRITES(1);
        }

        radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, value);
}
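/* Restrict SQ counters to the given shader stages (a mask built from
 * si_pc_shader_type_bits) via SQ_PERFCOUNTER_CTRL.
 */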
static void si_pc_emit_shaders(struct r600_common_context *ctx,
                               unsigned shaders)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;

        radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);
        radeon_emit(cs, shaders & 0x7f);
        radeon_emit(cs, 0xffffffff);
}
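/* Program the selector registers for "count" counters of the given block,
 * following the register layout described by the si_pc_reg_layout flags.
 */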
static void si_pc_emit_select(struct r600_common_context *ctx,
                              struct r600_perfcounter_block *group,
                              unsigned count, unsigned *selectors)
{
        struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
        struct si_pc_block_base *regs = sigroup->b;
        struct radeon_winsys_cs *cs = ctx->gfx.cs;
        unsigned idx;
        unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;
        unsigned dw;

        assert(count <= regs->num_counters);

        if (regs->layout & SI_PC_FAKE)
                return;

        if (layout_multi == SI_PC_MULTI_BLOCK) {
                assert(!(regs->layout & SI_PC_REG_REVERSE));

                dw = count + regs->num_prelude;
                if (count >= regs->num_multi)
                        dw += regs->num_multi;
                radeon_set_uconfig_reg_seq(cs, regs->select0, dw);
                for (idx = 0; idx < regs->num_prelude; ++idx)
                        radeon_emit(cs, 0);
                for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
                        radeon_emit(cs, selectors[idx] | regs->select_or);

                if (count < regs->num_multi) {
                        unsigned select1 =
                                regs->select0 + 4 * regs->num_multi;
                        radeon_set_uconfig_reg_seq(cs, select1, count);
                }

                for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
                        radeon_emit(cs, 0);

                if (count > regs->num_multi) {
                        for (idx = regs->num_multi; idx < count; ++idx)
                                radeon_emit(cs, selectors[idx] | regs->select_or);
                }
        } else if (layout_multi == SI_PC_MULTI_TAIL) {
                unsigned select1, select1_count;

                assert(!(regs->layout & SI_PC_REG_REVERSE));

                radeon_set_uconfig_reg_seq(cs, regs->select0, count + regs->num_prelude);
                for (idx = 0; idx < regs->num_prelude; ++idx)
                        radeon_emit(cs, 0);
                for (idx = 0; idx < count; ++idx)
                        radeon_emit(cs, selectors[idx] | regs->select_or);

                select1 = regs->select0 + 4 * regs->num_counters;
                select1_count = MIN2(count, regs->num_multi);
                radeon_set_uconfig_reg_seq(cs, select1, select1_count);
                for (idx = 0; idx < select1_count; ++idx)
                        radeon_emit(cs, 0);
        } else if (layout_multi == SI_PC_MULTI_CUSTOM) {
                unsigned *reg = regs->select;
                for (idx = 0; idx < count; ++idx) {
                        radeon_set_uconfig_reg(cs, *reg++, selectors[idx] | regs->select_or);
                        if (idx < regs->num_multi)
                                radeon_set_uconfig_reg(cs, *reg++, 0);
                }
        } else {
                assert(layout_multi == SI_PC_MULTI_ALTERNATE);

                unsigned reg_base = regs->select0;
                unsigned reg_count = count + MIN2(count, regs->num_multi);
                reg_count += regs->num_prelude;

                if (!(regs->layout & SI_PC_REG_REVERSE)) {
                        radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

                        for (idx = 0; idx < regs->num_prelude; ++idx)
                                radeon_emit(cs, 0);
                        for (idx = 0; idx < count; ++idx) {
                                radeon_emit(cs, selectors[idx] | regs->select_or);
                                if (idx < regs->num_multi)
                                        radeon_emit(cs, 0);
                        }
                } else {
                        reg_base -= (reg_count - 1) * 4;
                        radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

                        for (idx = count; idx > 0; --idx) {
                                if (idx <= regs->num_multi)
                                        radeon_emit(cs, 0);
                                radeon_emit(cs, selectors[idx - 1] | regs->select_or);
                        }
                        for (idx = 0; idx < regs->num_prelude; ++idx)
                                radeon_emit(cs, 0);
                }
        }
}
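/* Start counting: initialize the fence dword at "va" to 1 (si_pc_emit_stop
 * later clears it from an EOP event and waits for it to become 0), then
 * reset and start the counters via CP_PERFMON_CNTL.
 */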
static void si_pc_emit_start(struct r600_common_context *ctx,
                             struct r600_resource *buffer, uint64_t va)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;

        radeon_add_to_buffer_list(ctx, &ctx->gfx, buffer,
                                  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

        radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
        radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
                        COPY_DATA_DST_SEL(COPY_DATA_MEM));
        radeon_emit(cs, 1); /* immediate */
        radeon_emit(cs, 0); /* unused */
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

        radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                               S_036020_PERFMON_STATE(V_036020_DISABLE_AND_RESET));
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PERFCOUNTER_START) | EVENT_INDEX(0));
        radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                               S_036020_PERFMON_STATE(V_036020_START_COUNTING));
}

/* Note: The buffer was already added to the buffer list in si_pc_emit_start,
 * so we don't have to do it again here. */
static void si_pc_emit_stop(struct r600_common_context *ctx,
                            struct r600_resource *buffer, uint64_t va)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;

        if (ctx->screen->chip_class == CIK) {
                /* Two EOP events are required to make all engines go idle
                 * (and optional cache flushes executed) before the timestamp
                 * is written.
                 *
                 * Write 1, because we need to wait for the second EOP event.
                 */
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
                                EVENT_INDEX(5));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
                radeon_emit(cs, 1); /* immediate data */
                radeon_emit(cs, 0); /* unused */
        }

        /* Write 0. */
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
                        EVENT_INDEX(5));
        radeon_emit(cs, va);
        radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
        radeon_emit(cs, 0); /* immediate data */
        radeon_emit(cs, 0); /* unused */

        /* Wait until the memory location is 0. */
        radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
        radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        radeon_emit(cs, 0); /* reference value */
        radeon_emit(cs, 0xffffffff); /* mask */
        radeon_emit(cs, 4); /* poll interval */

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0));
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PERFCOUNTER_STOP) | EVENT_INDEX(0));
        radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                               S_036020_PERFMON_STATE(V_036020_STOP_COUNTING) |
                               S_036020_PERFMON_SAMPLE_ENABLE(1));
}
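/* Copy the counter results into the query buffer, one 64-bit COPY_DATA per
 * counter. Fake blocks (SI_PC_FAKE) have no readable registers and simply
 * write zeros.
 */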
static void si_pc_emit_read(struct r600_common_context *ctx,
                            struct r600_perfcounter_block *group,
                            unsigned count, unsigned *selectors,
                            struct r600_resource *buffer, uint64_t va)
{
        struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
        struct si_pc_block_base *regs = sigroup->b;
        struct radeon_winsys_cs *cs = ctx->gfx.cs;
        unsigned idx;
        unsigned reg = regs->counter0_lo;
        unsigned reg_delta = 8;

        if (!(regs->layout & SI_PC_FAKE)) {
                if (regs->layout & SI_PC_REG_REVERSE)
                        reg_delta = -reg_delta;

                for (idx = 0; idx < count; ++idx) {
                        if (regs->counters)
                                reg = regs->counters[idx];

                        radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
                        radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
                                        COPY_DATA_DST_SEL(COPY_DATA_MEM) |
                                        COPY_DATA_COUNT_SEL); /* 64 bits */
                        radeon_emit(cs, reg >> 2);
                        radeon_emit(cs, 0); /* unused */
                        radeon_emit(cs, va);
                        radeon_emit(cs, va >> 32);
                        va += sizeof(uint64_t);
                        reg += reg_delta;
                }
        } else {
                for (idx = 0; idx < count; ++idx) {
                        radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
                        radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
                                        COPY_DATA_DST_SEL(COPY_DATA_MEM) |
                                        COPY_DATA_COUNT_SEL);
                        radeon_emit(cs, 0); /* immediate */
                        radeon_emit(cs, 0);
                        radeon_emit(cs, va);
                        radeon_emit(cs, va >> 32);
                        va += sizeof(uint64_t);
                }
        }
}

static void si_pc_cleanup(struct r600_common_screen *rscreen)
{
        r600_perfcounters_do_destroy(rscreen->perfcounters);
        rscreen->perfcounters = NULL;
}
void si_init_perfcounters(struct si_screen *screen)
{
        struct r600_perfcounters *pc;
        struct si_pc_block *blocks;
        unsigned num_blocks;
        unsigned i;

        switch (screen->b.chip_class) {
        case CIK:
                blocks = groups_CIK;
                num_blocks = ARRAY_SIZE(groups_CIK);
                break;
        case VI:
                blocks = groups_VI;
                num_blocks = ARRAY_SIZE(groups_VI);
                break;
        case SI:
        default:
                return; /* not implemented */
        }

        if (screen->b.info.max_sh_per_se != 1) {
                /* This should not happen on non-SI chips. */
                fprintf(stderr, "si_init_perfcounters: max_sh_per_se = %d not "
                        "supported (inaccurate performance counters)\n",
                        screen->b.info.max_sh_per_se);
        }

        pc = CALLOC_STRUCT(r600_perfcounters);
        if (!pc)
                return;

        pc->num_start_cs_dwords = 14;
        pc->num_stop_cs_dwords = 20;
        pc->num_instance_cs_dwords = 3;
        pc->num_shaders_cs_dwords = 4;
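        /* The CIK-only first EOP event in si_pc_emit_stop takes 6 extra dwords. */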
        if (screen->b.chip_class == CIK) {
                pc->num_stop_cs_dwords += 6;
        }

        pc->num_shader_types = ARRAY_SIZE(si_pc_shader_type_bits);
        pc->shader_type_suffixes = si_pc_shader_type_suffixes;
        pc->shader_type_bits = si_pc_shader_type_bits;

        pc->get_size = si_pc_get_size;
        pc->emit_instance = si_pc_emit_instance;
        pc->emit_shaders = si_pc_emit_shaders;
        pc->emit_select = si_pc_emit_select;
        pc->emit_start = si_pc_emit_start;
        pc->emit_stop = si_pc_emit_stop;
        pc->emit_read = si_pc_emit_read;
        pc->cleanup = si_pc_cleanup;

        if (!r600_perfcounters_init(pc, num_blocks))
                goto error;

        for (i = 0; i < num_blocks; ++i) {
                struct si_pc_block *block = &blocks[i];
                unsigned instances = block->instances;

                if (!strcmp(block->b->name, "IA")) {
                        if (screen->b.info.max_se > 2)
                                instances = 2;
                }

                r600_perfcounters_add_block(&screen->b, pc,
                                            block->b->name,
                                            block->b->flags,
                                            block->b->num_counters,
                                            block->selectors,
                                            instances,
                                            block);
        }

        screen->b.perfcounters = pc;
        return;

error:
        r600_perfcounters_do_destroy(pc);
}