/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "ir3_asm.h"
#include "main.h"
struct a6xx_backend {
	struct backend base;

	struct ir3_compiler *compiler;
	struct fd_device *dev;

	unsigned seqno;
	struct fd_bo *control_mem;

	struct fd_bo *query_mem;
	const struct perfcntr *perfcntrs;
	unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);
/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
	uint32_t seqno;          /* seqno for async CP_EVENT_WRITE, etc */

	/* flag set from cmdstream when VSC overflow detected: */
	volatile uint32_t vsc_overflow;

	/* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
	struct {
		uint32_t offset;
		uint32_t pad[7];
	} flush_base[4];
};
#define control_ptr(a6xx_backend, member) \
	(a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0
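/* control_ptr() expands to the (bo, offset, or, shift) argument tuple that
 * OUT_RELOC() expects, so a GPU address of a control-buffer field can be
 * emitted directly, e.g.:
 *
 *   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
 */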
struct PACKED fd6_query_sample {
	uint64_t start;
	uint64_t result;
	uint64_t stop;
};
/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field)        \
	(a6xx_backend)->query_mem,                             \
	(idx * sizeof(struct fd6_query_sample)) +              \
	offsetof(struct fd6_query_sample, field),              \
	0, 0
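/* Like control_ptr(), this yields an OUT_RELOC() argument tuple, here for
 * one field of the idx'th sample, e.g.:
 *
 *   OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
 */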
/*
 * Backend implementation:
 */
static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
	struct ir3_kernel *ir3_kernel =
		ir3_asm_assemble(a6xx_backend->compiler, in);
	ir3_kernel->backend = b;
	return &ir3_kernel->base;
}
static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
	ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}
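/* Emit the compute program state: SP/HLSQ config and control registers for
 * the shader variant, plus a CP_LOAD_STATE6 packet pointing the shader
 * processor at the instructions in v->bo.
 */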
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct ir3_shader_variant *v = ir3_kernel->v;
	const struct ir3_info *i = &v->info;
	enum a3xx_threadsize thrsz = FOUR_QUADS;

	OUT_PKT4(ring, REG_A6XX_SP_MODE_CONTROL, 1);
	OUT_RING(ring, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

	OUT_PKT4(ring, REG_A6XX_HLSQ_INVALIDATE_CMD, 1);
	OUT_RING(ring, A6XX_HLSQ_INVALIDATE_CMD_VS_STATE |
			A6XX_HLSQ_INVALIDATE_CMD_HS_STATE |
			A6XX_HLSQ_INVALIDATE_CMD_DS_STATE |
			A6XX_HLSQ_INVALIDATE_CMD_GS_STATE |
			A6XX_HLSQ_INVALIDATE_CMD_FS_STATE |
			A6XX_HLSQ_INVALIDATE_CMD_CS_STATE |
			A6XX_HLSQ_INVALIDATE_CMD_CS_IBO |
			A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO);

	unsigned constlen = align(v->constlen, 4);
	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
	OUT_RING(ring, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
			A6XX_HLSQ_CS_CNTL_ENABLED);

	OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
	OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
			A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
			A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
			A6XX_SP_CS_CONFIG_NSAMP(v->num_samp));  /* SP_CS_CONFIG */
	OUT_RING(ring, v->instrlen);                        /* SP_CS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
			A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
			A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
			COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
			A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
			COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE));

	OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
	OUT_RING(ring, 0x41);

	uint32_t local_invocation_id, work_group_id;
	local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
	work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

	/* regid(63, 0) is the "invalid" register id, used for unused slots: */
	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
	OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
			A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
			A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
			A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
	OUT_RING(ring, 0x2fc);  /* HLSQ_CS_UNKNOWN_B998 */

	OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0);  /* SP_CS_OBJ_START_LO/HI */

	OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
	OUT_RING(ring, v->instrlen);

	OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0);

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
			CP_LOAD_STATE6_0_NUM_UNIT(v->instrlen));
	OUT_RELOC(ring, v->bo, 0, 0, 0);
}
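/* Upload a range of constants with an inline (SS6_DIRECT) CP_LOAD_STATE6
 * packet.  NUM_UNIT is in units of vec4 (four dwords), which is why both
 * the destination offset and the payload size are aligned to four dwords,
 * with the tail zero-padded.
 */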
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid,
		uint32_t sizedwords, const uint32_t *dwords)
{
	uint32_t align_sz;

	debug_assert((regid % 4) == 0);

	align_sz = align(sizedwords, 4);

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid/4) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
			CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
	OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

	for (uint32_t i = 0; i < sizedwords; i++) {
		OUT_RING(ring, dwords[i]);
	}

	/* Zero-pad to multiple of 4 dwords */
	for (uint32_t i = sizedwords; i < align_sz; i++) {
		OUT_RING(ring, 0);
	}
}
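/* Emit the shader's immediate constants.  If the assembler reserved a const
 * slot for the number-of-workgroups (numwg), the grid dimensions are patched
 * into the immediates before upload.
 */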
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel, uint32_t grid[3])
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct ir3_shader_variant *v = ir3_kernel->v;

	const struct ir3_const_state *const_state = ir3_const_state(v);
	uint32_t base = const_state->offsets.immediate;
	int size = DIV_ROUND_UP(const_state->immediates_count, 4);

	if (ir3_kernel->info.numwg != INVALID_REG) {
		assert((ir3_kernel->info.numwg & 0x3) == 0);
		int idx = ir3_kernel->info.numwg >> 2;
		const_state->immediates[idx * 4 + 0] = grid[0];
		const_state->immediates[idx * 4 + 1] = grid[1];
		const_state->immediates[idx * 4 + 2] = grid[2];
	}

	/* truncate size to avoid writing constants that shader
	 * does not use:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		emit_const(ring, base, size, const_state->immediates);
	}
}
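/* Emit the image/buffer (IBO) descriptors.  Each buffer gets a 16-dword
 * descriptor, built into a separate streaming state object which is then
 * referenced indirectly by CP_LOAD_STATE6 and SP_CS_IBO_LO/HI.
 */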
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
		struct kernel *kernel)
{
	struct fd_ringbuffer *state =
		fd_submit_new_ringbuffer(submit,
			kernel->num_bufs * 16 * 4,
			FD_RINGBUFFER_STREAMING);

	for (unsigned i = 0; i < kernel->num_bufs; i++) {
		/* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
		 * in units of elements:
		 */
		unsigned sz = kernel->buf_sizes[i];
		unsigned width = sz & MASK(15);
		unsigned height = sz >> 15;
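		/* Worked example: a buffer of 100000 elements encodes as
		 * WIDTH = 100000 & 0x7fff = 1696 and HEIGHT = 100000 >> 15 = 3,
		 * so the hardware recovers 3 * 32768 + 1696 = 100000.
		 */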
		OUT_RING(state, A6XX_IBO_0_FMT(FMT6_32_UINT) |
				A6XX_IBO_0_TILE_MODE(0));
		OUT_RING(state, A6XX_IBO_1_WIDTH(width) |
				A6XX_IBO_1_HEIGHT(height));
		OUT_RING(state, A6XX_IBO_2_PITCH(0) |
				A6XX_IBO_2_UNK4 | A6XX_IBO_2_UNK31 |
				A6XX_IBO_2_TYPE(A6XX_TEX_1D));
		OUT_RING(state, A6XX_IBO_3_ARRAY_PITCH(0));
		OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
	}

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
			CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
	OUT_RB(ring, state);

	OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_LO, 2);
	OUT_RB(ring, state);

	OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
	OUT_RING(ring, kernel->num_bufs);

	fd_ringbuffer_del(state);
}
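/* Emit a CP_EVENT_WRITE.  With timestamp=true the packet carries an
 * address/value pair, so the CP writes an incrementing seqno into the
 * control buffer when the event fires; the returned seqno can then be
 * waited on.
 */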
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
		enum vgt_event_type evt, bool timestamp)
{
	unsigned seqno = 0;

	OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
	if (timestamp) {
		struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
		struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
		seqno = ++a6xx_backend->seqno;
		OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));  /* ADDR_LO/HI */
		OUT_RING(ring, seqno);
	}

	return seqno;
}
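/* Fence and flush: wait for the RB_DONE_TS seqno to land, then issue a
 * CACHE_FLUSH_TS and wait for its seqno, so GPU writes are visible to the
 * CPU before results are read back.
 */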
static void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
	unsigned seqno;

	seqno = event_write(ring, kernel, RB_DONE_TS, true);

	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
			CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

	seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

	OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
	OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
	OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
	OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}
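/* Build and emit the dispatch: program state, constants and IBOs, the
 * NDRANGE registers describing the grid, optional perfcounter sampling
 * around the CP_EXEC_CS, and a final cache flush.
 */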
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3], struct fd_submit *submit)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
	struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(submit, 0,
			FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

	cs_program_emit(ring, kernel);
	cs_const_emit(ring, kernel, grid);
	cs_ibo_emit(ring, submit, kernel);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

	const unsigned *local_size = kernel->local_size;
	const unsigned *num_groups = grid;

	/* work_dim is the number of leading non-zero grid dimensions: */
	unsigned work_dim = 0;
	for (int i = 0; i < 3; i++) {
		if (!grid[i])
			break;
		work_dim++;
	}

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
			A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
			A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
			A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
	OUT_RING(ring, 0);  /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
	OUT_RING(ring, 0);  /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
	OUT_RING(ring, 0);  /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
	OUT_RING(ring, 1);  /* HLSQ_CS_KERNEL_GROUP_X */
	OUT_RING(ring, 1);  /* HLSQ_CS_KERNEL_GROUP_Y */
	OUT_RING(ring, 1);  /* HLSQ_CS_KERNEL_GROUP_Z */
	if (a6xx_backend->num_perfcntrs > 0) {
		a6xx_backend->query_mem = fd_bo_new(a6xx_backend->dev,
				a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample),
				DRM_FREEDRENO_GEM_TYPE_KMEM, "query");

		/* configure the performance counters to count the requested
		 * countables:
		 */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

			OUT_PKT4(ring, counter->select_reg, 1);
			OUT_RING(ring, counter->selector);
		}

		OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

		/* and snapshot the start values: */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

			OUT_PKT7(ring, CP_REG_TO_MEM, 3);
			OUT_RING(ring, CP_REG_TO_MEM_0_64B |
					CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
		}
	}

	OUT_PKT7(ring, CP_EXEC_CS, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
	OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
	OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

	OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
	if (a6xx_backend->num_perfcntrs > 0) {
		/* snapshot the end values: */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

			OUT_PKT7(ring, CP_REG_TO_MEM, 3);
			OUT_RING(ring, CP_REG_TO_MEM_0_64B |
					CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
		}
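		/* CP_MEM_TO_MEM sums its sources into dst; DOUBLE selects 64-bit
		 * operands and NEG_C negates srcC, giving dst = srcA + srcB - srcC
		 * below.
		 */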
		/* and compute the result: */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			/* result += stop - start: */
			OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
			OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE |
					CP_MEM_TO_MEM_0_NEG_C);
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result));  /* dst */
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result));  /* srcA */
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));    /* srcB */
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));   /* srcC */
		}
	}

	cache_flush(ring, kernel);
}
static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
		unsigned num_perfcntrs)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

	a6xx_backend->perfcntrs = perfcntrs;
	a6xx_backend->num_perfcntrs = num_perfcntrs;
}
static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

	fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, DRM_FREEDRENO_PREP_READ);
	struct fd6_query_sample *samples = fd_bo_map(a6xx_backend->query_mem);

	for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
		results[i] = samples[i].result;
	}
}
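/* Construct the a6xx backend.  A rough usage sketch (caller-side names are
 * illustrative, not part of this file):
 *
 *   struct backend *b = a6xx_init(dev, gpu_id);
 *   struct kernel *kernel = b->assemble(b, in);
 *   b->emit_grid(kernel, grid, submit);
 *   b->read_perfcntrs(b, results);
 */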
struct backend *
a6xx_init(struct fd_device *dev, uint32_t gpu_id)
{
	struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

	a6xx_backend->base = (struct backend) {
		.assemble = a6xx_assemble,
		.disassemble = a6xx_disassemble,
		.emit_grid = a6xx_emit_grid,
		.set_perfcntrs = a6xx_set_perfcntrs,
		.read_perfcntrs = a6xx_read_perfcntrs,
	};

	a6xx_backend->compiler = ir3_compiler_create(dev, gpu_id);
	a6xx_backend->dev = dev;

	a6xx_backend->control_mem = fd_bo_new(dev, 0x1000,
			DRM_FREEDRENO_GEM_TYPE_KMEM, "control");

	return &a6xx_backend->base;
}