/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3/ir3_nir.h"

/* This has to reach into the fd_context a bit more than the rest of
 * ir3, but it needs to be aligned with the compiler, so both agree
 * on which const regs hold what.  And the logic is identical between
 * ir3 generations; the only difference is small details in the actual
 * CP_LOAD_STATE packets (which is handled inside the generation
 * specific ctx->emit_const(_bo)() fxns)
 *
 * This file should be included in only a single .c file per gen, which
 * defines the following functions:
 */

static bool is_stateobj(struct fd_ringbuffer *ring);

static void emit_const_user(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t size, const uint32_t *user_buffer);

static void emit_const_bo(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t offset, uint32_t size,
      struct fd_bo *bo);

static void emit_const_prsc(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t offset, uint32_t size,
      struct pipe_resource *buffer)
{
   struct fd_resource *rsc = fd_resource(buffer);
   emit_const_bo(ring, v, regid, offset, size, rsc->bo);
}

static void emit_const_ptrs(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t dst_offset,
      uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);

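/* For illustration only: a gen-specific .c file provides the hooks above
 * and then includes this file.  A minimal sketch (hypothetical; the real
 * per-gen implementations differ in the CP_LOAD_STATE packet details):
 *
 *    static bool
 *    is_stateobj(struct fd_ringbuffer *ring)
 *    {
 *       ... true if ring is a CP_SET_DRAW_STATE stateobj, not IB2 ...
 *    }
 *
 *    static void
 *    emit_const_user(struct fd_ringbuffer *ring,
 *          const struct ir3_shader_variant *v, uint32_t regid,
 *          uint32_t size, const uint32_t *user_buffer)
 *    {
 *       emit_const_asserts(ring, v, regid, size);
 *       ... gen-specific CP_LOAD_STATE packet with inline payload ...
 *    }
 *
 *    ... likewise emit_const_bo() and emit_const_ptrs() ...
 *
 *    #include "ir3_const.h"   // i.e. this file
 */
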
static void
emit_const_asserts(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v,
      uint32_t regid, uint32_t sizedwords)
{
   assert((regid % 4) == 0);
   assert((sizedwords % 4) == 0);
   assert(regid + sizedwords <= v->constlen * 4);
}

static void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   /* when we emit const state via ring (IB2) we need a WFI, but when
    * it is emit'd via stateobj, we don't
    */
   if (is_stateobj(ring))
      return;

   fd_wfi(batch, ring);
}

/**
 * Indirectly calculates size of cmdstream needed for ir3_emit_user_consts().
 * Returns number of packets, and total size of all the payload.
 *
 * The value can be a worst-case, ie. some shader variants may not read all
 * consts, etc.
 *
 * Returns size in dwords.
 */
static inline void
ir3_user_consts_size(struct ir3_ubo_analysis_state *state,
      unsigned *packets, unsigned *size)
{
   *packets = *size = 0;

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         *size += state->range[i].end - state->range[i].start;
         (*packets)++;
      }
   }
}

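/* For illustration, a hypothetical caller might size a ringbuffer or
 * stateobj up front (sketch; not the actual per-gen code):
 *
 *    unsigned packets, size;
 *    ir3_user_consts_size(&const_state->ubo_state, &packets, &size);
 *    ... reserve `size` payload dwords plus the per-packet header
 *        dwords for `packets` CP_LOAD_STATE packets, then call
 *        ir3_emit_user_consts() to fill it ...
 */
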
/**
 * Uploads sub-ranges of UBOs to the hardware's constant buffer (UBO access
 * outside of these ranges will be done using full UBO accesses in the
 * shader).
 */
static inline void
ir3_emit_user_consts(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      assert(!state->range[i].ubo.bindless);
      unsigned ubo = state->range[i].ubo.block;
      if (!(constbuf->enabled_mask & (1 << ubo)))
         continue;
      struct pipe_constant_buffer *cb = &constbuf->cb[ubo];

      uint32_t size = state->range[i].end - state->range[i].start;
      uint32_t offset = cb->buffer_offset + state->range[i].start;

      /* Pre-a6xx, we might have ranges enabled in the shader that aren't
       * used in the binning variant.
       */
      if (16 * v->constlen <= state->range[i].offset)
         continue;

      /* and even if the start of the const buffer is before
       * first_immediate, the end may not be:
       */
      size = MIN2(size, (16 * v->constlen) - state->range[i].offset);

      if (size == 0)
         continue;

      /* things should be aligned to vec4: */
      debug_assert((state->range[i].offset % 16) == 0);
      debug_assert((size % 16) == 0);
      debug_assert((offset % 16) == 0);

      if (cb->user_buffer) {
         emit_const_user(ring, v, state->range[i].offset / 4,
               size / 4, cb->user_buffer + state->range[i].start);
      } else {
         emit_const_prsc(ring, v, state->range[i].offset / 4,
               offset, size / 4, cb->buffer);
      }
   }
}

static inline void
ir3_emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.ubo;

   /* a6xx+ uses UBO state and ldc instead of pointers emitted in
    * const state and ldg:
    */
   if (ctx->screen->gpu_id >= 600)
      return;

   if (v->constlen > offset) {
      uint32_t params = const_state->num_ubos;
      uint32_t offsets[params];
      struct pipe_resource *prscs[params];

      for (uint32_t i = 0; i < params; i++) {
         struct pipe_constant_buffer *cb = &constbuf->cb[i];

         /* If we have user pointers (constbuf 0, aka GL uniforms), upload
          * them to a buffer now, and save it in the constbuf so that we
          * don't have to reupload until they get changed.
          */
         if (cb->user_buffer) {
            struct pipe_context *pctx = &ctx->base;
            u_upload_data(pctx->stream_uploader, 0,
                  cb->buffer_size,
                  64,
                  cb->user_buffer,
                  &cb->buffer_offset, &cb->buffer);
            cb->user_buffer = NULL;
         }

         if ((constbuf->enabled_mask & (1 << i)) && cb->buffer) {
            offsets[i] = cb->buffer_offset;
            prscs[i] = cb->buffer;
         } else {
            offsets[i] = 0;
            prscs[i] = NULL;
         }
      }

      assert(offset * 4 + params <= v->constlen * 4);

      emit_const_ptrs(ring, v, offset * 4, params, prscs, offsets);
   }
}

static inline void
ir3_emit_ssbo_sizes(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.ssbo_sizes;
   if (v->constlen > offset) {
      uint32_t sizes[align(const_state->ssbo_size.count, 4)];
      unsigned mask = const_state->ssbo_size.mask;

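      /* Pack each bound SSBO's size into the slot the compiler assigned
       * for it (ssbo_size.off[]), one buffer per bit set in the mask:
       */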
      while (mask) {
         unsigned index = u_bit_scan(&mask);
         unsigned off = const_state->ssbo_size.off[index];
         sizes[off] = sb->sb[index].buffer_size;
      }

      emit_const_user(ring, v, offset * 4, ARRAY_SIZE(sizes), sizes);
   }
}

static inline void
ir3_emit_image_dims(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.image_dims;
   if (v->constlen > offset) {
      uint32_t dims[align(const_state->image_dims.count, 4)];
      unsigned mask = const_state->image_dims.mask;

      while (mask) {
         struct pipe_image_view *img;
         struct fd_resource *rsc;
         unsigned index = u_bit_scan(&mask);
         unsigned off = const_state->image_dims.off[index];

         img = &si->si[index];
         rsc = fd_resource(img->resource);

         dims[off + 0] = util_format_get_blocksize(img->format);
         if (img->resource->target != PIPE_BUFFER) {
            struct fdl_slice *slice =
                  fd_resource_slice(rsc, img->u.tex.level);
            /* note for 2d/cube/etc images, even if re-interpreted
             * as a different color format, the pixel size should
             * be the same, so use original dimensions for y and z
             * stride:
             */
            dims[off + 1] = fd_resource_pitch(rsc, img->u.tex.level);
            /* see corresponding logic in fd_resource_offset(): */
            if (rsc->layout.layer_first) {
               dims[off + 2] = rsc->layout.layer_size;
            } else {
               dims[off + 2] = slice->size0;
            }
         } else {
            /* For buffer-backed images, the log2 of the format's
             * bytes-per-pixel is placed in the 2nd slot. This is useful
             * when emitting image_size instructions, for which we need
             * to divide by bpp for image buffers. Since the bpp
             * can only be power-of-two, the division is implemented
             * as a SHR, and for that it is handy to have the log2 of
             * bpp as a constant. (log2 = first-set-bit - 1)
             */
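            /* e.g. for a 4 byte/pixel buffer format: ffs(4) - 1 = 2,
             * so the shader turns a size in bytes into texels with
             * (size >> 2).
             */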
            dims[off + 1] = ffs(dims[off + 0]) - 1;
         }
      }
      uint32_t size = MIN2(ARRAY_SIZE(dims), v->constlen * 4 - offset * 4);

      emit_const_user(ring, v, offset * 4, size, dims);
   }
}

static inline void
ir3_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0)
      emit_const_user(ring, v, base, size, const_state->immediates);
}

static inline void
ir3_emit_link_map(struct fd_screen *screen,
      const struct ir3_shader_variant *producer,
      const struct ir3_shader_variant *v, struct fd_ringbuffer *ring)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.primitive_map;
   uint32_t patch_locs[MAX_VARYING] = { }, num_loc;

   num_loc = ir3_link_geometry_stages(producer, v, patch_locs);

   int size = DIV_ROUND_UP(num_loc, 4);

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0)
      emit_const_user(ring, v, base, size, patch_locs);
}

/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring)
{
   /* streamout addresses after driver-params: */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.tfbo;
   if (v->constlen > offset) {
      struct fd_streamout_stateobj *so = &ctx->streamout;
      struct ir3_stream_output_info *info = &v->shader->stream_output;
      uint32_t params = 4;
      uint32_t offsets[params];
      struct pipe_resource *prscs[params];

      for (uint32_t i = 0; i < params; i++) {
         struct pipe_stream_output_target *target = so->targets[i];

         if (target) {
            offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
                  target->buffer_offset;
            prscs[i] = target->buffer;
         } else {
            offsets[i] = 0;
            prscs[i] = NULL;
         }
      }

      assert(offset * 4 + params <= v->constlen * 4);

      emit_const_ptrs(ring, v, offset * 4, params, prscs, offsets);
   }
}

static inline uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
   struct fd_streamout_stateobj *so = &ctx->streamout;
   struct ir3_stream_output_info *info = &v->shader->stream_output;
   uint32_t maxvtxcnt = 0x7fffffff;

   if (ctx->screen->gpu_id >= 500)
      return 0;
   if (v->binning_pass)
      return 0;
   if (v->shader->stream_output.num_outputs == 0)
      return 0;
   if (so->num_targets == 0)
      return 0;

   /* offset to write to is:
    *
    *   total_vtxcnt = vtxcnt + offsets[i]
    *   offset = total_vtxcnt * stride[i]
    *
    *   offset =   vtxcnt * stride[i]       ; calculated in shader
    *            + offsets[i] * stride[i]   ; calculated at emit_tfbos()
    *
    * assuming for each vtx, each target buffer will have data written
    * up to 'offset + stride[i]', that leaves maxvtxcnt as:
    *
    *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
    *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
    *
    * but shader is actually doing a less-than (rather than less-than-
    * equal) check, so we can drop the -stride[i].
    *
    * TODO is assumption about `offset + stride[i]` legit?
    */
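   /* e.g. with stride[i] = 4 dwords (16 bytes) and buffer_size = 1024
    * bytes, that target allows maxvtxcnt = 1024 / 16 = 64 vertices.
    */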
   for (unsigned i = 0; i < so->num_targets; i++) {
      struct pipe_stream_output_target *target = so->targets[i];
      unsigned stride = info->stride[i] * 4;   /* convert dwords->bytes */
      if (target) {
         uint32_t max = target->buffer_size / stride;
         maxvtxcnt = MIN2(maxvtxcnt, max);
      }
   }

   return maxvtxcnt;
}

static inline void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, enum pipe_shader_type t)
{
   enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];

   /* When we use CP_SET_DRAW_STATE objects to emit constant state,
    * if we emit any of it we need to emit all.  This is because
    * we are using the same state-group-id each time for uniform
    * state, and if previous update is never evaluated (due to no
    * visible primitives in the current tile) then the new stateobj
    * completely replaces the old one.
    *
    * Possibly if we split up different parts of the const state to
    * different state-objects we could avoid this.
    */
   if (dirty && is_stateobj(ring))
      dirty = ~0;

   if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
      struct fd_constbuf_stateobj *constbuf;
      bool shader_dirty;

      constbuf = &ctx->constbuf[t];
      shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);

      ring_wfi(ctx->batch, ring);

      ir3_emit_user_consts(ctx->screen, v, ring, constbuf);
      ir3_emit_ubos(ctx, v, ring, constbuf);
      if (shader_dirty)
         ir3_emit_immediates(ctx->screen, v, ring);
   }

   if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
      struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
      ring_wfi(ctx->batch, ring);
      ir3_emit_ssbo_sizes(ctx->screen, v, ring, sb);
   }

   if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
      struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
      ring_wfi(ctx->batch, ring);
      ir3_emit_image_dims(ctx->screen, v, ring, si);
   }
}

static inline bool
ir3_needs_vs_driver_params(const struct ir3_shader_variant *v)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.driver_param;

   return v->constlen > offset;
}

static inline void
ir3_emit_vs_driver_params(const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_context *ctx,
      const struct pipe_draw_info *info)
{
   debug_assert(ir3_needs_vs_driver_params(v));

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.driver_param;
   uint32_t vertex_params[IR3_DP_VS_COUNT] = {
      [IR3_DP_DRAWID]      = 0,  /* filled by hw (CP_DRAW_INDIRECT_MULTI) */
      [IR3_DP_VTXID_BASE]  = info->index_size ?
            info->index_bias : info->start,
      [IR3_DP_INSTID_BASE] = info->start_instance,
      [IR3_DP_VTXCNT_MAX]  = max_tf_vtx(ctx, v),
   };
   if (v->key.ucp_enables) {
      struct pipe_clip_state *ucp = &ctx->ucp;
      unsigned pos = IR3_DP_UCP0_X;
      for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
         for (unsigned j = 0; j < 4; j++) {
            vertex_params[pos] = fui(ucp->ucp[i][j]);
            pos++;
         }
      }
   }

   /* Only emit as many params as needed, i.e. up to the highest enabled UCP
    * plane. However a binning pass may drop even some of these, so limit to
    * program max.
    */
   const uint32_t vertex_params_size = MIN2(
         const_state->num_driver_params,
         (v->constlen - offset) * 4);
   assert(vertex_params_size <= IR3_DP_VS_COUNT);

   bool needs_vtxid_base =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);

   /* for indirect draw, we need to copy VTXID_BASE from
    * indirect-draw parameters buffer.. which is annoying
    * and means we can't easily emit these consts in cmd
    * stream so need to copy them to bo.
    */
   if (info->indirect && needs_vtxid_base) {
      struct pipe_draw_indirect_info *indirect = info->indirect;
      struct pipe_resource *vertex_params_rsc =
            pipe_buffer_create(&ctx->screen->base,
                  PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
                  vertex_params_size * 4);
      unsigned src_off = info->indirect->offset;
      void *ptr;

      ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
      memcpy(ptr, vertex_params, vertex_params_size * 4);

      if (info->index_size) {
         /* indexed draw, index_bias is 4th field: */
         src_off += 3 * 4;
      } else {
         /* non-indexed draw, start is 3rd field: */
         src_off += 2 * 4;
      }

      /* copy index_bias or start from draw params: */
      ctx->screen->mem_to_mem(ring, vertex_params_rsc, 0,
            indirect->buffer, src_off, 1);

      emit_const_prsc(ring, v, offset * 4, 0,
            vertex_params_size, vertex_params_rsc);

      pipe_resource_reference(&vertex_params_rsc, NULL);
   } else {
      emit_const_user(ring, v, offset * 4,
            vertex_params_size, vertex_params);
   }

   /* if needed, emit stream-out buffer addresses: */
   if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
      emit_tfbos(ctx, v, ring);
   }
}

static inline void
ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, const struct pipe_draw_info *info)
{
   debug_assert(v->type == MESA_SHADER_VERTEX);

   emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);

   /* emit driver params every time: */
   if (info && ir3_needs_vs_driver_params(v)) {
      ring_wfi(ctx->batch, ring);
      ir3_emit_vs_driver_params(v, ring, ctx, info);
   }
}

static inline void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx)
{
   debug_assert(v->type == MESA_SHADER_FRAGMENT);

   emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}

/* emit compute-shader consts: */
static inline void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, const struct pipe_grid_info *info)
{
   debug_assert(gl_shader_stage_is_compute(v->type));

   emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);

   /* emit compute-shader driver-params: */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.driver_param;
   if (v->constlen > offset) {
      ring_wfi(ctx->batch, ring);

      if (info->indirect) {
         struct pipe_resource *indirect = NULL;
         unsigned indirect_offset;

         /* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
          * to be aligned more strongly than 4 bytes.  So in this case
          * we need a temporary buffer to copy NumWorkGroups.xyz to.
          *
          * TODO if previous compute job is writing to info->indirect,
          * we might need a WFI.. but since we currently flush for each
          * compute job, we are probably ok for now.
          */
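         /* e.g. an indirect_offset of 20 (0x14) is only 4-byte aligned,
          * so NumWorkGroups.xyz is staged through a scratch buffer at
          * offset 0:
          */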
         if (info->indirect_offset & 0xf) {
            indirect = pipe_buffer_create(&ctx->screen->base,
                  PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
                  0x1000);
            indirect_offset = 0;

            ctx->screen->mem_to_mem(ring, indirect, 0, info->indirect,
                  info->indirect_offset, 3);
         } else {
            pipe_resource_reference(&indirect, info->indirect);
            indirect_offset = info->indirect_offset;
         }

         emit_const_prsc(ring, v, offset * 4, indirect_offset, 16, indirect);

         pipe_resource_reference(&indirect, NULL);
      } else {
[IR3_DP_CS_COUNT
] = {
619 [IR3_DP_NUM_WORK_GROUPS_X
] = info
->grid
[0],
620 [IR3_DP_NUM_WORK_GROUPS_Y
] = info
->grid
[1],
621 [IR3_DP_NUM_WORK_GROUPS_Z
] = info
->grid
[2],
622 [IR3_DP_LOCAL_GROUP_SIZE_X
] = info
->block
[0],
623 [IR3_DP_LOCAL_GROUP_SIZE_Y
] = info
->block
[1],
624 [IR3_DP_LOCAL_GROUP_SIZE_Z
] = info
->block
[2],
626 uint32_t size
= MIN2(const_state
->num_driver_params
,
627 v
->constlen
* 4 - offset
* 4);
629 emit_const_user(ring
, v
, offset
* 4, size
, compute_params
);