/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
25 #include "fd6_const.h"
28 #include "ir3_const.h"
30 /* regid: base const register
31 * prsc or dwords: buffer containing constant values
32 * sizedwords: size of const value buffer
35 fd6_emit_const(struct fd_ringbuffer
*ring
, gl_shader_stage type
,
36 uint32_t regid
, uint32_t offset
, uint32_t sizedwords
,
37 const uint32_t *dwords
, struct pipe_resource
*prsc
)
40 struct fd_bo
*bo
= fd_resource(prsc
)->bo
;
42 if (fd6_geom_stage(type
)) {
43 OUT_PKT(ring
, CP_LOAD_STATE6_GEOM
,
46 .state_type
= ST6_CONSTANTS
,
47 .state_src
= SS6_INDIRECT
,
48 .state_block
= fd6_stage2shadersb(type
),
49 .num_unit
= DIV_ROUND_UP(sizedwords
, 4)
51 CP_LOAD_STATE6_EXT_SRC_ADDR(
57 OUT_PKT(ring
, CP_LOAD_STATE6_FRAG
,
60 .state_type
= ST6_CONSTANTS
,
61 .state_src
= SS6_INDIRECT
,
62 .state_block
= fd6_stage2shadersb(type
),
63 .num_unit
= DIV_ROUND_UP(sizedwords
, 4)
65 CP_LOAD_STATE6_EXT_SRC_ADDR(
72 /* NOTE we cheat a bit here, since we know mesa is aligning
73 * the size of the user buffer to 16 bytes. And we want to
74 * cut cycles in a hot path.
76 uint32_t align_sz
= align(sizedwords
, 4);
77 dwords
= (uint32_t *)&((uint8_t *)dwords
)[offset
];
79 if (fd6_geom_stage(type
)) {
80 OUT_PKTBUF(ring
, CP_LOAD_STATE6_GEOM
, dwords
, align_sz
,
83 .state_type
= ST6_CONSTANTS
,
84 .state_src
= SS6_DIRECT
,
85 .state_block
= fd6_stage2shadersb(type
),
86 .num_unit
= DIV_ROUND_UP(sizedwords
, 4)
92 OUT_PKTBUF(ring
, CP_LOAD_STATE6_FRAG
, dwords
, align_sz
,
95 .state_type
= ST6_CONSTANTS
,
96 .state_src
= SS6_DIRECT
,
97 .state_block
= fd6_stage2shadersb(type
),
98 .num_unit
= DIV_ROUND_UP(sizedwords
, 4)
108 is_stateobj(struct fd_ringbuffer
*ring
)
114 emit_const(struct fd_ringbuffer
*ring
,
115 const struct ir3_shader_variant
*v
, uint32_t dst_offset
,
116 uint32_t offset
, uint32_t size
, const void *user_buffer
,
117 struct pipe_resource
*buffer
)
119 /* TODO inline this */
120 assert(dst_offset
+ size
<= v
->constlen
* 4);
121 fd6_emit_const(ring
, v
->type
, dst_offset
,
122 offset
, size
, user_buffer
, buffer
);
126 emit_const_bo(struct fd_ringbuffer
*ring
,
127 const struct ir3_shader_variant
*v
, uint32_t dst_offset
,
128 uint32_t num
, struct pipe_resource
**prscs
, uint32_t *offsets
)
130 unreachable("shouldn't be called on a6xx");
134 emit_tess_bos(struct fd_ringbuffer
*ring
, struct fd6_emit
*emit
, struct ir3_shader_variant
*s
)
136 struct fd_context
*ctx
= emit
->ctx
;
137 const struct ir3_const_state
*const_state
= ir3_const_state(s
);
138 const unsigned regid
= const_state
->offsets
.primitive_param
* 4 + 4;
139 uint32_t dwords
= 16;
141 OUT_PKT7(ring
, fd6_stage2opcode(s
->type
), 3);
142 OUT_RING(ring
, CP_LOAD_STATE6_0_DST_OFF(regid
/ 4) |
143 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS
)|
144 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT
) |
145 CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(s
->type
)) |
146 CP_LOAD_STATE6_0_NUM_UNIT(dwords
/ 4));
147 OUT_RB(ring
, ctx
->batch
->tess_addrs_constobj
);
151 emit_stage_tess_consts(struct fd_ringbuffer
*ring
, struct ir3_shader_variant
*v
,
152 uint32_t *params
, int num_params
)
154 const struct ir3_const_state
*const_state
= ir3_const_state(v
);
155 const unsigned regid
= const_state
->offsets
.primitive_param
;
156 int size
= MIN2(1 + regid
, v
->constlen
) - regid
;
158 fd6_emit_const(ring
, v
->type
, regid
* 4, 0, num_params
, params
, NULL
);
162 emit_tess_consts(struct fd6_emit
*emit
)
164 struct fd_context
*ctx
= emit
->ctx
;
166 struct fd_ringbuffer
*constobj
= fd_submit_new_ringbuffer(
167 ctx
->batch
->submit
, 0x1000, FD_RINGBUFFER_STREAMING
);
169 /* VS sizes are in bytes since that's what STLW/LDLW use, while the HS
170 * size is dwords, since that's what LDG/STG use.
172 unsigned num_vertices
=
174 emit
->info
->vertices_per_patch
:
175 emit
->gs
->shader
->nir
->info
.gs
.vertices_in
;
177 uint32_t vs_params
[4] = {
178 emit
->vs
->output_size
* num_vertices
* 4, /* vs primitive stride */
179 emit
->vs
->output_size
* 4, /* vs vertex stride */
184 emit_stage_tess_consts(constobj
, emit
->vs
, vs_params
, ARRAY_SIZE(vs_params
));
187 uint32_t hs_params
[4] = {
188 emit
->vs
->output_size
* num_vertices
* 4, /* vs primitive stride */
189 emit
->vs
->output_size
* 4, /* vs vertex stride */
190 emit
->hs
->output_size
,
191 emit
->info
->vertices_per_patch
194 emit_stage_tess_consts(constobj
, emit
->hs
, hs_params
, ARRAY_SIZE(hs_params
));
195 emit_tess_bos(constobj
, emit
, emit
->hs
);
198 num_vertices
= emit
->gs
->shader
->nir
->info
.gs
.vertices_in
;
200 uint32_t ds_params
[4] = {
201 emit
->ds
->output_size
* num_vertices
* 4, /* ds primitive stride */
202 emit
->ds
->output_size
* 4, /* ds vertex stride */
203 emit
->hs
->output_size
, /* hs vertex stride (dwords) */
204 emit
->hs
->shader
->nir
->info
.tess
.tcs_vertices_out
207 emit_stage_tess_consts(constobj
, emit
->ds
, ds_params
, ARRAY_SIZE(ds_params
));
208 emit_tess_bos(constobj
, emit
, emit
->ds
);
212 struct ir3_shader_variant
*prev
;
218 uint32_t gs_params
[4] = {
219 prev
->output_size
* num_vertices
* 4, /* ds primitive stride */
220 prev
->output_size
* 4, /* ds vertex stride */
225 num_vertices
= emit
->gs
->shader
->nir
->info
.gs
.vertices_in
;
226 emit_stage_tess_consts(constobj
, emit
->gs
, gs_params
, ARRAY_SIZE(gs_params
));
229 fd6_emit_take_group(emit
, constobj
, FD6_GROUP_PRIMITIVE_PARAMS
, ENABLE_ALL
);
233 fd6_emit_ubos(struct fd_context
*ctx
, const struct ir3_shader_variant
*v
,
234 struct fd_ringbuffer
*ring
, struct fd_constbuf_stateobj
*constbuf
)
236 const struct ir3_const_state
*const_state
= ir3_const_state(v
);
237 int num_ubos
= const_state
->num_ubos
;
242 OUT_PKT7(ring
, fd6_stage2opcode(v
->type
), 3 + (2 * num_ubos
));
243 OUT_RING(ring
, CP_LOAD_STATE6_0_DST_OFF(0) |
244 CP_LOAD_STATE6_0_STATE_TYPE(ST6_UBO
)|
245 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT
) |
246 CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(v
->type
)) |
247 CP_LOAD_STATE6_0_NUM_UNIT(num_ubos
));
248 OUT_RING(ring
, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
249 OUT_RING(ring
, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
251 for (int i
= 0; i
< num_ubos
; i
++) {
252 struct pipe_constant_buffer
*cb
= &constbuf
->cb
[i
];
254 /* If we have user pointers (constbuf 0, aka GL uniforms), upload them
255 * to a buffer now, and save it in the constbuf so that we don't have
256 * to reupload until they get changed.
258 if (cb
->user_buffer
) {
259 struct pipe_context
*pctx
= &ctx
->base
;
260 u_upload_data(pctx
->stream_uploader
, 0,
264 &cb
->buffer_offset
, &cb
->buffer
);
265 cb
->user_buffer
= NULL
;
269 int size_vec4s
= DIV_ROUND_UP(cb
->buffer_size
, 16);
270 OUT_RELOC(ring
, fd_resource(cb
->buffer
)->bo
,
272 (uint64_t)A6XX_UBO_1_SIZE(size_vec4s
) << 32,
275 OUT_RING(ring
, 0xbad00000 | (i
<< 16));
276 OUT_RING(ring
, 0xbad00000 | (i
<< 16));
282 user_consts_cmdstream_size(struct ir3_shader_variant
*v
)
284 struct ir3_const_state
*const_state
= ir3_const_state(v
);
285 struct ir3_ubo_analysis_state
*ubo_state
= &const_state
->ubo_state
;
287 if (unlikely(!ubo_state
->cmdstream_size
)) {
288 unsigned packets
, size
;
290 /* pre-calculate size required for userconst stateobj: */
291 ir3_user_consts_size(ubo_state
, &packets
, &size
);
293 /* also account for UBO addresses: */
295 size
+= 2 * const_state
->num_ubos
;
297 unsigned sizedwords
= (4 * packets
) + size
;
298 ubo_state
->cmdstream_size
= sizedwords
* 4;
301 return ubo_state
->cmdstream_size
;
305 emit_user_consts(struct fd6_emit
*emit
)
307 static const enum pipe_shader_type types
[] = {
308 PIPE_SHADER_VERTEX
, PIPE_SHADER_TESS_CTRL
, PIPE_SHADER_TESS_EVAL
,
309 PIPE_SHADER_GEOMETRY
, PIPE_SHADER_FRAGMENT
,
311 struct ir3_shader_variant
*variants
[] = {
312 emit
->vs
, emit
->hs
, emit
->ds
, emit
->gs
, emit
->fs
,
314 struct fd_context
*ctx
= emit
->ctx
;
317 for (unsigned i
= 0; i
< ARRAY_SIZE(types
); i
++) {
320 sz
+= user_consts_cmdstream_size(variants
[i
]);
323 struct fd_ringbuffer
*constobj
= fd_submit_new_ringbuffer(
324 ctx
->batch
->submit
, sz
, FD_RINGBUFFER_STREAMING
);
326 for (unsigned i
= 0; i
< ARRAY_SIZE(types
); i
++) {
329 ir3_emit_user_consts(ctx
->screen
, variants
[i
], constobj
, &ctx
->constbuf
[types
[i
]]);
330 fd6_emit_ubos(ctx
, variants
[i
], constobj
, &ctx
->constbuf
[types
[i
]]);
333 fd6_emit_take_group(emit
, constobj
, FD6_GROUP_CONST
, ENABLE_ALL
);
337 fd6_emit_consts(struct fd6_emit
*emit
)
339 struct fd_context
*ctx
= emit
->ctx
;
340 struct fd6_context
*fd6_ctx
= fd6_context(ctx
);
342 if (emit
->dirty
& (FD_DIRTY_CONST
| FD_DIRTY_PROG
))
343 emit_user_consts(emit
);
345 if (emit
->key
.key
.has_gs
|| emit
->key
.key
.tessellation
)
346 emit_tess_consts(emit
);
348 /* if driver-params are needed, emit each time: */
349 const struct ir3_shader_variant
*vs
= emit
->vs
;
350 if (ir3_needs_vs_driver_params(vs
)) {
351 struct fd_ringbuffer
*dpconstobj
= fd_submit_new_ringbuffer(
352 ctx
->batch
->submit
, IR3_DP_VS_COUNT
* 4, FD_RINGBUFFER_STREAMING
);
353 ir3_emit_vs_driver_params(vs
, dpconstobj
, ctx
, emit
->info
);
354 fd6_emit_take_group(emit
, dpconstobj
, FD6_GROUP_VS_DRIVER_PARAMS
, ENABLE_ALL
);
355 fd6_ctx
->has_dp_state
= true;
356 } else if (fd6_ctx
->has_dp_state
) {
357 fd6_emit_take_group(emit
, NULL
, FD6_GROUP_VS_DRIVER_PARAMS
, ENABLE_ALL
);
358 fd6_ctx
->has_dp_state
= false;
363 fd6_emit_ibo_consts(struct fd6_emit
*emit
, const struct ir3_shader_variant
*v
,
364 enum pipe_shader_type stage
, struct fd_ringbuffer
*ring
)
366 struct fd_context
*ctx
= emit
->ctx
;
368 ir3_emit_ssbo_sizes(ctx
->screen
, v
, ring
, &ctx
->shaderbuf
[stage
]);
369 ir3_emit_image_dims(ctx
->screen
, v
, ring
, &ctx
->shaderimg
[stage
]);
373 fd6_emit_cs_consts(const struct ir3_shader_variant
*v
, struct fd_ringbuffer
*ring
,
374 struct fd_context
*ctx
, const struct pipe_grid_info
*info
)
376 ir3_emit_cs_consts(v
, ring
, ctx
, info
);
377 fd6_emit_ubos(ctx
, v
, ring
, &ctx
->constbuf
[PIPE_SHADER_COMPUTE
]);
/* Thin wrapper over the shared ir3 immediate-consts emit helper. */
void
fd6_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	ir3_emit_immediates(screen, v, ring);
}
/* Thin wrapper over the shared ir3 helper that emits the producer->
 * consumer output link map as constants.
 */
void
fd6_emit_link_map(struct fd_screen *screen,
		const struct ir3_shader_variant *producer,
		const struct ir3_shader_variant *v, struct fd_ringbuffer *ring)
{
	ir3_emit_link_map(screen, producer, v, ring);
}