freedreno/a6xx: don't emit a bogus size for empty cb slots
[mesa.git] / src/gallium/drivers/freedreno/a6xx/fd6_const.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "fd6_const.h"
#include "fd6_pack.h"

#include "ir3_const.h"

/* regid: base const register
 * prsc or dwords: buffer containing constant values -- exactly one of the
 *   two is used (prsc for an indirect load from a BO, dwords for a direct
 *   inline payload)
 * offset: byte offset into the buffer
 * sizedwords: size of const value buffer
 */
static void
fd6_emit_const(struct fd_ringbuffer *ring, gl_shader_stage type,
		uint32_t regid, uint32_t offset, uint32_t sizedwords,
		const uint32_t *dwords, struct pipe_resource *prsc)
{
	if (prsc) {
		struct fd_bo *bo = fd_resource(prsc)->bo;

		if (fd6_geom_stage(type)) {
			OUT_PKT(ring, CP_LOAD_STATE6_GEOM,
					CP_LOAD_STATE6_0(
						.dst_off = regid/4,
						.state_type = ST6_CONSTANTS,
						.state_src = SS6_INDIRECT,
						.state_block = fd6_stage2shadersb(type),
						.num_unit = DIV_ROUND_UP(sizedwords, 4)
					),
					CP_LOAD_STATE6_EXT_SRC_ADDR(
						.bo = bo,
						.bo_offset = offset
					)
				);
		} else {
			OUT_PKT(ring, CP_LOAD_STATE6_FRAG,
					CP_LOAD_STATE6_0(
						.dst_off = regid/4,
						.state_type = ST6_CONSTANTS,
						.state_src = SS6_INDIRECT,
						.state_block = fd6_stage2shadersb(type),
						.num_unit = DIV_ROUND_UP(sizedwords, 4)
					),
					CP_LOAD_STATE6_EXT_SRC_ADDR(
						.bo = bo,
						.bo_offset = offset
					)
				);
		}
	} else {
		/* NOTE we cheat a bit here, since we know mesa is aligning
		 * the size of the user buffer to 16 bytes.  And we want to
		 * cut cycles in a hot path.
		 */
		uint32_t align_sz = align(sizedwords, 4);
		dwords = (uint32_t *)&((uint8_t *)dwords)[offset];

		if (fd6_geom_stage(type)) {
			OUT_PKTBUF(ring, CP_LOAD_STATE6_GEOM, dwords, align_sz,
					CP_LOAD_STATE6_0(
						.dst_off = regid/4,
						.state_type = ST6_CONSTANTS,
						.state_src = SS6_DIRECT,
						.state_block = fd6_stage2shadersb(type),
						.num_unit = DIV_ROUND_UP(sizedwords, 4)
					),
					CP_LOAD_STATE6_1(),
					CP_LOAD_STATE6_2()
				);
		} else {
			OUT_PKTBUF(ring, CP_LOAD_STATE6_FRAG, dwords, align_sz,
					CP_LOAD_STATE6_0(
						.dst_off = regid/4,
						.state_type = ST6_CONSTANTS,
						.state_src = SS6_DIRECT,
						.state_block = fd6_stage2shadersb(type),
						.num_unit = DIV_ROUND_UP(sizedwords, 4)
					),
					CP_LOAD_STATE6_1(),
					CP_LOAD_STATE6_2()
				);
		}
	}
}

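/* Hook for the shared ir3_const.h helpers: on a6xx, const state is always
 * built into a stateobj rather than emitted directly into the draw ring,
 * so this can unconditionally return true.
 */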
static bool
is_stateobj(struct fd_ringbuffer *ring)
{
	return true;
}

void
emit_const(struct fd_ringbuffer *ring,
		const struct ir3_shader_variant *v, uint32_t dst_offset,
		uint32_t offset, uint32_t size, const void *user_buffer,
		struct pipe_resource *buffer)
{
	/* TODO inline this */
	assert(dst_offset + size <= v->constlen * 4);
	fd6_emit_const(ring, v->type, dst_offset,
			offset, size, user_buffer, buffer);
}

static void
emit_const_bo(struct fd_ringbuffer *ring,
		const struct ir3_shader_variant *v, uint32_t dst_offset,
		uint32_t num, struct pipe_resource **prscs, uint32_t *offsets)
{
	unreachable("shouldn't be called on a6xx");
}

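/* Emit an indirect CP_LOAD_STATE6 that loads the tess factor/param buffer
 * addresses into the const file from the batch's tess_addrs_constobj,
 * which is written separately once those buffers are allocated.
 */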
static void
emit_tess_bos(struct fd_ringbuffer *ring, struct fd6_emit *emit,
		struct ir3_shader_variant *s)
{
	struct fd_context *ctx = emit->ctx;
	const struct ir3_const_state *const_state = ir3_const_state(s);
	const unsigned regid = const_state->offsets.primitive_param * 4 + 4;
	uint32_t dwords = 16;

	OUT_PKT7(ring, fd6_stage2opcode(s->type), 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(s->type)) |
			CP_LOAD_STATE6_0_NUM_UNIT(dwords / 4));
	OUT_RB(ring, ctx->batch->tess_addrs_constobj);
}

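/* Emit the primitive_param constants for a single stage, but only if the
 * variant's constlen actually covers them (the compiler may shrink
 * constlen when the params end up unused).
 */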
static void
emit_stage_tess_consts(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
		uint32_t *params, int num_params)
{
	const struct ir3_const_state *const_state = ir3_const_state(v);
	const unsigned regid = const_state->offsets.primitive_param;
	int size = MIN2(1 + regid, v->constlen) - regid;
	if (size > 0)
		fd6_emit_const(ring, v->type, regid * 4, 0, num_params, params, NULL);
}

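/* Build a streaming stateobj with the primitive param constants for each
 * active geometry pipeline stage.
 */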
static void
emit_tess_consts(struct fd6_emit *emit)
{
	struct fd_context *ctx = emit->ctx;

	struct fd_ringbuffer *constobj = fd_submit_new_ringbuffer(
		ctx->batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);

	/* VS sizes are in bytes since that's what STLW/LDLW use, while the HS
	 * size is dwords, since that's what LDG/STG use.
	 */
	unsigned num_vertices =
		emit->hs ?
		emit->info->vertices_per_patch :
		emit->gs->shader->nir->info.gs.vertices_in;

	uint32_t vs_params[4] = {
		emit->vs->output_size * num_vertices * 4,	/* vs primitive stride */
		emit->vs->output_size * 4,			/* vs vertex stride */
		0,
		0
	};

	emit_stage_tess_consts(constobj, emit->vs, vs_params, ARRAY_SIZE(vs_params));

	if (emit->hs) {
		uint32_t hs_params[4] = {
			emit->vs->output_size * num_vertices * 4,	/* vs primitive stride */
			emit->vs->output_size * 4,			/* vs vertex stride */
			emit->hs->output_size,
			emit->info->vertices_per_patch
		};

		emit_stage_tess_consts(constobj, emit->hs, hs_params, ARRAY_SIZE(hs_params));
		emit_tess_bos(constobj, emit, emit->hs);

		if (emit->gs)
			num_vertices = emit->gs->shader->nir->info.gs.vertices_in;

		uint32_t ds_params[4] = {
			emit->ds->output_size * num_vertices * 4,	/* ds primitive stride */
			emit->ds->output_size * 4,			/* ds vertex stride */
			emit->hs->output_size,				/* hs vertex stride (dwords) */
			emit->hs->shader->nir->info.tess.tcs_vertices_out
		};

		emit_stage_tess_consts(constobj, emit->ds, ds_params, ARRAY_SIZE(ds_params));
		emit_tess_bos(constobj, emit, emit->ds);
	}

	if (emit->gs) {
		struct ir3_shader_variant *prev;
		if (emit->ds)
			prev = emit->ds;
		else
			prev = emit->vs;

		uint32_t gs_params[4] = {
			prev->output_size * num_vertices * 4,	/* prev (vs or ds) primitive stride */
			prev->output_size * 4,			/* prev (vs or ds) vertex stride */
			0,
			0,
		};

		num_vertices = emit->gs->shader->nir->info.gs.vertices_in;
		emit_stage_tess_consts(constobj, emit->gs, gs_params, ARRAY_SIZE(gs_params));
	}

	fd6_emit_take_group(emit, constobj, FD6_GROUP_PRIMITIVE_PARAMS, ENABLE_ALL);
}

static void
fd6_emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	const struct ir3_const_state *const_state = ir3_const_state(v);
	int num_ubos = const_state->num_ubos;

	if (!num_ubos)
		return;

	OUT_PKT7(ring, fd6_stage2opcode(v->type), 3 + (2 * num_ubos));
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_UBO) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(v->type)) |
			CP_LOAD_STATE6_0_NUM_UNIT(num_ubos));
	OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

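	/* Each UBO slot gets a two-dword descriptor: the 64-bit buffer
	 * address, with the buffer size (in units of vec4) packed into the
	 * upper dword.
	 */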
	for (int i = 0; i < num_ubos; i++) {
		struct pipe_constant_buffer *cb = &constbuf->cb[i];

		/* If we have user pointers (constbuf 0, aka GL uniforms), upload
		 * them to a buffer now, and save it in the constbuf so that we
		 * don't have to reupload until they get changed.
		 */
		if (cb->user_buffer) {
			struct pipe_context *pctx = &ctx->base;
			u_upload_data(pctx->stream_uploader, 0,
					cb->buffer_size, 64,
					cb->user_buffer,
					&cb->buffer_offset, &cb->buffer);
			cb->user_buffer = NULL;
		}

		if (cb->buffer) {
			int size_vec4s = DIV_ROUND_UP(cb->buffer_size, 16);
			OUT_RELOC(ring, fd_resource(cb->buffer)->bo,
					cb->buffer_offset,
					(uint64_t)A6XX_UBO_1_SIZE(size_vec4s) << 32,
					0);
		} else {
			/* Empty cb slot: emit an easily recognizable placeholder
			 * address, but with a zero size so we don't advertise a
			 * bogus range for a buffer that isn't bound.
			 */
			OUT_RING(ring, 0xbad00000 | (i << 16));
			OUT_RING(ring, A6XX_UBO_1_SIZE(0));
		}
	}
}

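/* Pre-compute (and cache) the size of cmdstream needed for a variant's
 * user const and UBO descriptor packets, so the stateobj can be sized
 * up front.
 */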
static unsigned
user_consts_cmdstream_size(struct ir3_shader_variant *v)
{
	struct ir3_const_state *const_state = ir3_const_state(v);
	struct ir3_ubo_analysis_state *ubo_state = &const_state->ubo_state;

	if (unlikely(!ubo_state->cmdstream_size)) {
		unsigned packets, size;

		/* pre-calculate size required for userconst stateobj: */
		ir3_user_consts_size(ubo_state, &packets, &size);

		/* also account for UBO addresses: */
		packets += 1;
		size += 2 * const_state->num_ubos;

		unsigned sizedwords = (4 * packets) + size;
		ubo_state->cmdstream_size = sizedwords * 4;
	}

	return ubo_state->cmdstream_size;
}

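/* Build a single streaming stateobj containing the user consts and UBO
 * descriptors for every enabled shader stage.
 */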
static void
emit_user_consts(struct fd6_emit *emit)
{
	static const enum pipe_shader_type types[] = {
		PIPE_SHADER_VERTEX, PIPE_SHADER_TESS_CTRL, PIPE_SHADER_TESS_EVAL,
		PIPE_SHADER_GEOMETRY, PIPE_SHADER_FRAGMENT,
	};
	struct ir3_shader_variant *variants[] = {
		emit->vs, emit->hs, emit->ds, emit->gs, emit->fs,
	};
	struct fd_context *ctx = emit->ctx;
	unsigned sz = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(types); i++) {
		if (!variants[i])
			continue;
		sz += user_consts_cmdstream_size(variants[i]);
	}

	struct fd_ringbuffer *constobj = fd_submit_new_ringbuffer(
		ctx->batch->submit, sz, FD_RINGBUFFER_STREAMING);

	for (unsigned i = 0; i < ARRAY_SIZE(types); i++) {
		if (!variants[i])
			continue;
		ir3_emit_user_consts(ctx->screen, variants[i], constobj, &ctx->constbuf[types[i]]);
		fd6_emit_ubos(ctx, variants[i], constobj, &ctx->constbuf[types[i]]);
	}

	fd6_emit_take_group(emit, constobj, FD6_GROUP_CONST, ENABLE_ALL);
}

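/* Top-level const emit for draws: user consts and UBOs when const or
 * program state is dirty, tess/geometry params when HS/GS stages are
 * present, and VS driver-params whenever the variant needs them.
 */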
void
fd6_emit_consts(struct fd6_emit *emit)
{
	struct fd_context *ctx = emit->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);

	if (emit->dirty & (FD_DIRTY_CONST | FD_DIRTY_PROG))
		emit_user_consts(emit);

	if (emit->key.key.has_gs || emit->key.key.tessellation)
		emit_tess_consts(emit);

	/* if driver-params are needed, emit each time: */
	const struct ir3_shader_variant *vs = emit->vs;
	if (ir3_needs_vs_driver_params(vs)) {
		struct fd_ringbuffer *dpconstobj = fd_submit_new_ringbuffer(
			ctx->batch->submit, IR3_DP_VS_COUNT * 4, FD_RINGBUFFER_STREAMING);
		ir3_emit_vs_driver_params(vs, dpconstobj, ctx, emit->info);
		fd6_emit_take_group(emit, dpconstobj, FD6_GROUP_VS_DRIVER_PARAMS, ENABLE_ALL);
		fd6_ctx->has_dp_state = true;
	} else if (fd6_ctx->has_dp_state) {
		fd6_emit_take_group(emit, NULL, FD6_GROUP_VS_DRIVER_PARAMS, ENABLE_ALL);
		fd6_ctx->has_dp_state = false;
	}
}

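/* Emit the SSBO size and image dimension constants consumed by the shader
 * variant's image/SSBO lowering.
 */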
void
fd6_emit_ibo_consts(struct fd6_emit *emit, const struct ir3_shader_variant *v,
		enum pipe_shader_type stage, struct fd_ringbuffer *ring)
{
	struct fd_context *ctx = emit->ctx;

	ir3_emit_ssbo_sizes(ctx->screen, v, ring, &ctx->shaderbuf[stage]);
	ir3_emit_image_dims(ctx->screen, v, ring, &ctx->shaderimg[stage]);
}

void
fd6_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_grid_info *info)
{
	ir3_emit_cs_consts(v, ring, ctx, info);
	fd6_emit_ubos(ctx, v, ring, &ctx->constbuf[PIPE_SHADER_COMPUTE]);
}

void
fd6_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	ir3_emit_immediates(screen, v, ring);
}

void
fd6_emit_link_map(struct fd_screen *screen,
		const struct ir3_shader_variant *producer,
		const struct ir3_shader_variant *v, struct fd_ringbuffer *ring)
{
	ir3_emit_link_map(screen, producer, v, ring);
}