freedreno: Split ir3_const's user buffer and indirect upload APIs.
[mesa.git] / src / gallium / drivers / freedreno / a6xx / fd6_const.c
1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "fd6_const.h"
26 #include "fd6_pack.h"
27
28 #define emit_const_user fd6_emit_const_user
29 #define emit_const_bo fd6_emit_const_bo
30 #include "ir3_const.h"
31
/* Emit a CP_LOAD_STATE6 packet that copies constant data inline from the
 * cmdstream ("user" data, SS6_DIRECT) into the shader's const file.
 *
 * regid: base const register (in dword/component units; divided by 4 to
 *        get the vec4 dst_off the packet wants)
 * v: shader variant the constants belong to; selects the GEOM vs FRAG
 *    packet opcode and the destination state block
 * sizedwords: size of const value buffer, in dwords
 * dwords: buffer containing constant values
 */
void
fd6_emit_const_user(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t sizedwords, const uint32_t *dwords)
{
   emit_const_asserts(ring, v, regid, sizedwords);

   /* NOTE we cheat a bit here, since we know mesa is aligning
    * the size of the user buffer to 16 bytes.  And we want to
    * cut cycles in a hot path.
    */
   uint32_t align_sz = align(sizedwords, 4);

   if (fd6_geom_stage(v->type)) {
      /* Geometry-pipeline stages (VS/HS/DS/GS) use the _GEOM opcode: */
      OUT_PKTBUF(ring, CP_LOAD_STATE6_GEOM, dwords, align_sz,
         CP_LOAD_STATE6_0(
            .dst_off = regid/4,
            .state_type = ST6_CONSTANTS,
            .state_src = SS6_DIRECT,
            .state_block = fd6_stage2shadersb(v->type),
            .num_unit = DIV_ROUND_UP(sizedwords, 4)   /* units are vec4s */
         ),
         CP_LOAD_STATE6_1(),
         CP_LOAD_STATE6_2()
      );
   } else {
      /* FS/CS take the _FRAG opcode; payload is otherwise identical: */
      OUT_PKTBUF(ring, CP_LOAD_STATE6_FRAG, dwords, align_sz,
         CP_LOAD_STATE6_0(
            .dst_off = regid/4,
            .state_type = ST6_CONSTANTS,
            .state_src = SS6_DIRECT,
            .state_block = fd6_stage2shadersb(v->type),
            .num_unit = DIV_ROUND_UP(sizedwords, 4)
         ),
         CP_LOAD_STATE6_1(),
         CP_LOAD_STATE6_2()
      );
   }
}
/* Like fd6_emit_const_user(), but the constant data is fetched indirectly
 * (SS6_INDIRECT) from a buffer object instead of being inlined in the
 * cmdstream.
 *
 * regid: base const register (dword units, packet takes vec4s)
 * offset: offset of the const data within 'bo' (passed straight through as
 *         the ext-src bo_offset)
 * sizedwords: size of const value buffer, in dwords
 * bo: buffer object containing the constant values
 */
void
fd6_emit_const_bo(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t offset, uint32_t sizedwords, struct fd_bo *bo)
{
   emit_const_asserts(ring, v, regid, sizedwords);

   if (fd6_geom_stage(v->type)) {
      OUT_PKT(ring, CP_LOAD_STATE6_GEOM,
         CP_LOAD_STATE6_0(
            .dst_off = regid/4,
            .state_type = ST6_CONSTANTS,
            .state_src = SS6_INDIRECT,
            .state_block = fd6_stage2shadersb(v->type),
            .num_unit = DIV_ROUND_UP(sizedwords, 4)   /* units are vec4s */
         ),
         CP_LOAD_STATE6_EXT_SRC_ADDR(
            .bo = bo,
            .bo_offset = offset
         )
      );
   } else {
      OUT_PKT(ring, CP_LOAD_STATE6_FRAG,
         CP_LOAD_STATE6_0(
            .dst_off = regid/4,
            .state_type = ST6_CONSTANTS,
            .state_src = SS6_INDIRECT,
            .state_block = fd6_stage2shadersb(v->type),
            .num_unit = DIV_ROUND_UP(sizedwords, 4)
         ),
         CP_LOAD_STATE6_EXT_SRC_ADDR(
            .bo = bo,
            .bo_offset = offset
         )
      );
   }
}
112
/* Hook consumed by the shared ir3_const.h code included above.  On a6xx,
 * const state is always emitted into stateobj ringbuffers, so this
 * unconditionally reports true.
 */
static bool
is_stateobj(struct fd_ringbuffer *ring)
{
   return true;
}
118
/* Hook required by ir3_const.h for emitting a table of const pointers to
 * pipe_resources.  The a6xx paths never take this route, so reaching it
 * is a driver bug.
 */
static void
emit_const_ptrs(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t dst_offset,
      uint32_t num, struct pipe_resource **prscs, uint32_t *offsets)
{
   unreachable("shouldn't be called on a6xx");
}
126
/* Emit an indirect CP_LOAD_STATE6 that loads 16 dwords (4 vec4s) of
 * tessellation buffer addresses into the consts just past the
 * primitive-param block, sourced from the batch's tess_addrs_constobj
 * ringbuffer (whose contents are filled in elsewhere, hence the
 * indirection).
 */
static void
emit_tess_bos(struct fd_ringbuffer *ring, struct fd6_emit *emit, struct ir3_shader_variant *s)
{
   struct fd_context *ctx = emit->ctx;
   const struct ir3_const_state *const_state = ir3_const_state(s);
   /* +4 dwords: land one vec4 after the primitive_param consts: */
   const unsigned regid = const_state->offsets.primitive_param * 4 + 4;
   uint32_t dwords = 16;

   OUT_PKT7(ring, fd6_stage2opcode(s->type), 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS)|
         CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(s->type)) |
         CP_LOAD_STATE6_0_NUM_UNIT(dwords / 4));
   /* ext-src address points at the tess-addrs stateobj: */
   OUT_RB(ring, ctx->batch->tess_addrs_constobj);
}
143
/* Emit one stage's primitive-param consts (one vec4 of params) inline.
 * Skipped when the variant's constlen doesn't actually reach the
 * primitive_param offset (size <= 0), i.e. the shader never reads them.
 * regid is in vec4 units here, hence regid * 4 in the call below.
 */
static void
emit_stage_tess_consts(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
      uint32_t *params, int num_params)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const unsigned regid = const_state->offsets.primitive_param;
   int size = MIN2(1 + regid, v->constlen) - regid;
   if (size > 0)
      fd6_emit_const_user(ring, v, regid * 4, num_params, params);
}
154
155 static void
156 emit_tess_consts(struct fd6_emit *emit)
157 {
158 struct fd_context *ctx = emit->ctx;
159
160 struct fd_ringbuffer *constobj = fd_submit_new_ringbuffer(
161 ctx->batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);
162
163 /* VS sizes are in bytes since that's what STLW/LDLW use, while the HS
164 * size is dwords, since that's what LDG/STG use.
165 */
166 unsigned num_vertices =
167 emit->hs ?
168 emit->info->vertices_per_patch :
169 emit->gs->shader->nir->info.gs.vertices_in;
170
171 uint32_t vs_params[4] = {
172 emit->vs->output_size * num_vertices * 4, /* vs primitive stride */
173 emit->vs->output_size * 4, /* vs vertex stride */
174 0,
175 0
176 };
177
178 emit_stage_tess_consts(constobj, emit->vs, vs_params, ARRAY_SIZE(vs_params));
179
180 if (emit->hs) {
181 uint32_t hs_params[4] = {
182 emit->vs->output_size * num_vertices * 4, /* vs primitive stride */
183 emit->vs->output_size * 4, /* vs vertex stride */
184 emit->hs->output_size,
185 emit->info->vertices_per_patch
186 };
187
188 emit_stage_tess_consts(constobj, emit->hs, hs_params, ARRAY_SIZE(hs_params));
189 emit_tess_bos(constobj, emit, emit->hs);
190
191 if (emit->gs)
192 num_vertices = emit->gs->shader->nir->info.gs.vertices_in;
193
194 uint32_t ds_params[4] = {
195 emit->ds->output_size * num_vertices * 4, /* ds primitive stride */
196 emit->ds->output_size * 4, /* ds vertex stride */
197 emit->hs->output_size, /* hs vertex stride (dwords) */
198 emit->hs->shader->nir->info.tess.tcs_vertices_out
199 };
200
201 emit_stage_tess_consts(constobj, emit->ds, ds_params, ARRAY_SIZE(ds_params));
202 emit_tess_bos(constobj, emit, emit->ds);
203 }
204
205 if (emit->gs) {
206 struct ir3_shader_variant *prev;
207 if (emit->ds)
208 prev = emit->ds;
209 else
210 prev = emit->vs;
211
212 uint32_t gs_params[4] = {
213 prev->output_size * num_vertices * 4, /* ds primitive stride */
214 prev->output_size * 4, /* ds vertex stride */
215 0,
216 0,
217 };
218
219 num_vertices = emit->gs->shader->nir->info.gs.vertices_in;
220 emit_stage_tess_consts(constobj, emit->gs, gs_params, ARRAY_SIZE(gs_params));
221 }
222
223 fd6_emit_take_group(emit, constobj, FD6_GROUP_PRIMITIVE_PARAMS, ENABLE_ALL);
224 }
225
/* Emit the ST6_UBO descriptor table for a stage: one 2-dword unit (address
 * lo/hi, with the size packed into the high dword via A6XX_UBO_1_SIZE) per
 * UBO the shader uses.  User-pointer constbufs (constbuf 0, aka GL
 * uniforms) are first uploaded to a real buffer and cached back into the
 * constbuf so they aren't re-uploaded until changed.
 */
static void
fd6_emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   int num_ubos = const_state->num_ubos;

   if (!num_ubos)
      return;

   /* 3 header dwords + 2 dwords per UBO descriptor: */
   OUT_PKT7(ring, fd6_stage2opcode(v->type), 3 + (2 * num_ubos));
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
         CP_LOAD_STATE6_0_STATE_TYPE(ST6_UBO)|
         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
         CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(v->type)) |
         CP_LOAD_STATE6_0_NUM_UNIT(num_ubos));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (int i = 0; i < num_ubos; i++) {
      struct pipe_constant_buffer *cb = &constbuf->cb[i];

      /* If we have user pointers (constbuf 0, aka GL uniforms), upload them
       * to a buffer now, and save it in the constbuf so that we don't have
       * to reupload until they get changed.
       */
      if (cb->user_buffer) {
         struct pipe_context *pctx = &ctx->base;
         u_upload_data(pctx->stream_uploader, 0,
               cb->buffer_size,
               64,   /* alignment */
               cb->user_buffer,
               &cb->buffer_offset, &cb->buffer);
         cb->user_buffer = NULL;
      }

      if (cb->buffer) {
         int size_vec4s = DIV_ROUND_UP(cb->buffer_size, 16);
         /* address lo/hi reloc, with the size OR'd into the high dword: */
         OUT_RELOC(ring, fd_resource(cb->buffer)->bo,
               cb->buffer_offset,
               (uint64_t)A6XX_UBO_1_SIZE(size_vec4s) << 32,
               0);
      } else {
         /* unbound slot: recognizable poison address, zero size */
         OUT_RING(ring, 0xbad00000 | (i << 16));
         OUT_RING(ring, A6XX_UBO_1_SIZE(0));
      }
   }
}
274
275 static unsigned
276 user_consts_cmdstream_size(struct ir3_shader_variant *v)
277 {
278 struct ir3_const_state *const_state = ir3_const_state(v);
279 struct ir3_ubo_analysis_state *ubo_state = &const_state->ubo_state;
280
281 if (unlikely(!ubo_state->cmdstream_size)) {
282 unsigned packets, size;
283
284 /* pre-calculate size required for userconst stateobj: */
285 ir3_user_consts_size(ubo_state, &packets, &size);
286
287 /* also account for UBO addresses: */
288 packets += 1;
289 size += 2 * const_state->num_ubos;
290
291 unsigned sizedwords = (4 * packets) + size;
292 ubo_state->cmdstream_size = sizedwords * 4;
293 }
294
295 return ubo_state->cmdstream_size;
296 }
297
298 static void
299 emit_user_consts(struct fd6_emit *emit)
300 {
301 static const enum pipe_shader_type types[] = {
302 PIPE_SHADER_VERTEX, PIPE_SHADER_TESS_CTRL, PIPE_SHADER_TESS_EVAL,
303 PIPE_SHADER_GEOMETRY, PIPE_SHADER_FRAGMENT,
304 };
305 struct ir3_shader_variant *variants[] = {
306 emit->vs, emit->hs, emit->ds, emit->gs, emit->fs,
307 };
308 struct fd_context *ctx = emit->ctx;
309 unsigned sz = 0;
310
311 for (unsigned i = 0; i < ARRAY_SIZE(types); i++) {
312 if (!variants[i])
313 continue;
314 sz += user_consts_cmdstream_size(variants[i]);
315 }
316
317 struct fd_ringbuffer *constobj = fd_submit_new_ringbuffer(
318 ctx->batch->submit, sz, FD_RINGBUFFER_STREAMING);
319
320 for (unsigned i = 0; i < ARRAY_SIZE(types); i++) {
321 if (!variants[i])
322 continue;
323 ir3_emit_user_consts(ctx->screen, variants[i], constobj, &ctx->constbuf[types[i]]);
324 fd6_emit_ubos(ctx, variants[i], constobj, &ctx->constbuf[types[i]]);
325 }
326
327 fd6_emit_take_group(emit, constobj, FD6_GROUP_CONST, ENABLE_ALL);
328 }
329
330 void
331 fd6_emit_consts(struct fd6_emit *emit)
332 {
333 struct fd_context *ctx = emit->ctx;
334 struct fd6_context *fd6_ctx = fd6_context(ctx);
335
336 if (emit->dirty & (FD_DIRTY_CONST | FD_DIRTY_PROG))
337 emit_user_consts(emit);
338
339 if (emit->key.key.has_gs || emit->key.key.tessellation)
340 emit_tess_consts(emit);
341
342 /* if driver-params are needed, emit each time: */
343 const struct ir3_shader_variant *vs = emit->vs;
344 if (ir3_needs_vs_driver_params(vs)) {
345 struct fd_ringbuffer *dpconstobj = fd_submit_new_ringbuffer(
346 ctx->batch->submit, IR3_DP_VS_COUNT * 4, FD_RINGBUFFER_STREAMING);
347 ir3_emit_vs_driver_params(vs, dpconstobj, ctx, emit->info);
348 fd6_emit_take_group(emit, dpconstobj, FD6_GROUP_VS_DRIVER_PARAMS, ENABLE_ALL);
349 fd6_ctx->has_dp_state = true;
350 } else if (fd6_ctx->has_dp_state) {
351 fd6_emit_take_group(emit, NULL, FD6_GROUP_VS_DRIVER_PARAMS, ENABLE_ALL);
352 fd6_ctx->has_dp_state = false;
353 }
354 }
355
/* Emit the SSBO-size and image-dimension consts for a stage with
 * IBOs/images bound, into the caller-provided ring.
 */
void
fd6_emit_ibo_consts(struct fd6_emit *emit, const struct ir3_shader_variant *v,
      enum pipe_shader_type stage, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = emit->ctx;

   ir3_emit_ssbo_sizes(ctx->screen, v, ring, &ctx->shaderbuf[stage]);
   ir3_emit_image_dims(ctx->screen, v, ring, &ctx->shaderimg[stage]);
}
365
/* Compute-dispatch const emission: shared ir3 CS consts (grid params etc,
 * handled in ir3_const.h) followed by the compute stage's UBO descriptors.
 */
void
fd6_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, const struct pipe_grid_info *info)
{
   ir3_emit_cs_consts(v, ring, ctx, info);
   fd6_emit_ubos(ctx, v, ring, &ctx->constbuf[PIPE_SHADER_COMPUTE]);
}
373
/* Thin wrapper so other fd6 code can reach the shared ir3 immediates
 * emission (which relies on the emit_const_* hooks defined above).
 */
void
fd6_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring)
{
   ir3_emit_immediates(screen, v, ring);
}
380
/* Thin wrapper over the shared ir3 link-map emission (consts describing the
 * producer stage's outputs, consumed by variant 'v').
 */
void
fd6_emit_link_map(struct fd_screen *screen,
      const struct ir3_shader_variant *producer,
      const struct ir3_shader_variant *v, struct fd_ringbuffer *ring)
{
   ir3_emit_link_map(screen, producer, v, ring);
}