freedreno/a6xx: pre-calculate expected vsc stream sizes
[mesa.git] / src / gallium / drivers / freedreno / a6xx / fd6_draw.c
1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robclark@freedesktop.org>
26 */
27
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_prim.h"
32
33 #include "freedreno_state.h"
34 #include "freedreno_resource.h"
35
36 #include "fd6_draw.h"
37 #include "fd6_context.h"
38 #include "fd6_emit.h"
39 #include "fd6_program.h"
40 #include "fd6_format.h"
41 #include "fd6_vsc.h"
42 #include "fd6_zsa.h"
43
44 static void
45 draw_emit_indirect(struct fd_ringbuffer *ring,
46 uint32_t draw0,
47 const struct pipe_draw_info *info,
48 unsigned index_offset)
49 {
50 struct fd_resource *ind = fd_resource(info->indirect->buffer);
51
52 if (info->index_size) {
53 struct pipe_resource *idx = info->index.resource;
54 unsigned max_indicies = (idx->width0 - index_offset) / info->index_size;
55
56 OUT_PKT7(ring, CP_DRAW_INDX_INDIRECT, 6);
57 OUT_RING(ring, draw0);
58 OUT_RELOC(ring, fd_resource(idx)->bo,
59 index_offset, 0, 0);
60 OUT_RING(ring, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
61 OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
62 } else {
63 OUT_PKT7(ring, CP_DRAW_INDIRECT, 3);
64 OUT_RING(ring, draw0);
65 OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
66 }
67 }
68
69 static void
70 draw_emit(struct fd_ringbuffer *ring,
71 uint32_t draw0,
72 const struct pipe_draw_info *info,
73 unsigned index_offset)
74 {
75 if (info->index_size) {
76 assert(!info->has_user_indices);
77
78 struct pipe_resource *idx_buffer = info->index.resource;
79 uint32_t idx_size = info->index_size * info->count;
80 uint32_t idx_offset = index_offset + info->start * info->index_size;
81
82 OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 7);
83 OUT_RING(ring, draw0);
84 OUT_RING(ring, info->instance_count); /* NumInstances */
85 OUT_RING(ring, info->count); /* NumIndices */
86 OUT_RING(ring, 0x0); /* XXX */
87 OUT_RELOC(ring, fd_resource(idx_buffer)->bo, idx_offset, 0, 0);
88 OUT_RING (ring, idx_size);
89 } else {
90 OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 3);
91 OUT_RING(ring, draw0);
92 OUT_RING(ring, info->instance_count); /* NumInstances */
93 OUT_RING(ring, info->count); /* NumIndices */
94 }
95 }
96
97 /* fixup dirty shader state in case some "unrelated" (from the state-
98 * tracker's perspective) state change causes us to switch to a
99 * different variant.
100 */
101 static void
102 fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
103 {
104 struct fd6_context *fd6_ctx = fd6_context(ctx);
105 struct ir3_shader_key *last_key = &fd6_ctx->last_key;
106
107 if (!ir3_shader_key_equal(last_key, key)) {
108 if (ir3_shader_key_changes_fs(last_key, key)) {
109 ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_PROG;
110 ctx->dirty |= FD_DIRTY_PROG;
111 }
112
113 if (ir3_shader_key_changes_vs(last_key, key)) {
114 ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
115 ctx->dirty |= FD_DIRTY_PROG;
116 }
117
118 fd6_ctx->last_key = *key;
119 }
120 }
121
122 static void
123 fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit)
124 {
125 if (ctx->last.dirty ||
126 (ctx->last.primitive_restart != emit->primitive_restart)) {
127 /* rasterizer state is effected by primitive-restart: */
128 ctx->dirty |= FD_DIRTY_RASTERIZER;
129 ctx->last.primitive_restart = emit->primitive_restart;
130 }
131
132 ctx->last.dirty = false;
133 }
134
135 static bool
136 fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
137 unsigned index_offset)
138 {
139 struct fd6_context *fd6_ctx = fd6_context(ctx);
140 struct fd6_emit emit = {
141 .ctx = ctx,
142 .vtx = &ctx->vtx,
143 .info = info,
144 .key = {
145 .vs = ctx->prog.vs,
146 .gs = ctx->prog.gs,
147 .fs = ctx->prog.fs,
148 .key = {
149 .color_two_side = ctx->rasterizer->light_twoside,
150 .vclamp_color = ctx->rasterizer->clamp_vertex_color,
151 .fclamp_color = ctx->rasterizer->clamp_fragment_color,
152 .rasterflat = ctx->rasterizer->flatshade,
153 .ucp_enables = ctx->rasterizer->clip_plane_enable,
154 .has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate),
155 .vsaturate_s = fd6_ctx->vsaturate_s,
156 .vsaturate_t = fd6_ctx->vsaturate_t,
157 .vsaturate_r = fd6_ctx->vsaturate_r,
158 .fsaturate_s = fd6_ctx->fsaturate_s,
159 .fsaturate_t = fd6_ctx->fsaturate_t,
160 .fsaturate_r = fd6_ctx->fsaturate_r,
161 .vsamples = ctx->tex[PIPE_SHADER_VERTEX].samples,
162 .fsamples = ctx->tex[PIPE_SHADER_FRAGMENT].samples,
163 .sample_shading = (ctx->min_samples > 1),
164 .msaa = (ctx->framebuffer.samples > 1),
165 },
166 },
167 .rasterflat = ctx->rasterizer->flatshade,
168 .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
169 .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
170 .primitive_restart = info->primitive_restart && info->index_size,
171 };
172
173 if (info->mode == PIPE_PRIM_PATCHES) {
174 emit.key.hs = ctx->prog.hs;
175 emit.key.ds = ctx->prog.ds;
176
177 shader_info *ds_info = &emit.key.ds->nir->info;
178 switch (ds_info->tess.primitive_mode) {
179 case GL_ISOLINES:
180 emit.key.key.tessellation = IR3_TESS_ISOLINES;
181 break;
182 case GL_TRIANGLES:
183 emit.key.key.tessellation = IR3_TESS_TRIANGLES;
184 break;
185 case GL_QUADS:
186 emit.key.key.tessellation = IR3_TESS_QUADS;
187 break;
188 default:
189 unreachable("bad tessmode");
190 }
191 }
192
193 if (emit.key.gs)
194 emit.key.key.has_gs = true;
195
196 if (!(emit.key.hs || emit.key.ds || emit.key.gs || info->indirect))
197 fd6_vsc_update_sizes(ctx->batch, info);
198
199 fixup_shader_state(ctx, &emit.key.key);
200
201 if (!(ctx->dirty & FD_DIRTY_PROG)) {
202 emit.prog = fd6_ctx->prog;
203 } else {
204 fd6_ctx->prog = fd6_emit_get_prog(&emit);
205 }
206
207 /* bail if compile failed: */
208 if (!fd6_ctx->prog)
209 return NULL;
210
211 emit.dirty = ctx->dirty; /* *after* fixup_shader_state() */
212 emit.bs = fd6_emit_get_prog(&emit)->bs;
213 emit.vs = fd6_emit_get_prog(&emit)->vs;
214 emit.hs = fd6_emit_get_prog(&emit)->hs;
215 emit.ds = fd6_emit_get_prog(&emit)->ds;
216 emit.gs = fd6_emit_get_prog(&emit)->gs;
217 emit.fs = fd6_emit_get_prog(&emit)->fs;
218
219 ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
220 ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
221 ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
222 ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
223 ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
224
225 /* figure out whether we need to disable LRZ write for binning
226 * pass using draw pass's fs:
227 */
228 emit.no_lrz_write = emit.fs->writes_pos || emit.fs->no_earlyz;
229
230 struct fd_ringbuffer *ring = ctx->batch->draw;
231 enum pc_di_primtype primtype = ctx->primtypes[info->mode];
232
233 uint32_t tess_draw0 = 0;
234 if (info->mode == PIPE_PRIM_PATCHES) {
235 shader_info *ds_info = &emit.ds->shader->nir->info;
236 uint32_t factor_stride;
237 uint32_t patch_type;
238
239 switch (ds_info->tess.primitive_mode) {
240 case GL_ISOLINES:
241 patch_type = TESS_ISOLINES;
242 factor_stride = 12;
243 break;
244 case GL_TRIANGLES:
245 patch_type = TESS_TRIANGLES;
246 factor_stride = 20;
247 break;
248 case GL_QUADS:
249 patch_type = TESS_QUADS;
250 factor_stride = 28;
251 break;
252 default:
253 unreachable("bad tessmode");
254 }
255
256 primtype = DI_PT_PATCHES0 + info->vertices_per_patch;
257 tess_draw0 |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(patch_type) |
258 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
259
260 ctx->batch->tessellation = true;
261 ctx->batch->tessparam_size = MAX2(ctx->batch->tessparam_size,
262 emit.hs->shader->output_size * 4 * info->count);
263 ctx->batch->tessfactor_size = MAX2(ctx->batch->tessfactor_size,
264 factor_stride * info->count);
265
266 if (!ctx->batch->tess_addrs_constobj) {
267 /* Reserve space for the bo address - we'll write them later in
268 * setup_tess_buffers(). We need 2 bo address, but indirect
269 * constant upload needs at least 4 vec4s.
270 */
271 unsigned size = 4 * 16;
272
273 ctx->batch->tess_addrs_constobj = fd_submit_new_ringbuffer(
274 ctx->batch->submit, size, FD_RINGBUFFER_STREAMING);
275
276 ctx->batch->tess_addrs_constobj->cur += size;
277 }
278 }
279
280 uint32_t index_start = info->index_size ? info->index_bias : info->start;
281 if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
282 OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
283 OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
284 ctx->last.index_start = index_start;
285 }
286
287 if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
288 OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
289 OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
290 ctx->last.instance_start = info->start_instance;
291 }
292
293 uint32_t restart_index = info->primitive_restart ? info->restart_index : 0xffffffff;
294 if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
295 OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
296 OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
297 ctx->last.restart_index = restart_index;
298 }
299
300 fixup_draw_state(ctx, &emit);
301
302 fd6_emit_state(ring, &emit);
303
304 /* for debug after a lock up, write a unique counter value
305 * to scratch7 for each draw, to make it easier to match up
306 * register dumps to cmdstream. The combination of IB
307 * (scratch6) and DRAW is enough to "triangulate" the
308 * particular draw that caused lockup.
309 */
310 emit_marker6(ring, 7);
311
312 uint32_t draw0 =
313 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
314 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
315 tess_draw0 |
316 COND(emit.key.gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE);
317
318 if (info->index_size) {
319 draw0 |=
320 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
321 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(fd4_size2indextype(info->index_size));
322 } else {
323 draw0 |=
324 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX);
325 }
326
327 if (info->indirect) {
328 draw_emit_indirect(ring, draw0, info, index_offset);
329 } else {
330 draw_emit(ring, draw0, info, index_offset);
331 }
332
333 emit_marker6(ring, 7);
334 fd_reset_wfi(ctx->batch);
335
336 if (emit.streamout_mask) {
337 struct fd_ringbuffer *ring = ctx->batch->draw;
338
339 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
340 if (emit.streamout_mask & (1 << i)) {
341 fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
342 }
343 }
344 }
345
346 fd_context_all_clean(ctx);
347
348 return true;
349 }
350
/* Emit a 2D blit that clears the LRZ (low-resolution Z) buffer for the
 * given zsbuf to the given depth value.  The commands go into a separate
 * ringbuffer (batch->lrz_clear) rather than the draw ring.  The exact
 * register/event sequence here is order-sensitive; do not reorder.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
	struct fd_ringbuffer *ring;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	/* replace any previous (now stale) lrz-clear cmdstream: */
	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
	}

	batch->lrz_clear = fd_submit_new_ringbuffer(batch->submit, 0x1000, 0);
	ring = batch->lrz_clear;

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
	emit_marker6(ring, 7);

	OUT_WFI5(ring);

	/* bypass-mode CCU config (per-gpu magic value): */
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_bypass);

	OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0x7ffff);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
	emit_marker6(ring, 7);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8C01, 1);
	OUT_RING(ring, 0x0);

	/* no texture source for a solid-fill blit, zero out SRC_INFO block: */
	OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A6XX_SP_2D_SRC_FORMAT, 1);
	OUT_RING(ring, 0x0000f410);

	/* LRZ buffer is treated as a 16-bit unorm color target: */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) |
			0x4f00080);

	OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) |
			0x4f00080);

	fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

	/* solid-fill source color = clear depth value: */
	OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
	OUT_RING(ring, fui(depth));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
	OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
			A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
			A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
	OUT_RELOCW(ring, zsbuf->lrz, 0, 0, 0);
	/* pitch in bytes (lrz_pitch is in 16-bit texels): */
	OUT_RING(ring, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf->lrz_pitch * 2));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A6XX_GRAS_2D_SRC_TL_X, 4);
	OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_X_X(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_X_X(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_Y_Y(0));

	/* destination rect covers the whole LRZ buffer: */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
	OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) |
			A6XX_GRAS_2D_DST_TL_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
			A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

	fd6_event_write(batch, ring, 0x3f, false);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_UNKNOWN_8E04_blit);

	/* kick off the blit: */
	OUT_PKT7(ring, CP_BLIT, 1);
	OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, 0x0); /* RB_UNKNOWN_8E04 */

	/* flush/invalidate so later rendering sees the cleared LRZ: */
	fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
	fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	fd6_cache_inv(batch, ring);
}
465
466 static bool is_z32(enum pipe_format format)
467 {
468 switch (format) {
469 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
470 case PIPE_FORMAT_Z32_UNORM:
471 case PIPE_FORMAT_Z32_FLOAT:
472 return true;
473 default:
474 return false;
475 }
476 }
477
478 static bool
479 fd6_clear(struct fd_context *ctx, unsigned buffers,
480 const union pipe_color_union *color, double depth, unsigned stencil)
481 {
482 struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
483 const bool has_depth = pfb->zsbuf;
484 unsigned color_buffers = buffers >> 2;
485 unsigned i;
486
487 /* If we're clearing after draws, fallback to 3D pipe clears. We could
488 * use blitter clears in the draw batch but then we'd have to patch up the
489 * gmem offsets. This doesn't seem like a useful thing to optimize for
490 * however.*/
491 if (ctx->batch->num_draws > 0)
492 return false;
493
494 foreach_bit(i, color_buffers)
495 ctx->batch->clear_color[i] = *color;
496 if (buffers & PIPE_CLEAR_DEPTH)
497 ctx->batch->clear_depth = depth;
498 if (buffers & PIPE_CLEAR_STENCIL)
499 ctx->batch->clear_stencil = stencil;
500
501 ctx->batch->fast_cleared |= buffers;
502
503 if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
504 struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
505 if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
506 zsbuf->lrz_valid = true;
507 fd6_clear_lrz(ctx->batch, zsbuf, depth);
508 }
509 }
510
511 return true;
512 }
513
514 void
515 fd6_draw_init(struct pipe_context *pctx)
516 {
517 struct fd_context *ctx = fd_context(pctx);
518 ctx->draw_vbo = fd6_draw_vbo;
519 ctx->clear = fd6_clear;
520 }