freedreno/a6xx: Only set emit.hs/ds when we're drawing patches
[mesa.git] / src / gallium / drivers / freedreno / a6xx / fd6_draw.c
1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robclark@freedesktop.org>
26 */
27
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_prim.h"
32
33 #include "freedreno_state.h"
34 #include "freedreno_resource.h"
35
36 #include "fd6_draw.h"
37 #include "fd6_context.h"
38 #include "fd6_emit.h"
39 #include "fd6_program.h"
40 #include "fd6_format.h"
41 #include "fd6_zsa.h"
42
43 static void
44 draw_emit_indirect(struct fd_ringbuffer *ring,
45 uint32_t draw0,
46 const struct pipe_draw_info *info,
47 unsigned index_offset)
48 {
49 struct fd_resource *ind = fd_resource(info->indirect->buffer);
50
51 if (info->index_size) {
52 struct pipe_resource *idx = info->index.resource;
53 unsigned max_indicies = (idx->width0 - index_offset) / info->index_size;
54
55 OUT_PKT7(ring, CP_DRAW_INDX_INDIRECT, 6);
56 OUT_RING(ring, draw0);
57 OUT_RELOC(ring, fd_resource(idx)->bo,
58 index_offset, 0, 0);
59 OUT_RING(ring, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
60 OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
61 } else {
62 OUT_PKT7(ring, CP_DRAW_INDIRECT, 3);
63 OUT_RING(ring, draw0);
64 OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
65 }
66 }
67
68 static void
69 draw_emit(struct fd_ringbuffer *ring,
70 uint32_t draw0,
71 const struct pipe_draw_info *info,
72 unsigned index_offset)
73 {
74 if (info->index_size) {
75 assert(!info->has_user_indices);
76
77 struct pipe_resource *idx_buffer = info->index.resource;
78 uint32_t idx_size = info->index_size * info->count;
79 uint32_t idx_offset = index_offset + info->start * info->index_size;
80
81 OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 7);
82 OUT_RING(ring, draw0);
83 OUT_RING(ring, info->instance_count); /* NumInstances */
84 OUT_RING(ring, info->count); /* NumIndices */
85 OUT_RING(ring, 0x0); /* XXX */
86 OUT_RELOC(ring, fd_resource(idx_buffer)->bo, idx_offset, 0, 0);
87 OUT_RING (ring, idx_size);
88 } else {
89 OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 3);
90 OUT_RING(ring, draw0);
91 OUT_RING(ring, info->instance_count); /* NumInstances */
92 OUT_RING(ring, info->count); /* NumIndices */
93 }
94 }
95
96 /* fixup dirty shader state in case some "unrelated" (from the state-
97 * tracker's perspective) state change causes us to switch to a
98 * different variant.
99 */
100 static void
101 fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
102 {
103 struct fd6_context *fd6_ctx = fd6_context(ctx);
104 struct ir3_shader_key *last_key = &fd6_ctx->last_key;
105
106 if (!ir3_shader_key_equal(last_key, key)) {
107 if (ir3_shader_key_changes_fs(last_key, key)) {
108 ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_PROG;
109 ctx->dirty |= FD_DIRTY_PROG;
110 }
111
112 if (ir3_shader_key_changes_vs(last_key, key)) {
113 ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
114 ctx->dirty |= FD_DIRTY_PROG;
115 }
116
117 fd6_ctx->last_key = *key;
118 }
119 }
120
121 static bool
122 fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
123 unsigned index_offset)
124 {
125 struct fd6_context *fd6_ctx = fd6_context(ctx);
126 struct fd6_emit emit = {
127 .ctx = ctx,
128 .vtx = &ctx->vtx,
129 .info = info,
130 .key = {
131 .vs = ctx->prog.vs,
132 .gs = ctx->prog.gs,
133 .fs = ctx->prog.fs,
134 .key = {
135 .color_two_side = ctx->rasterizer->light_twoside,
136 .vclamp_color = ctx->rasterizer->clamp_vertex_color,
137 .fclamp_color = ctx->rasterizer->clamp_fragment_color,
138 .rasterflat = ctx->rasterizer->flatshade,
139 .ucp_enables = ctx->rasterizer->clip_plane_enable,
140 .has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate),
141 .vsaturate_s = fd6_ctx->vsaturate_s,
142 .vsaturate_t = fd6_ctx->vsaturate_t,
143 .vsaturate_r = fd6_ctx->vsaturate_r,
144 .fsaturate_s = fd6_ctx->fsaturate_s,
145 .fsaturate_t = fd6_ctx->fsaturate_t,
146 .fsaturate_r = fd6_ctx->fsaturate_r,
147 .vsamples = ctx->tex[PIPE_SHADER_VERTEX].samples,
148 .fsamples = ctx->tex[PIPE_SHADER_FRAGMENT].samples,
149 .sample_shading = (ctx->min_samples > 1),
150 .msaa = (ctx->framebuffer.samples > 1),
151 },
152 },
153 .rasterflat = ctx->rasterizer->flatshade,
154 .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
155 .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
156 };
157
158 if (info->mode == PIPE_PRIM_PATCHES) {
159 emit.key.hs = ctx->prog.hs;
160 emit.key.ds = ctx->prog.ds;
161
162 shader_info *ds_info = &emit.key.ds->nir->info;
163 switch (ds_info->tess.primitive_mode) {
164 case GL_ISOLINES:
165 emit.key.key.tessellation = IR3_TESS_ISOLINES;
166 break;
167 case GL_TRIANGLES:
168 emit.key.key.tessellation = IR3_TESS_TRIANGLES;
169 break;
170 case GL_QUADS:
171 emit.key.key.tessellation = IR3_TESS_QUADS;
172 break;
173 default:
174 unreachable("bad tessmode");
175 }
176 }
177
178 if (emit.key.gs)
179 emit.key.key.has_gs = true;
180
181 fixup_shader_state(ctx, &emit.key.key);
182
183 if (!(ctx->dirty & FD_DIRTY_PROG)) {
184 emit.prog = fd6_ctx->prog;
185 } else {
186 fd6_ctx->prog = fd6_emit_get_prog(&emit);
187 }
188
189 /* bail if compile failed: */
190 if (!fd6_ctx->prog)
191 return NULL;
192
193 emit.dirty = ctx->dirty; /* *after* fixup_shader_state() */
194 emit.bs = fd6_emit_get_prog(&emit)->bs;
195 emit.vs = fd6_emit_get_prog(&emit)->vs;
196 emit.hs = fd6_emit_get_prog(&emit)->hs;
197 emit.ds = fd6_emit_get_prog(&emit)->ds;
198 emit.gs = fd6_emit_get_prog(&emit)->gs;
199 emit.fs = fd6_emit_get_prog(&emit)->fs;
200
201 ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
202 ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
203 ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
204 ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
205 ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
206
207 /* figure out whether we need to disable LRZ write for binning
208 * pass using draw pass's fs:
209 */
210 emit.no_lrz_write = emit.fs->writes_pos || emit.fs->no_earlyz;
211
212 struct fd_ringbuffer *ring = ctx->batch->draw;
213 enum pc_di_primtype primtype = ctx->primtypes[info->mode];
214
215 uint32_t tess_draw0 = 0;
216 if (info->mode == PIPE_PRIM_PATCHES) {
217 shader_info *ds_info = &emit.ds->shader->nir->info;
218 uint32_t factor_stride;
219 uint32_t patch_type;
220
221 switch (ds_info->tess.primitive_mode) {
222 case GL_ISOLINES:
223 patch_type = TESS_ISOLINES;
224 factor_stride = 12;
225 break;
226 case GL_TRIANGLES:
227 patch_type = TESS_TRIANGLES;
228 factor_stride = 20;
229 break;
230 case GL_QUADS:
231 patch_type = TESS_QUADS;
232 factor_stride = 28;
233 break;
234 default:
235 unreachable("bad tessmode");
236 }
237
238 primtype = DI_PT_PATCHES0 + info->vertices_per_patch;
239 tess_draw0 |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(patch_type) |
240 CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
241
242 ctx->batch->tessellation = true;
243 ctx->batch->tessparam_size = MAX2(ctx->batch->tessparam_size,
244 emit.hs->shader->output_size * 4 * info->count);
245 ctx->batch->tessfactor_size = MAX2(ctx->batch->tessfactor_size,
246 factor_stride * info->count);
247
248 if (!ctx->batch->tess_addrs_constobj) {
249 /* Reserve space for the bo address - we'll write them later in
250 * setup_tess_buffers(). We need 2 bo address, but indirect
251 * constant upload needs at least 4 vec4s.
252 */
253 unsigned size = 4 * 16;
254
255 ctx->batch->tess_addrs_constobj = fd_submit_new_ringbuffer(
256 ctx->batch->submit, size, FD_RINGBUFFER_STREAMING);
257
258 ctx->batch->tess_addrs_constobj->cur += size;
259 }
260 }
261
262 fd6_emit_state(ring, &emit);
263
264 OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 2);
265 OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
266 OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
267
268 OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
269 OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
270 info->restart_index : 0xffffffff);
271
272 /* for debug after a lock up, write a unique counter value
273 * to scratch7 for each draw, to make it easier to match up
274 * register dumps to cmdstream. The combination of IB
275 * (scratch6) and DRAW is enough to "triangulate" the
276 * particular draw that caused lockup.
277 */
278 emit_marker6(ring, 7);
279
280 uint32_t draw0 =
281 CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
282 CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
283 tess_draw0 |
284 COND(emit.key.gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE);
285
286 if (info->index_size) {
287 draw0 |=
288 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
289 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(fd4_size2indextype(info->index_size));
290 } else {
291 draw0 |=
292 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX);
293 }
294
295 if (info->indirect) {
296 draw_emit_indirect(ring, draw0, info, index_offset);
297 } else {
298 draw_emit(ring, draw0, info, index_offset);
299 }
300
301 emit_marker6(ring, 7);
302 fd_reset_wfi(ctx->batch);
303
304 if (emit.streamout_mask) {
305 struct fd_ringbuffer *ring = ctx->batch->draw;
306
307 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
308 if (emit.streamout_mask & (1 << i)) {
309 fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
310 }
311 }
312 }
313
314 fd_context_all_clean(ctx);
315
316 return true;
317 }
318
/* Emit a fast-clear of the zsbuf's LRZ buffer into the batch's dedicated
 * lrz_clear ringbuffer.  The clear is implemented as a 2D solid-color
 * blit (CP_BLIT / BLIT_OP_SCALE) of `depth` into the LRZ buffer, which
 * is written as R16_UNORM with pitch lrz_pitch*2 bytes.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
	struct fd_ringbuffer *ring;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	// TODO mid-frame clears (ie. app doing crazy stuff)??  Maybe worth
	// splitting both clear and lrz clear out into their own rb's.  And
	// just throw away any draws prior to clear.  (Anything not fullscreen
	// clear, just fallback to generic path that treats it as a normal
	// draw

	/* Lazily allocate the per-batch lrz_clear ringbuffer on first use. */
	if (!batch->lrz_clear) {
		batch->lrz_clear = fd_submit_new_ringbuffer(batch->submit, 0x1000, 0);
	}

	ring = batch->lrz_clear;

	/* Switch to bypass (direct/sysmem) rendering mode for the 2D blit: */
	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
	emit_marker6(ring, 7);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_bypass);

	/* NOTE(review): 0x7ffff looks like an "invalidate all shader state"
	 * mask for HLSQ_UPDATE_CNTL — confirm against a6xx register docs.
	 */
	OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0x7ffff);

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(0xc));
	emit_marker6(ring, 7);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8C01, 1);
	OUT_RING(ring, 0x0);

	/* No source texture for a solid-color blit — zero out SP_PS_2D_SRC_INFO
	 * and the following src state regs:
	 */
	OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* NOTE(review): magic 2D-src-format value — meaning not visible here. */
	OUT_PKT4(ring, REG_A6XX_SP_2D_SRC_FORMAT, 1);
	OUT_RING(ring, 0x0000f410);

	/* Configure the 2D blit as R16_UNORM solid fill (0x4f00080 presumably
	 * includes the solid-color enable bit — TODO confirm):
	 */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
			0x4f00080);

	OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
			0x4f00080);

	fd6_event_write(batch, ring, UNK_1D, true);
	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

	/* The clear value: the depth value to seed LRZ with, in C0. */
	OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
	OUT_RING(ring, fui(depth));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Destination: the LRZ bo, linear R16_UNORM, pitch in bytes. */
	OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
	OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(RB6_R16_UNORM) |
			A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
			A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
	OUT_RELOCW(ring, zsbuf->lrz, 0, 0, 0);
	OUT_RING(ring, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf->lrz_pitch * 2));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Source rect is degenerate (unused for solid fill). */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_SRC_TL_X, 4);
	OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_X_X(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_X_X(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_Y_Y(0));

	/* Destination rect covers the whole LRZ surface (BR is inclusive). */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
	OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) |
			A6XX_GRAS_2D_DST_TL_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
			A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

	fd6_event_write(batch, ring, 0x3f, false);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_UNKNOWN_8E04_blit);

	/* Kick the actual blit: */
	OUT_PKT7(ring, CP_BLIT, 1);
	OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, 0x0);  /* RB_UNKNOWN_8E04 */

	/* Flush/invalidate so later draws observe the cleared LRZ buffer: */
	fd6_event_write(batch, ring, UNK_1D, true);
	fd6_event_write(batch, ring, FACENESS_FLUSH, true);
	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	fd6_cache_inv(batch, ring);
}
438
439 static bool is_z32(enum pipe_format format)
440 {
441 switch (format) {
442 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
443 case PIPE_FORMAT_Z32_UNORM:
444 case PIPE_FORMAT_Z32_FLOAT:
445 return true;
446 default:
447 return false;
448 }
449 }
450
451 static bool
452 fd6_clear(struct fd_context *ctx, unsigned buffers,
453 const union pipe_color_union *color, double depth, unsigned stencil)
454 {
455 struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
456 const bool has_depth = pfb->zsbuf;
457 unsigned color_buffers = buffers >> 2;
458 unsigned i;
459
460 /* If we're clearing after draws, fallback to 3D pipe clears. We could
461 * use blitter clears in the draw batch but then we'd have to patch up the
462 * gmem offsets. This doesn't seem like a useful thing to optimize for
463 * however.*/
464 if (ctx->batch->num_draws > 0)
465 return false;
466
467 foreach_bit(i, color_buffers)
468 ctx->batch->clear_color[i] = *color;
469 if (buffers & PIPE_CLEAR_DEPTH)
470 ctx->batch->clear_depth = depth;
471 if (buffers & PIPE_CLEAR_STENCIL)
472 ctx->batch->clear_stencil = stencil;
473
474 ctx->batch->fast_cleared |= buffers;
475
476 if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
477 struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
478 if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
479 zsbuf->lrz_valid = true;
480 fd6_clear_lrz(ctx->batch, zsbuf, depth);
481 }
482 }
483
484 return true;
485 }
486
487 void
488 fd6_draw_init(struct pipe_context *pctx)
489 {
490 struct fd_context *ctx = fd_context(pctx);
491 ctx->draw_vbo = fd6_draw_vbo;
492 ctx->clear = fd6_clear;
493 }