freedreno/ir3: rename has_kill to no_earlyz
[mesa.git] / src / gallium / drivers / freedreno / a6xx / fd6_draw.c
1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robclark@freedesktop.org>
26 */
27
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_prim.h"
32
33 #include "freedreno_state.h"
34 #include "freedreno_resource.h"
35
36 #include "fd6_draw.h"
37 #include "fd6_context.h"
38 #include "fd6_emit.h"
39 #include "fd6_program.h"
40 #include "fd6_format.h"
41 #include "fd6_zsa.h"
42
43 /* some bits in common w/ a4xx: */
44 #include "a4xx/fd4_draw.h"
45
46 static void
47 draw_emit_indirect(struct fd_batch *batch, struct fd_ringbuffer *ring,
48 enum pc_di_primtype primtype,
49 const struct pipe_draw_info *info,
50 unsigned index_offset)
51 {
52 struct fd_resource *ind = fd_resource(info->indirect->buffer);
53
54 if (info->index_size) {
55 struct pipe_resource *idx = info->index.resource;
56 unsigned max_indicies = idx->width0 / info->index_size;
57
58 OUT_PKT7(ring, CP_DRAW_INDX_INDIRECT, 6);
59 OUT_RINGP(ring, DRAW4(primtype, DI_SRC_SEL_DMA,
60 fd4_size2indextype(info->index_size), 0),
61 &batch->draw_patches);
62 OUT_RELOC(ring, fd_resource(idx)->bo,
63 index_offset, 0, 0);
64 // XXX: Check A5xx vs A6xx
65 OUT_RING(ring, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
66 OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
67 } else {
68 OUT_PKT7(ring, CP_DRAW_INDIRECT, 3);
69 OUT_RINGP(ring, DRAW4(primtype, DI_SRC_SEL_AUTO_INDEX, 0, 0),
70 &batch->draw_patches);
71 OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
72 }
73 }
74
75 static void
76 draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
77 enum pc_di_primtype primtype,
78 const struct pipe_draw_info *info,
79 unsigned index_offset)
80 {
81 if (info->index_size) {
82 assert(!info->has_user_indices);
83
84 struct pipe_resource *idx_buffer = info->index.resource;
85 uint32_t idx_size = info->index_size * info->count;
86 uint32_t idx_offset = index_offset + info->start * info->index_size;
87
88 /* leave vis mode blank for now, it will be patched up when
89 * we know if we are binning or not
90 */
91 uint32_t draw = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
92 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
93 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(fd4_size2indextype(info->index_size)) |
94 0x2000;
95
96 OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 7);
97 OUT_RINGP(ring, draw, &batch->draw_patches);
98 OUT_RING(ring, info->instance_count); /* NumInstances */
99 OUT_RING(ring, info->count); /* NumIndices */
100 OUT_RING(ring, 0x0); /* XXX */
101 OUT_RELOC(ring, fd_resource(idx_buffer)->bo, idx_offset, 0, 0);
102 OUT_RING (ring, idx_size);
103 } else {
104 /* leave vis mode blank for now, it will be patched up when
105 * we know if we are binning or not
106 */
107 uint32_t draw = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
108 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
109 0x2000;
110
111 OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 3);
112 OUT_RINGP(ring, draw, &batch->draw_patches);
113 OUT_RING(ring, info->instance_count); /* NumInstances */
114 OUT_RING(ring, info->count); /* NumIndices */
115 }
116 }
117
118 /* fixup dirty shader state in case some "unrelated" (from the state-
119 * tracker's perspective) state change causes us to switch to a
120 * different variant.
121 */
122 static void
123 fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
124 {
125 struct fd6_context *fd6_ctx = fd6_context(ctx);
126 struct ir3_shader_key *last_key = &fd6_ctx->last_key;
127
128 if (!ir3_shader_key_equal(last_key, key)) {
129 if (ir3_shader_key_changes_fs(last_key, key)) {
130 ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_PROG;
131 ctx->dirty |= FD_DIRTY_PROG;
132 }
133
134 if (ir3_shader_key_changes_vs(last_key, key)) {
135 ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
136 ctx->dirty |= FD_DIRTY_PROG;
137 }
138
139 fd6_ctx->last_key = *key;
140 }
141 }
142
143 static bool
144 fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
145 unsigned index_offset)
146 {
147 struct fd6_context *fd6_ctx = fd6_context(ctx);
148 struct fd6_emit emit = {
149 .ctx = ctx,
150 .vtx = &ctx->vtx,
151 .info = info,
152 .key = {
153 .vs = ctx->prog.vp,
154 .fs = ctx->prog.fp,
155 .key = {
156 .color_two_side = ctx->rasterizer->light_twoside,
157 .vclamp_color = ctx->rasterizer->clamp_vertex_color,
158 .fclamp_color = ctx->rasterizer->clamp_fragment_color,
159 .rasterflat = ctx->rasterizer->flatshade,
160 .ucp_enables = ctx->rasterizer->clip_plane_enable,
161 .has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate),
162 .vsaturate_s = fd6_ctx->vsaturate_s,
163 .vsaturate_t = fd6_ctx->vsaturate_t,
164 .vsaturate_r = fd6_ctx->vsaturate_r,
165 .fsaturate_s = fd6_ctx->fsaturate_s,
166 .fsaturate_t = fd6_ctx->fsaturate_t,
167 .fsaturate_r = fd6_ctx->fsaturate_r,
168 .vsamples = ctx->tex[PIPE_SHADER_VERTEX].samples,
169 .fsamples = ctx->tex[PIPE_SHADER_FRAGMENT].samples,
170 }
171 },
172 .rasterflat = ctx->rasterizer->flatshade,
173 .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
174 .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
175 };
176
177 fixup_shader_state(ctx, &emit.key.key);
178
179 if (!(ctx->dirty & FD_DIRTY_PROG)) {
180 emit.prog = fd6_ctx->prog;
181 } else {
182 fd6_ctx->prog = fd6_emit_get_prog(&emit);
183 }
184
185 /* bail if compile failed: */
186 if (!fd6_ctx->prog)
187 return NULL;
188
189 emit.dirty = ctx->dirty; /* *after* fixup_shader_state() */
190 emit.bs = fd6_emit_get_prog(&emit)->bs;
191 emit.vs = fd6_emit_get_prog(&emit)->vs;
192 emit.fs = fd6_emit_get_prog(&emit)->fs;
193
194 const struct ir3_shader_variant *vp = emit.vs;
195 const struct ir3_shader_variant *fp = emit.fs;
196
197 ctx->stats.vs_regs += ir3_shader_halfregs(vp);
198 ctx->stats.fs_regs += ir3_shader_halfregs(fp);
199
200 /* figure out whether we need to disable LRZ write for binning
201 * pass using draw pass's fp:
202 */
203 emit.no_lrz_write = fp->writes_pos || fp->no_earlyz;
204
205 struct fd_ringbuffer *ring = ctx->batch->draw;
206 enum pc_di_primtype primtype = ctx->primtypes[info->mode];
207
208 fd6_emit_state(ring, &emit);
209
210 OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 2);
211 OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
212 OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
213
214 OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
215 OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
216 info->restart_index : 0xffffffff);
217
218 /* for debug after a lock up, write a unique counter value
219 * to scratch7 for each draw, to make it easier to match up
220 * register dumps to cmdstream. The combination of IB
221 * (scratch6) and DRAW is enough to "triangulate" the
222 * particular draw that caused lockup.
223 */
224 emit_marker6(ring, 7);
225
226 if (info->indirect) {
227 draw_emit_indirect(ctx->batch, ring, primtype,
228 info, index_offset);
229 } else {
230 draw_emit(ctx->batch, ring, primtype,
231 info, index_offset);
232 }
233
234 emit_marker6(ring, 7);
235 fd_reset_wfi(ctx->batch);
236
237 if (emit.streamout_mask) {
238 struct fd_ringbuffer *ring = ctx->batch->draw;
239
240 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
241 if (emit.streamout_mask & (1 << i)) {
242 fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
243 }
244 }
245 }
246
247 fd_context_all_clean(ctx);
248
249 return true;
250 }
251
/* Clear the LRZ (low-resolution Z) buffer for a depth clear, using the
 * 2D blitter (CP_BLIT w/ BLIT_OP_SCALE) to fill the LRZ buffer with the
 * clear depth value.  The packet/register write sequence below is
 * order-sensitive; do not reorder.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
	struct fd_ringbuffer *ring;

	// TODO mid-frame clears (ie. app doing crazy stuff)??  Maybe worth
	// splitting both clear and lrz clear out into their own rb's.  And
	// just throw away any draws prior to clear.  (Anything not fullscreen
	// clear, just fallback to generic path that treats it as a normal
	// draw.)

	/* Lazily allocate a dedicated ringbuffer for the LRZ clear: */
	if (!batch->lrz_clear) {
		batch->lrz_clear = fd_submit_new_ringbuffer(batch->submit, 0x1000, 0);
	}

	ring = batch->lrz_clear;

	/* Switch to bypass (direct render) mode for the blit: */
	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
	emit_marker6(ring, 7);

	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x10000000);

	OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0x7ffff);

	/* marker mode 0xc — meaning not decoded yet (magic from traces): */
	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0xc));
	emit_marker6(ring, 7);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8C01, 1);
	OUT_RING(ring, 0x0);

	/* No source surface for a solid-fill blit, so zero out src info: */
	OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A6XX_SP_2D_SRC_FORMAT, 1);
	OUT_RING(ring, 0x0000f410);

	/* LRZ buffer is written as R16_UNORM (magic low bits from traces): */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
			0x4f00080);

	OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
			0x4f00080);

	fd6_event_write(batch, ring, UNK_1D, true);
	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

	/* Solid-fill color = the clear depth value: */
	OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
	OUT_RING(ring, fui(depth));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Destination = the LRZ buffer (2 bytes/pixel, hence pitch * 2): */
	OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
	OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(RB6_R16_UNORM) |
			A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
			A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
	OUT_RELOCW(ring, zsbuf->lrz, 0, 0, 0);
	OUT_RING(ring, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf->lrz_pitch * 2));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Source rect is degenerate (solid fill, no actual source reads): */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_SRC_TL_X, 4);
	OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_X_X(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_X_X(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_Y_Y(0));

	/* Destination rect covers the whole LRZ buffer (BR is inclusive): */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
	OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) |
			A6XX_GRAS_2D_DST_TL_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
			A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

	fd6_event_write(batch, ring, 0x3f, false);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, 0x1000000);

	/* Kick the blit itself: */
	OUT_PKT7(ring, CP_BLIT, 1);
	OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, 0x0);

	/* Flush/invalidate so later passes see the cleared LRZ contents: */
	fd6_event_write(batch, ring, UNK_1D, true);
	fd6_event_write(batch, ring, FACENESS_FLUSH, true);
	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	fd6_cache_inv(batch, ring);
}
368
369 static bool is_z32(enum pipe_format format)
370 {
371 switch (format) {
372 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
373 case PIPE_FORMAT_Z32_UNORM:
374 case PIPE_FORMAT_Z32_FLOAT:
375 return true;
376 default:
377 return false;
378 }
379 }
380
381 static bool
382 fd6_clear(struct fd_context *ctx, unsigned buffers,
383 const union pipe_color_union *color, double depth, unsigned stencil)
384 {
385 struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
386 const bool has_depth = pfb->zsbuf;
387 unsigned color_buffers = buffers >> 2;
388 unsigned i;
389
390 /* If we're clearing after draws, fallback to 3D pipe clears. We could
391 * use blitter clears in the draw batch but then we'd have to patch up the
392 * gmem offsets. This doesn't seem like a useful thing to optimize for
393 * however.*/
394 if (ctx->batch->num_draws > 0)
395 return false;
396
397 foreach_bit(i, color_buffers)
398 ctx->batch->clear_color[i] = *color;
399 if (buffers & PIPE_CLEAR_DEPTH)
400 ctx->batch->clear_depth = depth;
401 if (buffers & PIPE_CLEAR_STENCIL)
402 ctx->batch->clear_stencil = stencil;
403
404 ctx->batch->fast_cleared |= buffers;
405
406 if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
407 struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
408 if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
409 zsbuf->lrz_valid = true;
410 fd6_clear_lrz(ctx->batch, zsbuf, depth);
411 }
412 }
413
414 return true;
415 }
416
417 void
418 fd6_draw_init(struct pipe_context *pctx)
419 {
420 struct fd_context *ctx = fd_context(pctx);
421 ctx->draw_vbo = fd6_draw_vbo;
422 ctx->clear = fd6_clear;
423 }