/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_prim.h"
33 #include "freedreno_state.h"
34 #include "freedreno_resource.h"
37 #include "fd6_context.h"
39 #include "fd6_program.h"
40 #include "fd6_format.h"
/*
 * Emit an indirect draw, i.e. one whose vertex/instance counts are read by
 * the CP from a GPU buffer (info->indirect->buffer at info->indirect->offset)
 * rather than being known on the CPU.
 *
 * Indexed draws use CP_DRAW_INDX_INDIRECT (which also takes the index buffer
 * BO plus a max_indices clamp); non-indexed draws use CP_DRAW_INDIRECT.
 * The A5XX_ packet helpers are shared with a5xx, which uses the same layout.
 */
static void
draw_emit_indirect(struct fd_ringbuffer *ring,
		struct CP_DRAW_INDX_OFFSET_0 *draw0,
		const struct pipe_draw_info *info,
		unsigned index_offset)
{
	struct fd_resource *ind = fd_resource(info->indirect->buffer);

	if (info->index_size) {
		struct pipe_resource *idx = info->index.resource;
		/* Clamp index fetch to the size of the bound index buffer, so
		 * the CP cannot read past its end regardless of what the
		 * indirect parameters in the buffer say:
		 */
		unsigned max_indices =
			(idx->width0 - index_offset) / info->index_size;

		OUT_PKT(ring, CP_DRAW_INDX_INDIRECT,
				pack_CP_DRAW_INDX_OFFSET_0(*draw0),
				A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE(
						fd_resource(idx)->bo, index_offset),
				A5XX_CP_DRAW_INDX_INDIRECT_3(.max_indices = max_indices),
				A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT(
						ind->bo, info->indirect->offset)
			);
	} else {
		OUT_PKT(ring, CP_DRAW_INDIRECT,
				pack_CP_DRAW_INDX_OFFSET_0(*draw0),
				A5XX_CP_DRAW_INDIRECT_INDIRECT(
						ind->bo, info->indirect->offset)
			);
	}
}
/*
 * Emit a direct draw (vertex/instance counts known on the CPU) via
 * CP_DRAW_INDX_OFFSET.  The indexed path additionally supplies the index
 * buffer base/offset and a max_indices clamp; the non-indexed path only
 * needs the instance and vertex counts.
 */
static void
draw_emit(struct fd_ringbuffer *ring,
		struct CP_DRAW_INDX_OFFSET_0 *draw0,
		const struct pipe_draw_info *info,
		unsigned index_offset)
{
	if (info->index_size) {
		/* user-space index buffers must have been lowered to real
		 * resources before we get here:
		 */
		assert(!info->has_user_indices);

		struct pipe_resource *idx_buffer = info->index.resource;
		/* clamp index fetch to the bounds of the index buffer: */
		unsigned max_indices =
			(idx_buffer->width0 - index_offset) / info->index_size;

		OUT_PKT(ring, CP_DRAW_INDX_OFFSET,
				pack_CP_DRAW_INDX_OFFSET_0(*draw0),
				CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
				CP_DRAW_INDX_OFFSET_2(.num_indices = info->count),
				CP_DRAW_INDX_OFFSET_3(.first_indx = info->start),
				A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(
						fd_resource(idx_buffer)->bo, index_offset),
				A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices)
			);
	} else {
		OUT_PKT(ring, CP_DRAW_INDX_OFFSET,
				pack_CP_DRAW_INDX_OFFSET_0(*draw0),
				CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
				CP_DRAW_INDX_OFFSET_2(.num_indices = info->count)
			);
	}
}
105 /* fixup dirty shader state in case some "unrelated" (from the state-
106 * tracker's perspective) state change causes us to switch to a
110 fixup_shader_state(struct fd_context
*ctx
, struct ir3_shader_key
*key
)
112 struct fd6_context
*fd6_ctx
= fd6_context(ctx
);
113 struct ir3_shader_key
*last_key
= &fd6_ctx
->last_key
;
115 if (!ir3_shader_key_equal(last_key
, key
)) {
116 if (ir3_shader_key_changes_fs(last_key
, key
)) {
117 ctx
->dirty_shader
[PIPE_SHADER_FRAGMENT
] |= FD_DIRTY_SHADER_PROG
;
118 ctx
->dirty
|= FD_DIRTY_PROG
;
121 if (ir3_shader_key_changes_vs(last_key
, key
)) {
122 ctx
->dirty_shader
[PIPE_SHADER_VERTEX
] |= FD_DIRTY_SHADER_PROG
;
123 ctx
->dirty
|= FD_DIRTY_PROG
;
126 fd6_ctx
->last_key
= *key
;
131 fixup_draw_state(struct fd_context
*ctx
, struct fd6_emit
*emit
)
133 if (ctx
->last
.dirty
||
134 (ctx
->last
.primitive_restart
!= emit
->primitive_restart
)) {
135 /* rasterizer state is effected by primitive-restart: */
136 ctx
->dirty
|= FD_DIRTY_RASTERIZER
;
137 ctx
->last
.primitive_restart
= emit
->primitive_restart
;
/*
 * Top-level draw entry point (ctx->draw_vbo).  Builds the fd6_emit state
 * bundle and ir3 shader key for this draw, resolves/validates the shader
 * program, emits any per-draw registers that changed since the last draw,
 * then emits the actual draw packet (direct or indirect).
 *
 * Returns false to make the core fall back / skip the draw (missing
 * shaders or failed compile), true once the draw has been emitted.
 */
static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
		unsigned index_offset)
{
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct ir3_shader *gs = ctx->prog.gs;
	struct fd6_emit emit = {
		.ctx = ctx,
		.vtx  = &ctx->vtx,
		.info = info,
		.key = {
			.vs = ctx->prog.vs,
			.gs = ctx->prog.gs,
			.fs = ctx->prog.fs,
			.key = {
				/* shader-key state derived from rasterizer/sampler state: */
				.color_two_side = ctx->rasterizer->light_twoside,
				.vclamp_color = ctx->rasterizer->clamp_vertex_color,
				.fclamp_color = ctx->rasterizer->clamp_fragment_color,
				.rasterflat = ctx->rasterizer->flatshade,
				.ucp_enables = ctx->rasterizer->clip_plane_enable,
				.has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate),
				.vsaturate_s = fd6_ctx->vsaturate_s,
				.vsaturate_t = fd6_ctx->vsaturate_t,
				.vsaturate_r = fd6_ctx->vsaturate_r,
				.fsaturate_s = fd6_ctx->fsaturate_s,
				.fsaturate_t = fd6_ctx->fsaturate_t,
				.fsaturate_r = fd6_ctx->fsaturate_r,
				/* gl_Layer is zero unless a GS actually writes LAYER: */
				.layer_zero = !gs || !(gs->nir->info.outputs_written & VARYING_BIT_LAYER),
				.vsamples = ctx->tex[PIPE_SHADER_VERTEX].samples,
				.fsamples = ctx->tex[PIPE_SHADER_FRAGMENT].samples,
				.sample_shading = (ctx->min_samples > 1),
				.msaa = (ctx->framebuffer.samples > 1),
			},
		},
		.rasterflat = ctx->rasterizer->flatshade,
		.sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
		.sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
		/* restart only applies to indexed draws: */
		.primitive_restart = info->primitive_restart && info->index_size,
	};

	/* can't draw without at least VS and FS: */
	if (!(ctx->prog.vs && ctx->prog.fs))
		return false;

	if (info->mode == PIPE_PRIM_PATCHES) {
		emit.key.hs = ctx->prog.hs;
		emit.key.ds = ctx->prog.ds;

		/* tessellation requires both HS and DS stages: */
		if (!(ctx->prog.hs && ctx->prog.ds))
			return false;

		shader_info *ds_info = &emit.key.ds->nir->info;
		emit.key.key.tessellation = ir3_tess_mode(ds_info->tess.primitive_mode);
	}

	if (emit.key.gs)
		emit.key.key.has_gs = true;

	/* VSC (visibility stream) sizing only applies to plain direct draws
	 * without tess/GS:
	 */
	if (!(emit.key.hs || emit.key.ds || emit.key.gs || info->indirect))
		fd6_vsc_update_sizes(ctx->batch, info);

	fixup_shader_state(ctx, &emit.key.key);

	if (!(ctx->dirty & FD_DIRTY_PROG)) {
		/* program unchanged, reuse cached variant set: */
		emit.prog = fd6_ctx->prog;
	} else {
		fd6_ctx->prog = fd6_emit_get_prog(&emit);
	}

	/* bail if compile failed: */
	if (!fd6_ctx->prog)
		return false;

	emit.dirty = ctx->dirty;      /* *after* fixup_shader_state() */
	emit.bs = fd6_emit_get_prog(&emit)->bs;
	emit.vs = fd6_emit_get_prog(&emit)->vs;
	emit.hs = fd6_emit_get_prog(&emit)->hs;
	emit.ds = fd6_emit_get_prog(&emit)->ds;
	emit.gs = fd6_emit_get_prog(&emit)->gs;
	emit.fs = fd6_emit_get_prog(&emit)->fs;

	/* per-stage register-usage statistics: */
	ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
	ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
	ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
	ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
	ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);

	struct fd_ringbuffer *ring = ctx->batch->draw;

	struct CP_DRAW_INDX_OFFSET_0 draw0 = {
		.prim_type = ctx->primtypes[info->mode],
		.vis_cull  = USE_VISIBILITY,
		.gs_enable = !!emit.key.gs,
	};

	if (info->index_size) {
		draw0.source_select = DI_SRC_SEL_DMA;
		draw0.index_size = fd4_size2indextype(info->index_size);
	} else {
		draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
	}

	if (info->mode == PIPE_PRIM_PATCHES) {
		shader_info *ds_info = &emit.ds->shader->nir->info;
		uint32_t factor_stride;

		/* NOTE(review): factor_stride values per domain reconstructed
		 * from upstream -- confirm against the a6xx tess factor layout.
		 */
		switch (ds_info->tess.primitive_mode) {
		case GL_ISOLINES:
			draw0.patch_type = TESS_ISOLINES;
			factor_stride = 12;
			break;
		case GL_TRIANGLES:
			draw0.patch_type = TESS_TRIANGLES;
			factor_stride = 20;
			break;
		case GL_QUADS:
			draw0.patch_type = TESS_QUADS;
			factor_stride = 28;
			break;
		default:
			unreachable("bad tessmode");
		}

		draw0.prim_type = DI_PT_PATCHES0 + info->vertices_per_patch;
		draw0.tess_enable = true;

		/* grow the shared per-batch tess param/factor buffers to fit
		 * this draw:
		 */
		ctx->batch->tessellation = true;
		ctx->batch->tessparam_size = MAX2(ctx->batch->tessparam_size,
				emit.hs->output_size * 4 * info->count);
		ctx->batch->tessfactor_size = MAX2(ctx->batch->tessfactor_size,
				factor_stride * info->count);

		if (!ctx->batch->tess_addrs_constobj) {
			/* Reserve space for the bo address - we'll write them later in
			 * setup_tess_buffers().  We need 2 bo address, but indirect
			 * constant upload needs at least 4 vec4s.
			 */
			unsigned size = 4 * 16;

			ctx->batch->tess_addrs_constobj = fd_submit_new_ringbuffer(
				ctx->batch->submit, size, FD_RINGBUFFER_STREAMING);

			ctx->batch->tess_addrs_constobj->cur += size;
		}
	}

	/* only emit per-draw registers that actually changed since last draw: */
	uint32_t index_start = info->index_size ? info->index_bias : info->start;
	if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
		OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
		OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
		ctx->last.index_start = index_start;
	}

	if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
		OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
		OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
		ctx->last.instance_start = info->start_instance;
	}

	uint32_t restart_index = info->primitive_restart ? info->restart_index : 0xffffffff;
	if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
		OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
		OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
		ctx->last.restart_index = restart_index;
	}

	fixup_draw_state(ctx, &emit);

	fd6_emit_state(ring, &emit);

	/* for debug after a lock up, write a unique counter value
	 * to scratch7 for each draw, to make it easier to match up
	 * register dumps to cmdstream.  The combination of IB
	 * (scratch6) and DRAW is enough to "triangulate" the
	 * particular draw that caused lockup.
	 */
	emit_marker6(ring, 7);

	if (info->indirect) {
		draw_emit_indirect(ring, &draw0, info, index_offset);
	} else {
		draw_emit(ring, &draw0, info, index_offset);
	}

	emit_marker6(ring, 7);
	fd_reset_wfi(ctx->batch);

	if (emit.streamout_mask) {
		struct fd_ringbuffer *ring = ctx->batch->draw;

		/* flush each active streamout buffer: */
		for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
			if (emit.streamout_mask & (1 << i)) {
				fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
			}
		}
	}

	fd_context_all_clean(ctx);

	return true;
}
/*
 * Build (into batch->lrz_clear) a 2D-blit cmdstream that fills the LRZ
 * (low-resolution Z) buffer of 'zsbuf' with the clear depth, encoded as
 * FMT6_16_UNORM.  The ring is recorded here but executed later as part of
 * the batch; any previously recorded lrz_clear ring is discarded first.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
	struct fd_ringbuffer *ring;
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

	/* drop any stale lrz-clear cmdstream from an earlier clear: */
	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
	}

	batch->lrz_clear = fd_submit_new_ringbuffer(batch->submit, 0x1000, 0);
	ring = batch->lrz_clear;

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
	emit_marker6(ring, 7);

	/* CCU in bypass mode for the 2D blit: */
	OUT_WFI5(ring);
	OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_bypass);

	/* NOTE(review): invalidate-field list reconstructed from upstream --
	 * confirm against A6XX_HLSQ_INVALIDATE_CMD definition.
	 */
	OUT_REG(ring, A6XX_HLSQ_INVALIDATE_CMD(
			.vs_state = true,
			.hs_state = true,
			.ds_state = true,
			.gs_state = true,
			.fs_state = true,
			.cs_state = true,
			.gfx_ibo = true,
			.cs_ibo = true,
			.gfx_shared_const = true,
			.gfx_bindless = 0x1f,
			.cs_bindless = 0x1f
		));

	emit_marker6(ring, 7);
	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
	emit_marker6(ring, 7);

	OUT_PKT4(ring, REG_A6XX_RB_2D_UNKNOWN_8C01, 1);
	OUT_RING(ring, 0x0);

	/* no texture source -- this is a solid-fill blit: */
	OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A6XX_SP_2D_DST_FORMAT, 1);
	OUT_RING(ring, 0x0000f410);

	/* NOTE(review): the 0x4f00080 bits OR'd into the blit-cntl regs are
	 * reconstructed from upstream -- confirm against rnndb.
	 */
	OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) |
			0x4f00080);

	OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
	OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) |
			0x4f00080);

	fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
	fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

	/* solid-fill color = clear depth; remaining components unused: */
	OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
	OUT_RING(ring, fui(depth));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* destination is the LRZ buffer (16bpp, linear): */
	OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
	OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
			A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
			A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
	OUT_RELOC(ring, zsbuf->lrz, 0, 0, 0);
	OUT_RING(ring, A6XX_RB_2D_DST_PITCH(zsbuf->lrz_pitch * 2).value);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_REG(ring,
		A6XX_GRAS_2D_SRC_TL_X(0),
		A6XX_GRAS_2D_SRC_BR_X(0),
		A6XX_GRAS_2D_SRC_TL_Y(0),
		A6XX_GRAS_2D_SRC_BR_Y(0));

	OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
	OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) |
			A6XX_GRAS_2D_DST_TL_Y(0));
	OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
			A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

	fd6_event_write(batch, ring, 0x3f, false);

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, fd6_ctx->magic.RB_UNKNOWN_8E04_blit);

	OUT_PKT7(ring, CP_BLIT, 1);
	OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

	OUT_WFI5(ring);

	OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
	OUT_RING(ring, 0x0);             /* RB_UNKNOWN_8E04 */

	/* flush/invalidate so later depth access sees the cleared LRZ: */
	fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
	fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
	fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	fd6_cache_inv(batch, ring);
}
468 static bool is_z32(enum pipe_format format
)
471 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
:
472 case PIPE_FORMAT_Z32_UNORM
:
473 case PIPE_FORMAT_Z32_FLOAT
:
/*
 * Fast-clear entry point (ctx->clear).  Instead of drawing, record the
 * clear values on the batch so they can be applied via the GMEM clear
 * path, and eagerly clear the LRZ buffer when depth is cleared.
 *
 * Returns false to make the caller fall back to 3D-pipe clears.
 */
static bool
fd6_clear(struct fd_context *ctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	const bool has_depth = pfb->zsbuf;
	/* color clear bits start at PIPE_CLEAR_COLOR0 (bit 2): */
	unsigned color_buffers = buffers >> 2;

	/* If we're clearing after draws, fallback to 3D pipe clears.  We could
	 * use blitter clears in the draw batch but then we'd have to patch up the
	 * gmem offsets.  This doesn't seem like a useful thing to optimize for
	 * however.
	 */
	if (ctx->batch->num_draws > 0)
		return false;

	/* record clear values on the batch for the GMEM clear path: */
	foreach_bit(i, color_buffers)
		ctx->batch->clear_color[i] = *color;
	if (buffers & PIPE_CLEAR_DEPTH)
		ctx->batch->clear_depth = depth;
	if (buffers & PIPE_CLEAR_STENCIL)
		ctx->batch->clear_stencil = stencil;

	ctx->batch->fast_cleared |= buffers;

	/* a depth clear also re-validates the LRZ buffer (no LRZ for
	 * 32-bit-depth formats):
	 */
	if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
		struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
		if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
			zsbuf->lrz_valid = true;
			zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
			fd6_clear_lrz(ctx->batch, zsbuf, depth);
		}
	}

	return true;
}
517 fd6_draw_init(struct pipe_context
*pctx
)
519 struct fd_context
*ctx
= fd_context(pctx
);
520 ctx
->draw_vbo
= fd6_draw_vbo
;
521 ctx
->clear
= fd6_clear
;