/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_prim.h"
33 #include "freedreno_state.h"
34 #include "freedreno_resource.h"
37 #include "fd6_context.h"
39 #include "fd6_program.h"
40 #include "fd6_format.h"
43 /* some bits in common w/ a4xx: */
44 #include "a4xx/fd4_draw.h"
47 draw_emit_indirect(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
,
48 enum pc_di_primtype primtype
,
49 const struct pipe_draw_info
*info
,
50 unsigned index_offset
)
52 struct fd_resource
*ind
= fd_resource(info
->indirect
->buffer
);
54 if (info
->index_size
) {
55 struct pipe_resource
*idx
= info
->index
.resource
;
56 unsigned max_indicies
= idx
->width0
/ info
->index_size
;
58 OUT_PKT7(ring
, CP_DRAW_INDX_INDIRECT
, 6);
59 OUT_RINGP(ring
, DRAW4(primtype
, DI_SRC_SEL_DMA
,
60 fd4_size2indextype(info
->index_size
), 0),
61 &batch
->draw_patches
);
62 OUT_RELOC(ring
, fd_resource(idx
)->bo
,
64 // XXX: Check A5xx vs A6xx
65 OUT_RING(ring
, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies
));
66 OUT_RELOC(ring
, ind
->bo
, info
->indirect
->offset
, 0, 0);
68 OUT_PKT7(ring
, CP_DRAW_INDIRECT
, 3);
69 OUT_RINGP(ring
, DRAW4(primtype
, DI_SRC_SEL_AUTO_INDEX
, 0, 0),
70 &batch
->draw_patches
);
71 OUT_RELOC(ring
, ind
->bo
, info
->indirect
->offset
, 0, 0);
76 draw_emit(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
,
77 enum pc_di_primtype primtype
,
78 const struct pipe_draw_info
*info
,
79 unsigned index_offset
)
81 if (info
->index_size
) {
82 assert(!info
->has_user_indices
);
84 struct pipe_resource
*idx_buffer
= info
->index
.resource
;
85 uint32_t idx_size
= info
->index_size
* info
->count
;
86 uint32_t idx_offset
= index_offset
+ info
->start
* info
->index_size
;
88 /* leave vis mode blank for now, it will be patched up when
89 * we know if we are binning or not
91 uint32_t draw
= CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype
) |
92 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA
) |
93 CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(fd4_size2indextype(info
->index_size
)) |
96 OUT_PKT7(ring
, CP_DRAW_INDX_OFFSET
, 7);
97 OUT_RINGP(ring
, draw
, &batch
->draw_patches
);
98 OUT_RING(ring
, info
->instance_count
); /* NumInstances */
99 OUT_RING(ring
, info
->count
); /* NumIndices */
100 OUT_RING(ring
, 0x0); /* XXX */
101 OUT_RELOC(ring
, fd_resource(idx_buffer
)->bo
, idx_offset
, 0, 0);
102 OUT_RING (ring
, idx_size
);
104 /* leave vis mode blank for now, it will be patched up when
105 * we know if we are binning or not
107 uint32_t draw
= CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype
) |
108 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX
) |
111 OUT_PKT7(ring
, CP_DRAW_INDX_OFFSET
, 3);
112 OUT_RINGP(ring
, draw
, &batch
->draw_patches
);
113 OUT_RING(ring
, info
->instance_count
); /* NumInstances */
114 OUT_RING(ring
, info
->count
); /* NumIndices */
118 /* fixup dirty shader state in case some "unrelated" (from the state-
119 * tracker's perspective) state change causes us to switch to a
123 fixup_shader_state(struct fd_context
*ctx
, struct ir3_shader_key
*key
)
125 struct fd6_context
*fd6_ctx
= fd6_context(ctx
);
126 struct ir3_shader_key
*last_key
= &fd6_ctx
->last_key
;
128 if (!ir3_shader_key_equal(last_key
, key
)) {
129 if (ir3_shader_key_changes_fs(last_key
, key
)) {
130 ctx
->dirty_shader
[PIPE_SHADER_FRAGMENT
] |= FD_DIRTY_SHADER_PROG
;
131 ctx
->dirty
|= FD_DIRTY_PROG
;
134 if (ir3_shader_key_changes_vs(last_key
, key
)) {
135 ctx
->dirty_shader
[PIPE_SHADER_VERTEX
] |= FD_DIRTY_SHADER_PROG
;
136 ctx
->dirty
|= FD_DIRTY_PROG
;
139 fd6_ctx
->last_key
= *key
;
144 fd6_draw_vbo(struct fd_context
*ctx
, const struct pipe_draw_info
*info
,
145 unsigned index_offset
)
147 struct fd6_context
*fd6_ctx
= fd6_context(ctx
);
148 struct fd6_emit emit
= {
156 .color_two_side
= ctx
->rasterizer
->light_twoside
,
157 .vclamp_color
= ctx
->rasterizer
->clamp_vertex_color
,
158 .fclamp_color
= ctx
->rasterizer
->clamp_fragment_color
,
159 .rasterflat
= ctx
->rasterizer
->flatshade
,
160 .ucp_enables
= ctx
->rasterizer
->clip_plane_enable
,
161 .has_per_samp
= (fd6_ctx
->fsaturate
|| fd6_ctx
->vsaturate
),
162 .vsaturate_s
= fd6_ctx
->vsaturate_s
,
163 .vsaturate_t
= fd6_ctx
->vsaturate_t
,
164 .vsaturate_r
= fd6_ctx
->vsaturate_r
,
165 .fsaturate_s
= fd6_ctx
->fsaturate_s
,
166 .fsaturate_t
= fd6_ctx
->fsaturate_t
,
167 .fsaturate_r
= fd6_ctx
->fsaturate_r
,
168 .vsamples
= ctx
->tex
[PIPE_SHADER_VERTEX
].samples
,
169 .fsamples
= ctx
->tex
[PIPE_SHADER_FRAGMENT
].samples
,
172 .rasterflat
= ctx
->rasterizer
->flatshade
,
173 .sprite_coord_enable
= ctx
->rasterizer
->sprite_coord_enable
,
174 .sprite_coord_mode
= ctx
->rasterizer
->sprite_coord_mode
,
177 fixup_shader_state(ctx
, &emit
.key
.key
);
179 if (!(ctx
->dirty
& FD_DIRTY_PROG
)) {
180 emit
.prog
= fd6_ctx
->prog
;
182 fd6_ctx
->prog
= fd6_emit_get_prog(&emit
);
185 /* bail if compile failed: */
189 emit
.dirty
= ctx
->dirty
; /* *after* fixup_shader_state() */
190 emit
.bs
= fd6_emit_get_prog(&emit
)->bs
;
191 emit
.vs
= fd6_emit_get_prog(&emit
)->vs
;
192 emit
.fs
= fd6_emit_get_prog(&emit
)->fs
;
194 const struct ir3_shader_variant
*vp
= emit
.vs
;
195 const struct ir3_shader_variant
*fp
= emit
.fs
;
197 ctx
->stats
.vs_regs
+= ir3_shader_halfregs(vp
);
198 ctx
->stats
.fs_regs
+= ir3_shader_halfregs(fp
);
200 /* figure out whether we need to disable LRZ write for binning
201 * pass using draw pass's fp:
203 emit
.no_lrz_write
= fp
->writes_pos
|| fp
->no_earlyz
;
205 struct fd_ringbuffer
*ring
= ctx
->batch
->draw
;
206 enum pc_di_primtype primtype
= ctx
->primtypes
[info
->mode
];
208 fd6_emit_state(ring
, &emit
);
210 OUT_PKT4(ring
, REG_A6XX_VFD_INDEX_OFFSET
, 2);
211 OUT_RING(ring
, info
->index_size
? info
->index_bias
: info
->start
); /* VFD_INDEX_OFFSET */
212 OUT_RING(ring
, info
->start_instance
); /* VFD_INSTANCE_START_OFFSET */
214 OUT_PKT4(ring
, REG_A6XX_PC_RESTART_INDEX
, 1);
215 OUT_RING(ring
, info
->primitive_restart
? /* PC_RESTART_INDEX */
216 info
->restart_index
: 0xffffffff);
218 /* for debug after a lock up, write a unique counter value
219 * to scratch7 for each draw, to make it easier to match up
220 * register dumps to cmdstream. The combination of IB
221 * (scratch6) and DRAW is enough to "triangulate" the
222 * particular draw that caused lockup.
224 emit_marker6(ring
, 7);
226 if (info
->indirect
) {
227 draw_emit_indirect(ctx
->batch
, ring
, primtype
,
230 draw_emit(ctx
->batch
, ring
, primtype
,
234 emit_marker6(ring
, 7);
235 fd_reset_wfi(ctx
->batch
);
237 if (emit
.streamout_mask
) {
238 struct fd_ringbuffer
*ring
= ctx
->batch
->draw
;
240 for (unsigned i
= 0; i
< PIPE_MAX_SO_BUFFERS
; i
++) {
241 if (emit
.streamout_mask
& (1 << i
)) {
242 fd6_event_write(ctx
->batch
, ring
, FLUSH_SO_0
+ i
, false);
247 fd_context_all_clean(ctx
);
253 fd6_clear_lrz(struct fd_batch
*batch
, struct fd_resource
*zsbuf
, double depth
)
255 struct fd_ringbuffer
*ring
;
257 // TODO mid-frame clears (ie. app doing crazy stuff)?? Maybe worth
258 // splitting both clear and lrz clear out into their own rb's. And
259 // just throw away any draws prior to clear. (Anything not fullscreen
260 // clear, just fallback to generic path that treats it as a normal
263 if (!batch
->lrz_clear
) {
264 batch
->lrz_clear
= fd_submit_new_ringbuffer(batch
->submit
, 0x1000, 0);
267 ring
= batch
->lrz_clear
;
269 emit_marker6(ring
, 7);
270 OUT_PKT7(ring
, CP_SET_MARKER
, 1);
271 OUT_RING(ring
, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS
));
272 emit_marker6(ring
, 7);
274 OUT_PKT4(ring
, REG_A6XX_RB_CCU_CNTL
, 1);
275 OUT_RING(ring
, 0x10000000);
277 OUT_PKT4(ring
, REG_A6XX_HLSQ_UPDATE_CNTL
, 1);
278 OUT_RING(ring
, 0x7ffff);
280 emit_marker6(ring
, 7);
281 OUT_PKT7(ring
, CP_SET_MARKER
, 1);
282 OUT_RING(ring
, A2XX_CP_SET_MARKER_0_MODE(0xc));
283 emit_marker6(ring
, 7);
285 OUT_PKT4(ring
, REG_A6XX_RB_UNKNOWN_8C01
, 1);
288 OUT_PKT4(ring
, REG_A6XX_SP_PS_2D_SRC_INFO
, 13);
289 OUT_RING(ring
, 0x00000000);
290 OUT_RING(ring
, 0x00000000);
291 OUT_RING(ring
, 0x00000000);
292 OUT_RING(ring
, 0x00000000);
293 OUT_RING(ring
, 0x00000000);
294 OUT_RING(ring
, 0x00000000);
295 OUT_RING(ring
, 0x00000000);
296 OUT_RING(ring
, 0x00000000);
297 OUT_RING(ring
, 0x00000000);
298 OUT_RING(ring
, 0x00000000);
299 OUT_RING(ring
, 0x00000000);
300 OUT_RING(ring
, 0x00000000);
301 OUT_RING(ring
, 0x00000000);
303 OUT_PKT4(ring
, REG_A6XX_SP_2D_SRC_FORMAT
, 1);
304 OUT_RING(ring
, 0x0000f410);
306 OUT_PKT4(ring
, REG_A6XX_GRAS_2D_BLIT_CNTL
, 1);
307 OUT_RING(ring
, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM
) |
310 OUT_PKT4(ring
, REG_A6XX_RB_2D_BLIT_CNTL
, 1);
311 OUT_RING(ring
, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM
) |
314 fd6_event_write(batch
, ring
, UNK_1D
, true);
315 fd6_event_write(batch
, ring
, PC_CCU_INVALIDATE_COLOR
, false);
317 OUT_PKT4(ring
, REG_A6XX_RB_2D_SRC_SOLID_C0
, 4);
318 OUT_RING(ring
, fui(depth
));
319 OUT_RING(ring
, 0x00000000);
320 OUT_RING(ring
, 0x00000000);
321 OUT_RING(ring
, 0x00000000);
323 OUT_PKT4(ring
, REG_A6XX_RB_2D_DST_INFO
, 9);
324 OUT_RING(ring
, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(RB6_R16_UNORM
) |
325 A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR
) |
326 A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX
));
327 OUT_RELOCW(ring
, zsbuf
->lrz
, 0, 0, 0);
328 OUT_RING(ring
, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf
->lrz_pitch
* 2));
329 OUT_RING(ring
, 0x00000000);
330 OUT_RING(ring
, 0x00000000);
331 OUT_RING(ring
, 0x00000000);
332 OUT_RING(ring
, 0x00000000);
333 OUT_RING(ring
, 0x00000000);
335 OUT_PKT4(ring
, REG_A6XX_GRAS_2D_SRC_TL_X
, 4);
336 OUT_RING(ring
, A6XX_GRAS_2D_SRC_TL_X_X(0));
337 OUT_RING(ring
, A6XX_GRAS_2D_SRC_BR_X_X(0));
338 OUT_RING(ring
, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
339 OUT_RING(ring
, A6XX_GRAS_2D_SRC_BR_Y_Y(0));
341 OUT_PKT4(ring
, REG_A6XX_GRAS_2D_DST_TL
, 2);
342 OUT_RING(ring
, A6XX_GRAS_2D_DST_TL_X(0) |
343 A6XX_GRAS_2D_DST_TL_Y(0));
344 OUT_RING(ring
, A6XX_GRAS_2D_DST_BR_X(zsbuf
->lrz_width
- 1) |
345 A6XX_GRAS_2D_DST_BR_Y(zsbuf
->lrz_height
- 1));
347 fd6_event_write(batch
, ring
, 0x3f, false);
351 OUT_PKT4(ring
, REG_A6XX_RB_UNKNOWN_8E04
, 1);
352 OUT_RING(ring
, 0x1000000);
354 OUT_PKT7(ring
, CP_BLIT
, 1);
355 OUT_RING(ring
, CP_BLIT_0_OP(BLIT_OP_SCALE
));
359 OUT_PKT4(ring
, REG_A6XX_RB_UNKNOWN_8E04
, 1);
362 fd6_event_write(batch
, ring
, UNK_1D
, true);
363 fd6_event_write(batch
, ring
, FACENESS_FLUSH
, true);
364 fd6_event_write(batch
, ring
, CACHE_FLUSH_TS
, true);
366 fd6_cache_inv(batch
, ring
);
369 static bool is_z32(enum pipe_format format
)
372 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
:
373 case PIPE_FORMAT_Z32_UNORM
:
374 case PIPE_FORMAT_Z32_FLOAT
:
382 fd6_clear(struct fd_context
*ctx
, unsigned buffers
,
383 const union pipe_color_union
*color
, double depth
, unsigned stencil
)
385 struct pipe_framebuffer_state
*pfb
= &ctx
->batch
->framebuffer
;
386 const bool has_depth
= pfb
->zsbuf
;
387 unsigned color_buffers
= buffers
>> 2;
390 /* If we're clearing after draws, fallback to 3D pipe clears. We could
391 * use blitter clears in the draw batch but then we'd have to patch up the
392 * gmem offsets. This doesn't seem like a useful thing to optimize for
394 if (ctx
->batch
->num_draws
> 0)
397 foreach_bit(i
, color_buffers
)
398 ctx
->batch
->clear_color
[i
] = *color
;
399 if (buffers
& PIPE_CLEAR_DEPTH
)
400 ctx
->batch
->clear_depth
= depth
;
401 if (buffers
& PIPE_CLEAR_STENCIL
)
402 ctx
->batch
->clear_stencil
= stencil
;
404 ctx
->batch
->fast_cleared
|= buffers
;
406 if (has_depth
&& (buffers
& PIPE_CLEAR_DEPTH
)) {
407 struct fd_resource
*zsbuf
= fd_resource(pfb
->zsbuf
->texture
);
408 if (zsbuf
->lrz
&& !is_z32(pfb
->zsbuf
->format
)) {
409 zsbuf
->lrz_valid
= true;
410 fd6_clear_lrz(ctx
->batch
, zsbuf
, depth
);
418 fd6_draw_init(struct pipe_context
*pctx
)
420 struct fd_context
*ctx
= fd_context(pctx
);
421 ctx
->draw_vbo
= fd6_draw_vbo
;
422 ctx
->clear
= fd6_clear
;