/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#ifndef FD6_EMIT_H
#define FD6_EMIT_H

#include "pipe/p_context.h"

#include "freedreno_context.h"

#include "fd6_context.h"
#include "fd6_format.h"
#include "fd6_program.h"

#include "ir3_gallium.h"
/* To collect all the state objects to emit in a single CP_SET_DRAW_STATE
 * packet, the emit tracks a collection of however many state_group's that
 * need to be emit'd.
 *
 * NOTE(review): the source text for this enum was corrupted; the members
 * not directly recoverable from it are reconstructed from upstream
 * freedreno.  Verify names and ORDER against fd6_emit.c before relying on
 * the numeric group ids.
 */
enum fd6_state_id {
	FD6_GROUP_PROG_CONFIG,
	FD6_GROUP_PROG,                 /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_PROG_BINNING,
	FD6_GROUP_PROG_INTERP,
	FD6_GROUP_PROG_FB_RAST,
	FD6_GROUP_LRZ,                  /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_LRZ_BINNING,
	FD6_GROUP_VBO,                  /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_CONST,                /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_VS_DRIVER_PARAMS,
	FD6_GROUP_PRIMITIVE_PARAMS,
	FD6_GROUP_VS_TEX,               /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_HS_TEX,               /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_DS_TEX,               /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_GS_TEX,               /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_FS_TEX,               /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_RASTERIZER,           /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_ZSA,                  /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_BLEND,                /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_SCISSOR,              /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_BLEND_COLOR,
	FD6_GROUP_SO,                   /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_IBO,                  /* NOTE(review): reconstructed — confirm */
	FD6_GROUP_NON_GROUP,            /* NOTE(review): reconstructed — confirm */
};
/* enable_mask values for fd6_state_group: ENABLE_ALL evaluates the state
 * group in the binning pass and in both (GMEM and sysmem/bypass) draw
 * passes; ENABLE_DRAW skips the binning pass.
 */
#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
74 struct fd6_state_group
{
75 struct fd_ringbuffer
*stateobj
;
76 enum fd6_state_id group_id
;
77 /* enable_mask controls which states the stateobj is evaluated in,
78 * b0 is binning pass b1 and/or b2 is draw pass
83 /* grouped together emit-state for prog/vertex/state emit: */
85 struct fd_context
*ctx
;
86 const struct fd_vertex_state
*vtx
;
87 const struct pipe_draw_info
*info
;
88 struct ir3_cache_key key
;
89 enum fd_dirty_3d_state dirty
;
91 uint32_t sprite_coord_enable
; /* bitmask */
92 bool sprite_coord_mode
;
95 bool primitive_restart
;
97 /* in binning pass, we don't have real frag shader, so we
98 * don't know if real draw disqualifies lrz write. So just
99 * figure that out up-front and stash it in the emit.
103 /* cached to avoid repeated lookups: */
104 const struct fd6_program_state
*prog
;
106 struct ir3_shader_variant
*bs
;
107 struct ir3_shader_variant
*vs
;
108 struct ir3_shader_variant
*hs
;
109 struct ir3_shader_variant
*ds
;
110 struct ir3_shader_variant
*gs
;
111 struct ir3_shader_variant
*fs
;
113 unsigned streamout_mask
;
115 struct fd6_state_group groups
[32];
119 static inline const struct fd6_program_state
*
120 fd6_emit_get_prog(struct fd6_emit
*emit
)
123 struct fd6_context
*fd6_ctx
= fd6_context(emit
->ctx
);
124 struct ir3_program_state
*s
=
125 ir3_cache_lookup(fd6_ctx
->shader_cache
, &emit
->key
, &emit
->ctx
->debug
);
126 emit
->prog
= fd6_program_state(s
);
132 fd6_emit_take_group(struct fd6_emit
*emit
, struct fd_ringbuffer
*stateobj
,
133 enum fd6_state_id group_id
, unsigned enable_mask
)
135 debug_assert(emit
->num_groups
< ARRAY_SIZE(emit
->groups
));
136 struct fd6_state_group
*g
= &emit
->groups
[emit
->num_groups
++];
137 g
->stateobj
= stateobj
;
138 g
->group_id
= group_id
;
139 g
->enable_mask
= enable_mask
;
143 fd6_emit_add_group(struct fd6_emit
*emit
, struct fd_ringbuffer
*stateobj
,
144 enum fd6_state_id group_id
, unsigned enable_mask
)
146 fd6_emit_take_group(emit
, fd_ringbuffer_ref(stateobj
), group_id
, enable_mask
);
149 static inline unsigned
150 fd6_event_write(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
,
151 enum vgt_event_type evt
, bool timestamp
)
157 OUT_PKT7(ring
, CP_EVENT_WRITE
, timestamp
? 4 : 1);
158 OUT_RING(ring
, CP_EVENT_WRITE_0_EVENT(evt
));
160 struct fd6_context
*fd6_ctx
= fd6_context(batch
->ctx
);
161 seqno
= ++fd6_ctx
->seqno
;
162 OUT_RELOC(ring
, control_ptr(fd6_ctx
, seqno
)); /* ADDR_LO/HI */
163 OUT_RING(ring
, seqno
);
170 fd6_cache_inv(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
172 fd6_event_write(batch
, ring
, CACHE_INVALIDATE
, false);
176 fd6_cache_flush(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
178 struct fd6_context
*fd6_ctx
= fd6_context(batch
->ctx
);
181 seqno
= fd6_event_write(batch
, ring
, RB_DONE_TS
, true);
183 OUT_PKT7(ring
, CP_WAIT_REG_MEM
, 6);
184 OUT_RING(ring
, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ
) |
185 CP_WAIT_REG_MEM_0_POLL_MEMORY
);
186 OUT_RELOC(ring
, control_ptr(fd6_ctx
, seqno
));
187 OUT_RING(ring
, CP_WAIT_REG_MEM_3_REF(seqno
));
188 OUT_RING(ring
, CP_WAIT_REG_MEM_4_MASK(~0));
189 OUT_RING(ring
, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
191 seqno
= fd6_event_write(batch
, ring
, CACHE_FLUSH_TS
, true);
193 OUT_PKT7(ring
, CP_WAIT_MEM_GTE
, 4);
194 OUT_RING(ring
, CP_WAIT_MEM_GTE_0_RESERVED(0));
195 OUT_RELOC(ring
, control_ptr(fd6_ctx
, seqno
));
196 OUT_RING(ring
, CP_WAIT_MEM_GTE_3_REF(seqno
));
200 fd6_emit_blit(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
202 emit_marker6(ring
, 7);
203 fd6_event_write(batch
, ring
, BLIT
, false);
204 emit_marker6(ring
, 7);
208 fd6_emit_lrz_flush(struct fd_ringbuffer
*ring
)
210 OUT_PKT7(ring
, CP_EVENT_WRITE
, 1);
211 OUT_RING(ring
, LRZ_FLUSH
);
215 fd6_geom_stage(gl_shader_stage type
)
218 case MESA_SHADER_VERTEX
:
219 case MESA_SHADER_TESS_CTRL
:
220 case MESA_SHADER_TESS_EVAL
:
221 case MESA_SHADER_GEOMETRY
:
223 case MESA_SHADER_FRAGMENT
:
224 case MESA_SHADER_COMPUTE
:
225 case MESA_SHADER_KERNEL
:
228 unreachable("bad shader type");
232 static inline uint32_t
233 fd6_stage2opcode(gl_shader_stage type
)
235 return fd6_geom_stage(type
) ? CP_LOAD_STATE6_GEOM
: CP_LOAD_STATE6_FRAG
;
238 static inline enum a6xx_state_block
239 fd6_stage2shadersb(gl_shader_stage type
)
242 case MESA_SHADER_VERTEX
:
243 return SB6_VS_SHADER
;
244 case MESA_SHADER_TESS_CTRL
:
245 return SB6_HS_SHADER
;
246 case MESA_SHADER_TESS_EVAL
:
247 return SB6_DS_SHADER
;
248 case MESA_SHADER_GEOMETRY
:
249 return SB6_GS_SHADER
;
250 case MESA_SHADER_FRAGMENT
:
251 return SB6_FS_SHADER
;
252 case MESA_SHADER_COMPUTE
:
253 case MESA_SHADER_KERNEL
:
254 return SB6_CS_SHADER
;
256 unreachable("bad shader type");
261 static inline enum a6xx_tess_spacing
262 fd6_gl2spacing(enum gl_tess_spacing spacing
)
265 case TESS_SPACING_EQUAL
:
267 case TESS_SPACING_FRACTIONAL_ODD
:
268 return TESS_FRACTIONAL_ODD
;
269 case TESS_SPACING_FRACTIONAL_EVEN
:
270 return TESS_FRACTIONAL_EVEN
;
271 case TESS_SPACING_UNSPECIFIED
:
273 unreachable("spacing must be specified");
/* Emit texture state for one shader stage; returns false on failure
 * (NOTE(review): exact failure semantics defined in fd6_emit.c — confirm).
 */
bool fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
		enum pipe_shader_type type, struct fd_texture_stateobj *tex,
		unsigned bcolor_offset,
		const struct ir3_shader_variant *v, struct fd_context *ctx);

/* Emit all dirty 3d state tracked in `emit` into `ring`. */
void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit);

/* Emit compute-shader state for variant `cp`. */
void fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp);

/* Re-emit "invariant" state after a context switch / ring reset. */
void fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

/* Hook up screen- and context-level emit vfuncs. */
void fd6_emit_init_screen(struct pipe_screen *pscreen);
void fd6_emit_init(struct pipe_context *pctx);
/* Emit an IB (indirect branch) to `target`, bracketed by debug markers. */
static inline void
fd6_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	emit_marker6(ring, 6);
	__OUT_IB5(ring, target);
	emit_marker6(ring, 6);
}
/* Convenience macro for a single register write.  Assumes a variable
 * named `ring` is in scope at the point of use.  do/while(0) makes it
 * safe as a single statement (e.g. in an unbraced if).
 */
#define WRITE(reg, val) do {			\
		OUT_PKT4(ring, reg, 1);		\
		OUT_RING(ring, val);		\
	} while (0)
306 #endif /* FD6_EMIT_H */