freedreno/a6xx: Create shader dependent streamout state at compile time
[mesa.git] / src / gallium / drivers / freedreno / a6xx / fd6_emit.h
1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robclark@freedesktop.org>
26 */
27
28 #ifndef FD6_EMIT_H
29 #define FD6_EMIT_H
30
31 #include "pipe/p_context.h"
32
33 #include "freedreno_context.h"
34 #include "fd6_context.h"
35 #include "fd6_format.h"
36 #include "fd6_program.h"
37 #include "ir3_gallium.h"
38
39 struct fd_ringbuffer;
40
/* To collect all the state objects to emit in a single CP_SET_DRAW_STATE
 * packet, the emit tracks a collection of however many state_group's that
 * need to be emit'd.
 */
enum fd6_state_id {
	FD6_GROUP_PROG_CONFIG,
	FD6_GROUP_PROG,
	FD6_GROUP_PROG_BINNING,    /* program state used in the binning pass */
	FD6_GROUP_PROG_INTERP,
	FD6_GROUP_PROG_FB_RAST,
	FD6_GROUP_LRZ,
	FD6_GROUP_LRZ_BINNING,     /* LRZ state variant for the binning pass */
	FD6_GROUP_VBO,             /* vertex buffer state */
	FD6_GROUP_CONST,           /* constant (uniform) state */
	FD6_GROUP_VS_DRIVER_PARAMS,
	FD6_GROUP_PRIMITIVE_PARAMS,
	FD6_GROUP_VS_TEX,          /* per-stage texture state: */
	FD6_GROUP_HS_TEX,
	FD6_GROUP_DS_TEX,
	FD6_GROUP_GS_TEX,
	FD6_GROUP_FS_TEX,
	FD6_GROUP_IBO,             /* image/SSBO state */
	FD6_GROUP_RASTERIZER,
	FD6_GROUP_ZSA,             /* depth/stencil/alpha state */
	FD6_GROUP_BLEND,
	FD6_GROUP_SCISSOR,
	FD6_GROUP_BLEND_COLOR,
	FD6_GROUP_SO,              /* stream-out state */
};
70
/* Convenience masks of CP_SET_DRAW_STATE enable bits: ENABLE_ALL evaluates
 * the state group in binning and both (gmem/sysmem) draw passes, ENABLE_DRAW
 * in the draw passes only.
 */
#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)

/* One entry in the collection of state objects gathered for a single
 * CP_SET_DRAW_STATE packet (see fd6_emit::groups).
 */
struct fd6_state_group {
	struct fd_ringbuffer *stateobj;  /* the stateobj to emit; ownership held by the group */
	enum fd6_state_id group_id;
	/* enable_mask controls which states the stateobj is evaluated in,
	 * b0 is binning pass b1 and/or b2 is draw pass
	 */
	uint32_t enable_mask;
};
82
/* grouped together emit-state for prog/vertex/state emit: */
struct fd6_emit {
	struct fd_context *ctx;
	const struct fd_vertex_state *vtx;
	const struct pipe_draw_info *info;
	struct ir3_cache_key key;          /* key for shader-cache lookup (see fd6_emit_get_prog()) */
	enum fd_dirty_3d_state dirty;      /* which state needs (re-)emitting */

	uint32_t sprite_coord_enable;  /* bitmask */
	bool sprite_coord_mode;
	bool rasterflat;
	bool no_decode_srgb;
	bool primitive_restart;

	/* in binning pass, we don't have real frag shader, so we
	 * don't know if real draw disqualifies lrz write.  So just
	 * figure that out up-front and stash it in the emit.
	 */
	bool no_lrz_write;

	/* cached to avoid repeated lookups: */
	const struct fd6_program_state *prog;

	/* shader variants for each stage (bs == binning variant): */
	struct ir3_shader_variant *bs;
	struct ir3_shader_variant *vs;
	struct ir3_shader_variant *hs;
	struct ir3_shader_variant *ds;
	struct ir3_shader_variant *gs;
	struct ir3_shader_variant *fs;

	unsigned streamout_mask;

	/* collected state groups, flushed out as a single CP_SET_DRAW_STATE: */
	struct fd6_state_group groups[32];
	unsigned num_groups;
};
118
119 static inline const struct fd6_program_state *
120 fd6_emit_get_prog(struct fd6_emit *emit)
121 {
122 if (!emit->prog) {
123 struct fd6_context *fd6_ctx = fd6_context(emit->ctx);
124 struct ir3_program_state *s =
125 ir3_cache_lookup(fd6_ctx->shader_cache, &emit->key, &emit->ctx->debug);
126 emit->prog = fd6_program_state(s);
127 }
128 return emit->prog;
129 }
130
131 static inline void
132 fd6_emit_take_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
133 enum fd6_state_id group_id, unsigned enable_mask)
134 {
135 debug_assert(emit->num_groups < ARRAY_SIZE(emit->groups));
136 struct fd6_state_group *g = &emit->groups[emit->num_groups++];
137 g->stateobj = stateobj;
138 g->group_id = group_id;
139 g->enable_mask = enable_mask;
140 }
141
142 static inline void
143 fd6_emit_add_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
144 enum fd6_state_id group_id, unsigned enable_mask)
145 {
146 fd6_emit_take_group(emit, fd_ringbuffer_ref(stateobj), group_id, enable_mask);
147 }
148
/* Emit a CP_EVENT_WRITE packet for the given event.  If 'timestamp' is
 * requested, the GPU additionally writes an incrementing per-context seqno
 * to the context's control buffer when the event completes; that seqno is
 * returned so the caller can wait on it.  Returns 0 when no timestamp is
 * requested.
 */
static inline unsigned
fd6_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
		enum vgt_event_type evt, bool timestamp)
{
	unsigned seqno = 0;

	fd_reset_wfi(batch);

	/* packet is 4 dwords with timestamp (event + addr lo/hi + value),
	 * otherwise just the event dword:
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
	if (timestamp) {
		struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
		seqno = ++fd6_ctx->seqno;
		OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));   /* ADDR_LO/HI */
		OUT_RING(ring, seqno);
	}

	return seqno;
}
168
/* Emit a cache-invalidate event (no timestamp/wait). */
static inline void
fd6_cache_inv(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	fd6_event_write(batch, ring, CACHE_INVALIDATE, false);
}
174
/* Emit a full cache flush and make the CP wait for it to complete:
 * first RB_DONE_TS with a timestamp seqno, polled via CP_WAIT_REG_MEM,
 * then CACHE_FLUSH_TS, waited on with CP_WAIT_MEM_GTE.  The exact packet
 * sequence/ordering matters; do not reorder.
 */
static inline void
fd6_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
	unsigned seqno;

	seqno = fd6_event_write(batch, ring, RB_DONE_TS, true);

	/* poll memory until the RB_DONE_TS seqno has been written: */
	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
		 CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

	seqno = fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	/* wait for the CACHE_FLUSH_TS seqno: */
	OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
	OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
	OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
	OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}
198
/* Emit a BLIT event, bracketed by debug markers. */
static inline void
fd6_emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	emit_marker6(ring, 7);
	fd6_event_write(batch, ring, BLIT, false);
	emit_marker6(ring, 7);
}
206
/* Emit an LRZ (low-resolution-Z) flush event. */
static inline void
fd6_emit_lrz_flush(struct fd_ringbuffer *ring)
{
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, LRZ_FLUSH);
}
213
214 static inline bool
215 fd6_geom_stage(gl_shader_stage type)
216 {
217 switch (type) {
218 case MESA_SHADER_VERTEX:
219 case MESA_SHADER_TESS_CTRL:
220 case MESA_SHADER_TESS_EVAL:
221 case MESA_SHADER_GEOMETRY:
222 return true;
223 case MESA_SHADER_FRAGMENT:
224 case MESA_SHADER_COMPUTE:
225 case MESA_SHADER_KERNEL:
226 return false;
227 default:
228 unreachable("bad shader type");
229 }
230 }
231
232 static inline uint32_t
233 fd6_stage2opcode(gl_shader_stage type)
234 {
235 return fd6_geom_stage(type) ? CP_LOAD_STATE6_GEOM : CP_LOAD_STATE6_FRAG;
236 }
237
238 static inline enum a6xx_state_block
239 fd6_stage2shadersb(gl_shader_stage type)
240 {
241 switch (type) {
242 case MESA_SHADER_VERTEX:
243 return SB6_VS_SHADER;
244 case MESA_SHADER_TESS_CTRL:
245 return SB6_HS_SHADER;
246 case MESA_SHADER_TESS_EVAL:
247 return SB6_DS_SHADER;
248 case MESA_SHADER_GEOMETRY:
249 return SB6_GS_SHADER;
250 case MESA_SHADER_FRAGMENT:
251 return SB6_FS_SHADER;
252 case MESA_SHADER_COMPUTE:
253 case MESA_SHADER_KERNEL:
254 return SB6_CS_SHADER;
255 default:
256 unreachable("bad shader type");
257 return ~0;
258 }
259 }
260
261 static inline enum a6xx_tess_spacing
262 fd6_gl2spacing(enum gl_tess_spacing spacing)
263 {
264 switch (spacing) {
265 case TESS_SPACING_EQUAL:
266 return TESS_EQUAL;
267 case TESS_SPACING_FRACTIONAL_ODD:
268 return TESS_FRACTIONAL_ODD;
269 case TESS_SPACING_FRACTIONAL_EVEN:
270 return TESS_FRACTIONAL_EVEN;
271 case TESS_SPACING_UNSPECIFIED:
272 default:
273 unreachable("spacing must be specified");
274 }
275 }
276
/* Emit texture state for one shader stage; returns whether srgb decode
 * needs to be disabled (NOTE(review): return semantics inferred from
 * name/usage — confirm against fd6_emit.c).
 */
bool fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
		enum pipe_shader_type type, struct fd_texture_stateobj *tex,
		unsigned bcolor_offset,
		const struct ir3_shader_variant *v, struct fd_context *ctx);

/* Emit all dirty 3d draw state collected in 'emit'. */
void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit);

/* Emit compute-shader state for the given variant. */
void fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp);

/* Re-emit invariant/initial state (e.g. at start of batch). */
void fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

void fd6_emit_init_screen(struct pipe_screen *pscreen);
void fd6_emit_init(struct pipe_context *pctx);
291
/* Emit an indirect-branch to a target ringbuffer, bracketed by debug
 * markers.
 */
static inline void
fd6_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	emit_marker6(ring, 6);
	__OUT_IB5(ring, target);
	emit_marker6(ring, 6);
}
299
/* Emit a single register write (one-dword PKT4).  NOTE: deliberately
 * unhygienic — expects a 'ring' variable in the caller's scope.
 */
#define WRITE(reg, val) do {					\
		OUT_PKT4(ring, reg, 1);			\
		OUT_RING(ring, val);			\
	} while (0)
304
305
306 #endif /* FD6_EMIT_H */