freedreno/a6xx: add fd_emit_take_group()
[mesa.git] / src / gallium / drivers / freedreno / a6xx / fd6_emit.h
1 /*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robclark@freedesktop.org>
26 */
27
28 #ifndef FD6_EMIT_H
29 #define FD6_EMIT_H
30
31 #include "pipe/p_context.h"
32
33 #include "freedreno_context.h"
34 #include "fd6_context.h"
35 #include "fd6_format.h"
36 #include "fd6_program.h"
37 #include "ir3_gallium.h"
38
39 struct fd_ringbuffer;
40
/* To collect all the state objects to emit in a single CP_SET_DRAW_STATE
 * packet, the emit tracks a collection of however many state_groups
 * need to be emitted.
 */
/* Identifies which logical chunk of state a state_group carries.
 * NOTE(review): the *_BINNING variants appear to be separate copies of
 * their draw-pass counterparts used during the binning pass — confirm
 * against the code that builds these groups.  Do not reorder: the
 * enumerator values are used as group ids.
 */
enum fd6_state_id {
	FD6_GROUP_PROG_CONFIG,
	FD6_GROUP_PROG,
	FD6_GROUP_PROG_BINNING,   /* binning-pass variant of FD6_GROUP_PROG */
	FD6_GROUP_LRZ,
	FD6_GROUP_LRZ_BINNING,    /* binning-pass variant of FD6_GROUP_LRZ */
	FD6_GROUP_VBO,
	FD6_GROUP_VBO_BINNING,    /* binning-pass variant of FD6_GROUP_VBO */
	FD6_GROUP_VS_CONST,       /* vertex-shader constants */
	FD6_GROUP_FS_CONST,       /* fragment-shader constants */
	FD6_GROUP_VS_TEX,         /* vertex-shader textures/samplers */
	FD6_GROUP_FS_TEX,         /* fragment-shader textures/samplers */
	FD6_GROUP_IBO,
	FD6_GROUP_RASTERIZER,
	FD6_GROUP_ZSA,
};
61
/* One entry in fd6_emit::groups: a pre-built stateobj plus the id and
 * pass-mask under which it should be applied.
 */
struct fd6_state_group {
	struct fd_ringbuffer *stateobj;   /* owned reference (see fd6_emit_take_group()) */
	enum fd6_state_id group_id;
	/* enable_mask controls which states the stateobj is evaluated in,
	 * b0 is binning pass b1 and/or b2 is draw pass
	 */
	uint8_t enable_mask;
};
70
/* grouped together emit-state for prog/vertex/state emit: */
struct fd6_emit {
	struct fd_context *ctx;
	const struct fd_vertex_state *vtx;      /* vertex buffers/elements for this draw */
	const struct pipe_draw_info *info;      /* gallium draw parameters */
	struct ir3_cache_key key;               /* lookup key for the shader-program cache */
	enum fd_dirty_3d_state dirty;           /* which state needs (re)emitting */

	uint32_t sprite_coord_enable;  /* bitmask */
	bool sprite_coord_mode;
	bool rasterflat;               /* flat-shading affects rasterizer/varying setup */
	bool no_decode_srgb;

	/* in binning pass, we don't have real frag shader, so we
	 * don't know if real draw disqualifies lrz write.  So just
	 * figure that out up-front and stash it in the emit.
	 */
	bool no_lrz_write;

	/* cached to avoid repeated lookups: */
	const struct fd6_program_state *prog;   /* filled lazily by fd6_emit_get_prog() */

	/* NOTE(review): bs looks like the binning-pass shader variant, with
	 * vs/fs the draw-pass vertex/fragment variants — confirm at the
	 * point where these are assigned.
	 */
	struct ir3_shader_variant *bs;
	struct ir3_shader_variant *vs;
	struct ir3_shader_variant *fs;

	unsigned streamout_mask;

	/* state groups accumulated via fd6_emit_take_group()/fd6_emit_add_group(),
	 * bounded by the debug_assert in fd6_emit_take_group():
	 */
	struct fd6_state_group groups[32];
	unsigned num_groups;
};
102
103 static inline const struct fd6_program_state *
104 fd6_emit_get_prog(struct fd6_emit *emit)
105 {
106 if (!emit->prog) {
107 struct fd6_context *fd6_ctx = fd6_context(emit->ctx);
108 struct ir3_program_state *s =
109 ir3_cache_lookup(fd6_ctx->shader_cache, &emit->key, &emit->ctx->debug);
110 emit->prog = fd6_program_state(s);
111 }
112 return emit->prog;
113 }
114
115 static inline void
116 fd6_emit_take_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
117 enum fd6_state_id group_id, unsigned enable_mask)
118 {
119 debug_assert(emit->num_groups < ARRAY_SIZE(emit->groups));
120 struct fd6_state_group *g = &emit->groups[emit->num_groups++];
121 g->stateobj = stateobj;
122 g->group_id = group_id;
123 g->enable_mask = enable_mask;
124 }
125
/* Like fd6_emit_take_group(), but takes its own reference on stateobj,
 * leaving the caller's reference intact.
 */
static inline void
fd6_emit_add_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
		enum fd6_state_id group_id, unsigned enable_mask)
{
	fd6_emit_take_group(emit, fd_ringbuffer_ref(stateobj), group_id, enable_mask);
}
132
/* Emit a CP_EVENT_WRITE packet for 'evt'.  When 'timestamp' is set, a
 * fresh per-context seqno is appended along with the address of the
 * seqno slot in the context's control buffer, so the GPU writes the
 * seqno back when the event completes; the seqno is returned so callers
 * can wait on it (see fd6_cache_flush()).  Returns 0 when no timestamp
 * is requested.
 */
static inline unsigned
fd6_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
		enum vgt_event_type evt, bool timestamp)
{
	unsigned seqno = 0;

	fd_reset_wfi(batch);

	/* Packet is 1 dword (event only) or 4 dwords (event + addr lo/hi + seqno): */
	OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
	if (timestamp) {
		struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
		seqno = ++fd6_ctx->seqno;
		OUT_RELOCW(ring, control_ptr(fd6_ctx, seqno)); /* ADDR_LO/HI */
		OUT_RING(ring, seqno);
	}

	return seqno;
}
152
/* Emit a cache-invalidate event (no timestamp/wait). */
static inline void
fd6_cache_inv(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	fd6_event_write(batch, ring, CACHE_INVALIDATE, false);
}
158
/* Emit a full cache flush and make the CP wait for it to complete:
 * a CACHE_FLUSH_AND_INV_EVENT with timestamp, a CP_WAIT_REG_MEM polling
 * the written-back seqno, then a CACHE_FLUSH_TS follow-up.
 */
static inline void
fd6_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
	unsigned seqno;

	seqno = fd6_event_write(batch, ring, CACHE_FLUSH_AND_INV_EVENT, true);

	/* Stall the CP until the seqno lands in the control buffer.
	 * NOTE(review): 0x13 presumably encodes the wait function (mem,
	 * equal) and 0x10 a poll interval — confirm against the a6xx
	 * CP_WAIT_REG_MEM packet definition.
	 */
	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, 0x00000013);
	OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
	OUT_RING(ring, seqno);
	OUT_RING(ring, 0xffffffff);        /* compare mask */
	OUT_RING(ring, 0x00000010);

	seqno = fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	/* Purpose of this packet is not understood (hence CP_UNK_A6XX_14);
	 * it consumes the CACHE_FLUSH_TS seqno address/value.
	 */
	OUT_PKT7(ring, CP_UNK_A6XX_14, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
	OUT_RING(ring, seqno);
}
181
/* Emit a BLIT event, bracketed by debug markers. */
static inline void
fd6_emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	emit_marker6(ring, 7);
	fd6_event_write(batch, ring, BLIT, false);
	emit_marker6(ring, 7);
}
189
/* Emit an LRZ_FLUSH event (raw CP_EVENT_WRITE, no batch bookkeeping). */
static inline void
fd6_emit_lrz_flush(struct fd_ringbuffer *ring)
{
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, LRZ_FLUSH);
}
196
197 static inline uint32_t
198 fd6_stage2opcode(gl_shader_stage type)
199 {
200 switch (type) {
201 case MESA_SHADER_VERTEX:
202 case MESA_SHADER_TESS_CTRL:
203 case MESA_SHADER_TESS_EVAL:
204 case MESA_SHADER_GEOMETRY:
205 return CP_LOAD_STATE6_GEOM;
206 case MESA_SHADER_FRAGMENT:
207 case MESA_SHADER_COMPUTE:
208 case MESA_SHADER_KERNEL:
209 return CP_LOAD_STATE6_FRAG;
210 default:
211 unreachable("bad shader type");
212 }
213 }
214
215 static inline enum a6xx_state_block
216 fd6_stage2shadersb(gl_shader_stage type)
217 {
218 switch (type) {
219 case MESA_SHADER_VERTEX:
220 return SB6_VS_SHADER;
221 case MESA_SHADER_FRAGMENT:
222 return SB6_FS_SHADER;
223 case MESA_SHADER_COMPUTE:
224 case MESA_SHADER_KERNEL:
225 return SB6_CS_SHADER;
226 default:
227 unreachable("bad shader type");
228 return ~0;
229 }
230 }
231
/* Emit texture/sampler state for 'type' stage into 'ring'. */
bool fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
		enum pipe_shader_type type, struct fd_texture_stateobj *tex,
		unsigned bcolor_offset,
		const struct ir3_shader_variant *v, struct fd_context *ctx);

/* Emit all accumulated draw state from 'emit' into 'ring'. */
void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit);

/* Emit compute-shader state for variant 'cp'. */
void fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp);

/* Re-emit the "restore" (initial/invariant) state into 'ring'. */
void fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

/* Hook up a6xx emit-related entrypoints on screen/context creation. */
void fd6_emit_init_screen(struct pipe_screen *pscreen);
void fd6_emit_init(struct pipe_context *pctx);
246
/* Emit an indirect branch to 'target', bracketed by debug markers. */
static inline void
fd6_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	emit_marker6(ring, 6);
	__OUT_IB5(ring, target);
	emit_marker6(ring, 6);
}
254
/* Emit a single register write: one PKT4 packet setting 'reg' to 'val'.
 * Relies on a 'ring' variable being in scope at the expansion site.
 * Arguments are parenthesized so expression arguments (e.g. a | b)
 * expand safely.
 */
#define WRITE(reg, val) do {					\
		OUT_PKT4(ring, (reg), 1);			\
		OUT_RING(ring, (val));				\
	} while (0)
259
260
261 #endif /* FD6_EMIT_H */