/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FD5_EMIT_H
#define FD5_EMIT_H

#include "pipe/p_context.h"

#include "freedreno_context.h"
#include "fd5_context.h"
#include "fd5_format.h"
#include "fd5_program.h"
#include "ir3_shader.h"

struct fd_ringbuffer;

/* grouped together emit-state for prog/vertex/state emit: */
struct fd5_emit {
	struct pipe_debug_callback *debug;
	const struct fd_vertex_state *vtx;
	const struct fd_program_stateobj *prog;
	const struct pipe_draw_info *info;
	struct ir3_shader_key key;
	enum fd_dirty_3d_state dirty;

	uint32_t sprite_coord_enable;  /* bitmask */
	bool sprite_coord_mode;
	bool rasterflat;
	bool no_decode_srgb;

	/* cached to avoid repeated lookups of same variants: */
	const struct ir3_shader_variant *vp, *fp;
	/* TODO: other shader stages.. */

	unsigned streamout_mask;
};
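
/*
 * Typical usage, as an illustrative sketch: the draw path (e.g. fd5_draw.c)
 * fills one of these on the stack and hands it to fd5_emit_state() and
 * fd5_emit_vertex_bufs().  The field sources shown here (ctx->rasterizer,
 * a 'key' built from current state, etc.) are assumptions based on the
 * usual freedreno pattern, not a verbatim copy of the driver code:
 *
 *    struct fd5_emit emit = {
 *            .debug = &ctx->debug,
 *            .vtx   = &ctx->vtx,
 *            .prog  = &ctx->prog,
 *            .info  = info,
 *            .key   = key,
 *            .dirty = ctx->dirty,
 *            .rasterflat = ctx->rasterizer->flatshade,
 *            .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
 *            .sprite_coord_mode   = ctx->rasterizer->sprite_coord_mode,
 *    };
 */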

/* map a pipe_surface's format to the hw color format (zero if no surface): */
static inline enum a5xx_color_fmt fd5_emit_format(struct pipe_surface *surf)
{
	if (!surf)
		return 0;
	return fd5_pipe2color(surf->format);
}

/* lazily look up (and cache) the vertex shader variant for the current key: */
static inline const struct ir3_shader_variant *
fd5_emit_get_vp(struct fd5_emit *emit)
{
	if (!emit->vp) {
		struct fd5_shader_stateobj *so = emit->prog->vp;
		emit->vp = ir3_shader_variant(so->shader, emit->key, emit->debug);
	}
	return emit->vp;
}

/* lazily look up (and cache) the fragment shader variant for the current key: */
static inline const struct ir3_shader_variant *
fd5_emit_get_fp(struct fd5_emit *emit)
{
	if (!emit->fp) {
		if (emit->key.binning_pass) {
			/* use dummy stateobj to simplify binning vs non-binning: */
			static const struct ir3_shader_variant binning_fp = {};
			emit->fp = &binning_fp;
		} else {
			struct fd5_shader_stateobj *so = emit->prog->fp;
			emit->fp = ir3_shader_variant(so->shader, emit->key, emit->debug);
		}
	}
	return emit->fp;
}

/* emit commands to invalidate the UCHE: */
static inline void
fd5_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	fd_reset_wfi(batch);
	OUT_PKT4(ring, REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO, 5);
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MIN_LO */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MIN_HI */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MAX_LO */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MAX_HI */
	OUT_RING(ring, 0x00000012);   /* UCHE_CACHE_INVALIDATE */
	fd_wfi(batch, ring);
}

static inline void
fd5_set_render_mode(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum render_mode_cmd mode)
{
	/* TODO add preemption support, gmem bypass, etc */
	emit_marker5(ring, 7);
	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
	OUT_RING(ring, CP_SET_RENDER_MODE_0_MODE(mode));
	OUT_RING(ring, 0x00000000);   /* ADDR_LO */
	OUT_RING(ring, 0x00000000);   /* ADDR_HI */
	OUT_RING(ring, COND(mode == GMEM, CP_SET_RENDER_MODE_3_GMEM_ENABLE) |
			COND(mode == BINNING, CP_SET_RENDER_MODE_3_VSC_ENABLE));
	OUT_RING(ring, 0x00000000);
	emit_marker5(ring, 7);
}

static inline void
fd5_emit_blit(struct fd_context *ctx, struct fd_ringbuffer *ring)
{
	struct fd5_context *fd5_ctx = fd5_context(ctx);

	emit_marker5(ring, 7);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(BLIT));
	OUT_RELOCW(ring, fd5_ctx->blit_mem, 0, 0, 0);   /* ADDR_LO/HI */
	OUT_RING(ring, 0x00000000);

	emit_marker5(ring, 7);
}

static inline void
fd5_emit_render_cntl(struct fd_context *ctx, bool blit, bool binning)
{
	struct fd_ringbuffer *ring = binning ? ctx->batch->binning : ctx->batch->draw;

	/* TODO eventually this partially depends on the pfb state, ie.
	 * which of the cbuf(s)/zsbuf has an UBWC flag buffer.. that part
	 * we could probably cache and just regenerate if framebuffer
	 * state is dirty (or something like that)..
	 *
	 * Other bits seem to depend on query state, like if samples-passed
	 * query is active.
	 */
	bool samples_passed = (fd5_context(ctx)->samples_passed_queries > 0);
	OUT_PKT4(ring, REG_A5XX_RB_RENDER_CNTL, 1);
	OUT_RING(ring, 0x00000000 |   /* RB_RENDER_CNTL */
			COND(binning, A5XX_RB_RENDER_CNTL_BINNING_PASS) |
			COND(binning, A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE) |
			COND(samples_passed, A5XX_RB_RENDER_CNTL_SAMPLES_PASSED) |
			COND(!blit, 0x8));

	OUT_PKT4(ring, REG_A5XX_GRAS_SC_CNTL, 1);
	OUT_RING(ring, 0x00000008 |   /* GRAS_SC_CNTL */
			COND(binning, A5XX_GRAS_SC_CNTL_BINNING_PASS) |
			COND(samples_passed, A5XX_GRAS_SC_CNTL_SAMPLES_PASSED));
}

void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit);

void fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct fd5_emit *emit);

void fd5_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp);

void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

void fd5_emit_init(struct pipe_context *pctx);

#endif /* FD5_EMIT_H */