 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
30 #include "pipe/p_context.h"
32 #include "freedreno_context.h"
33 #include "fd5_context.h"
34 #include "fd5_format.h"
35 #include "fd5_program.h"
36 #include "ir3_shader.h"
40 /* grouped together emit-state for prog/vertex/state emit: */
42 struct pipe_debug_callback
*debug
;
43 const struct fd_vertex_state
*vtx
;
44 const struct fd_program_stateobj
*prog
;
45 const struct pipe_draw_info
*info
;
46 struct ir3_shader_key key
;
47 enum fd_dirty_3d_state dirty
;
49 uint32_t sprite_coord_enable
; /* bitmask */
50 bool sprite_coord_mode
;
54 /* cached to avoid repeated lookups of same variants: */
55 const struct ir3_shader_variant
*vp
, *fp
;
56 /* TODO: other shader stages.. */
58 unsigned streamout_mask
;
61 static inline enum a5xx_color_fmt
fd5_emit_format(struct pipe_surface
*surf
)
65 return fd5_pipe2color(surf
->format
);
68 static inline const struct ir3_shader_variant
*
69 fd5_emit_get_vp(struct fd5_emit
*emit
)
72 struct fd5_shader_stateobj
*so
= emit
->prog
->vp
;
73 emit
->vp
= ir3_shader_variant(so
->shader
, emit
->key
, emit
->debug
);
78 static inline const struct ir3_shader_variant
*
79 fd5_emit_get_fp(struct fd5_emit
*emit
)
82 if (emit
->key
.binning_pass
) {
83 /* use dummy stateobj to simplify binning vs non-binning: */
84 static const struct ir3_shader_variant binning_fp
= {};
85 emit
->fp
= &binning_fp
;
87 struct fd5_shader_stateobj
*so
= emit
->prog
->fp
;
88 emit
->fp
= ir3_shader_variant(so
->shader
, emit
->key
, emit
->debug
);
95 fd5_cache_flush(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
98 OUT_PKT4(ring
, REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO
, 5);
99 OUT_RING(ring
, 0x00000000); /* UCHE_CACHE_INVALIDATE_MIN_LO */
100 OUT_RING(ring
, 0x00000000); /* UCHE_CACHE_INVALIDATE_MIN_HI */
101 OUT_RING(ring
, 0x00000000); /* UCHE_CACHE_INVALIDATE_MAX_LO */
102 OUT_RING(ring
, 0x00000000); /* UCHE_CACHE_INVALIDATE_MAX_HI */
103 OUT_RING(ring
, 0x00000012); /* UCHE_CACHE_INVALIDATE */
108 fd5_set_render_mode(struct fd_context
*ctx
, struct fd_ringbuffer
*ring
,
109 enum render_mode_cmd mode
)
111 /* TODO add preemption support, gmem bypass, etc */
112 emit_marker5(ring
, 7);
113 OUT_PKT7(ring
, CP_SET_RENDER_MODE
, 5);
114 OUT_RING(ring
, CP_SET_RENDER_MODE_0_MODE(mode
));
115 OUT_RING(ring
, 0x00000000); /* ADDR_LO */
116 OUT_RING(ring
, 0x00000000); /* ADDR_HI */
117 OUT_RING(ring
, COND(mode
== GMEM
, CP_SET_RENDER_MODE_3_GMEM_ENABLE
) |
118 COND(mode
== BINNING
, CP_SET_RENDER_MODE_3_VSC_ENABLE
));
119 OUT_RING(ring
, 0x00000000);
120 emit_marker5(ring
, 7);
124 fd5_emit_blit(struct fd_context
*ctx
, struct fd_ringbuffer
*ring
)
126 struct fd5_context
*fd5_ctx
= fd5_context(ctx
);
128 emit_marker5(ring
, 7);
130 OUT_PKT7(ring
, CP_EVENT_WRITE
, 4);
131 OUT_RING(ring
, CP_EVENT_WRITE_0_EVENT(BLIT
));
132 OUT_RELOCW(ring
, fd5_ctx
->blit_mem
, 0, 0, 0); /* ADDR_LO/HI */
133 OUT_RING(ring
, 0x00000000);
135 emit_marker5(ring
, 7);
139 fd5_emit_render_cntl(struct fd_context
*ctx
, bool blit
, bool binning
)
141 struct fd_ringbuffer
*ring
= binning
? ctx
->batch
->binning
: ctx
->batch
->draw
;
143 /* TODO eventually this partially depends on the pfb state, ie.
144 * which of the cbuf(s)/zsbuf has an UBWC flag buffer.. that part
145 * we could probably cache and just regenerate if framebuffer
146 * state is dirty (or something like that)..
148 * Other bits seem to depend on query state, like if samples-passed
151 bool samples_passed
= (fd5_context(ctx
)->samples_passed_queries
> 0);
152 OUT_PKT4(ring
, REG_A5XX_RB_RENDER_CNTL
, 1);
153 OUT_RING(ring
, 0x00000000 | /* RB_RENDER_CNTL */
154 COND(binning
, A5XX_RB_RENDER_CNTL_BINNING_PASS
) |
155 COND(binning
, A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE
) |
156 COND(samples_passed
, A5XX_RB_RENDER_CNTL_SAMPLES_PASSED
) |
159 OUT_PKT4(ring
, REG_A5XX_GRAS_SC_CNTL
, 1);
160 OUT_RING(ring
, 0x00000008 | /* GRAS_SC_CNTL */
161 COND(binning
, A5XX_GRAS_SC_CNTL_BINNING_PASS
) |
162 COND(samples_passed
, A5XX_GRAS_SC_CNTL_SAMPLES_PASSED
));
/* Entry points implemented in fd5_emit.c: */

void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit);

void fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct fd5_emit *emit);

void fd5_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp);

void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

void fd5_emit_init(struct pipe_context *pctx);
178 #endif /* FD5_EMIT_H */