2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * Rob Clark <robclark@freedesktop.org>
31 #include "pipe/p_context.h"
33 #include "freedreno_context.h"
34 #include "fd6_context.h"
35 #include "fd6_format.h"
36 #include "fd6_program.h"
37 #include "ir3_gallium.h"
41 /* To collect all the state objects to emit in a single CP_SET_DRAW_STATE
42 * packet, the emit tracks a collection of however many state_group's that
47 FD6_GROUP_PROG_BINNING
,
49 FD6_GROUP_LRZ_BINNING
,
51 FD6_GROUP_VBO_BINNING
,
61 struct fd6_state_group
{
62 struct fd_ringbuffer
*stateobj
;
63 enum fd6_state_id group_id
;
64 /* enable_mask controls which states the stateobj is evaluated in,
65 * b0 is binning pass b1 and/or b2 is draw pass
70 /* grouped together emit-state for prog/vertex/state emit: */
72 struct fd_context
*ctx
;
73 const struct fd_vertex_state
*vtx
;
74 const struct pipe_draw_info
*info
;
75 struct ir3_cache_key key
;
76 enum fd_dirty_3d_state dirty
;
78 uint32_t sprite_coord_enable
; /* bitmask */
79 bool sprite_coord_mode
;
83 /* in binning pass, we don't have real frag shader, so we
84 * don't know if real draw disqualifies lrz write. So just
85 * figure that out up-front and stash it in the emit.
89 /* cached to avoid repeated lookups: */
90 const struct fd6_program_state
*prog
;
92 struct ir3_shader_variant
*bs
;
93 struct ir3_shader_variant
*vs
;
94 struct ir3_shader_variant
*fs
;
96 unsigned streamout_mask
;
98 struct fd6_state_group groups
[32];
102 static inline const struct fd6_program_state
*
103 fd6_emit_get_prog(struct fd6_emit
*emit
)
106 struct fd6_context
*fd6_ctx
= fd6_context(emit
->ctx
);
107 struct ir3_program_state
*s
=
108 ir3_cache_lookup(fd6_ctx
->shader_cache
, &emit
->key
, &emit
->ctx
->debug
);
109 emit
->prog
= fd6_program_state(s
);
115 fd6_emit_add_group(struct fd6_emit
*emit
, struct fd_ringbuffer
*stateobj
,
116 enum fd6_state_id group_id
, unsigned enable_mask
)
118 debug_assert(emit
->num_groups
< ARRAY_SIZE(emit
->groups
));
119 struct fd6_state_group
*g
= &emit
->groups
[emit
->num_groups
++];
120 g
->stateobj
= fd_ringbuffer_ref(stateobj
);
121 g
->group_id
= group_id
;
122 g
->enable_mask
= enable_mask
;
125 static inline unsigned
126 fd6_event_write(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
,
127 enum vgt_event_type evt
, bool timestamp
)
133 OUT_PKT7(ring
, CP_EVENT_WRITE
, timestamp
? 4 : 1);
134 OUT_RING(ring
, CP_EVENT_WRITE_0_EVENT(evt
));
136 struct fd6_context
*fd6_ctx
= fd6_context(batch
->ctx
);
137 seqno
= ++fd6_ctx
->seqno
;
138 OUT_RELOCW(ring
, fd6_ctx
->blit_mem
, 0, 0, 0); /* ADDR_LO/HI */
139 OUT_RING(ring
, seqno
);
146 fd6_cache_inv(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
148 fd6_event_write(batch
, ring
, 0x31, false);
152 fd6_cache_flush(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
154 struct fd6_context
*fd6_ctx
= fd6_context(batch
->ctx
);
157 seqno
= fd6_event_write(batch
, ring
, CACHE_FLUSH_AND_INV_EVENT
, true);
159 OUT_PKT7(ring
, CP_WAIT_REG_MEM
, 6);
160 OUT_RING(ring
, 0x00000013);
161 OUT_RELOC(ring
, fd6_ctx
->blit_mem
, 0, 0, 0);
162 OUT_RING(ring
, seqno
);
163 OUT_RING(ring
, 0xffffffff);
164 OUT_RING(ring
, 0x00000010);
166 seqno
= fd6_event_write(batch
, ring
, CACHE_FLUSH_TS
, true);
168 OUT_PKT7(ring
, CP_UNK_A6XX_14
, 4);
169 OUT_RING(ring
, 0x00000000);
170 OUT_RELOC(ring
, fd6_ctx
->blit_mem
, 0, 0, 0);
171 OUT_RING(ring
, seqno
);
175 fd6_emit_blit(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
)
177 emit_marker6(ring
, 7);
178 fd6_event_write(batch
, ring
, BLIT
, false);
179 emit_marker6(ring
, 7);
183 fd6_emit_lrz_flush(struct fd_ringbuffer
*ring
)
185 OUT_PKT7(ring
, CP_EVENT_WRITE
, 1);
186 OUT_RING(ring
, LRZ_FLUSH
);
189 static inline enum a6xx_state_block
190 fd6_stage2shadersb(gl_shader_stage type
)
193 case MESA_SHADER_VERTEX
:
194 return SB6_VS_SHADER
;
195 case MESA_SHADER_FRAGMENT
:
196 return SB6_FS_SHADER
;
197 case MESA_SHADER_COMPUTE
:
198 case MESA_SHADER_KERNEL
:
199 return SB6_CS_SHADER
;
201 unreachable("bad shader type");
206 bool fd6_emit_textures(struct fd_pipe
*pipe
, struct fd_ringbuffer
*ring
,
207 enum pipe_shader_type type
, struct fd_texture_stateobj
*tex
,
208 unsigned bcolor_offset
,
209 const struct ir3_shader_variant
*v
, struct fd_context
*ctx
);
211 void fd6_emit_state(struct fd_ringbuffer
*ring
, struct fd6_emit
*emit
);
213 void fd6_emit_cs_state(struct fd_context
*ctx
, struct fd_ringbuffer
*ring
,
214 struct ir3_shader_variant
*cp
);
216 void fd6_emit_restore(struct fd_batch
*batch
, struct fd_ringbuffer
*ring
);
218 void fd6_emit_init(struct pipe_context
*pctx
);
/* Emit an indirect-branch to a target ringbuffer, bracketed by debug
 * markers.
 */
static inline void
fd6_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	emit_marker6(ring, 6);
	__OUT_IB5(ring, target);
	emit_marker6(ring, 6);
}
/* Write a single register value.  NOTE: expects a 'ring' variable in the
 * caller's scope.
 */
#define WRITE(reg, val) do {                    \
		OUT_PKT4(ring, reg, 1);         \
		OUT_RING(ring, val);            \
	} while (0)
234 #endif /* FD6_EMIT_H */