/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>

#include "../../winsys/radeon/drm/radeon_winsys.h"
#include "util/u_double_list.h"
#include "util/u_vbuf.h"
/* Log an error to stderr prefixed with "EE" and the source location
 * (file, line, enclosing function).
 * Uses the GNU named-variadic-macro extension (`args...`); `##args`
 * swallows the trailing comma when no variadic arguments are passed.
 * `fmt` must be a string literal so it concatenates with the prefix. */
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
79 struct r600_tiling_info
{
80 unsigned num_channels
;
85 struct r600_resource
{
86 struct u_vbuf_resource b
;
89 struct pb_buffer
*buf
;
90 struct radeon_winsys_cs_handle
*cs_buf
;
/* R600/R700 STATES */
/* Upper bounds for the register-block bookkeeping structures below. */
#define R600_GROUP_MAX			16
#define R600_BLOCK_MAX_BO		32
#define R600_BLOCK_MAX_REG		128
/* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
/* there is a block entry for each register so 512 blocks */
/* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
/* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound*/
#define RANGE_OFFSET_START 0x8000
/* NOTE(review): HASH_SHIFT reconstructed from the comments above (each
 * range covers 9 bits of dword space) — confirm against the original. */
#define HASH_SHIFT 9
/* Fully parenthesized so the expansions are safe inside larger
 * expressions and with arbitrary `offset` expressions as arguments. */
#define NUM_RANGES ((0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT)) /* 128 << 9 = 64k */
#define CTX_RANGE_ID(offset) (((((offset) - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
#define CTX_BLOCK_ID(offset) ((((offset) - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
112 struct r600_pipe_reg
{
115 struct r600_block
*block
;
116 struct r600_resource
*bo
;
117 enum radeon_bo_usage bo_usage
;
121 struct r600_pipe_state
{
124 struct r600_pipe_reg regs
[R600_BLOCK_MAX_REG
];
127 struct r600_pipe_resource_state
{
130 struct r600_resource
*bo
[2];
131 enum radeon_bo_usage bo_usage
[2];
/* Status bit-flags for a register block (distinct single bits, OR-able). */
#define R600_BLOCK_STATUS_ENABLED		(1 << 0)
#define R600_BLOCK_STATUS_DIRTY			(1 << 1)
#define R600_BLOCK_STATUS_RESOURCE_DIRTY	(1 << 2)
#define R600_BLOCK_STATUS_RESOURCE_VERTEX	(1 << 3)
140 struct r600_block_reloc
{
141 struct r600_resource
*bo
;
142 enum radeon_bo_usage bo_usage
;
143 unsigned flush_flags
;
145 unsigned bo_pm4_index
;
149 struct list_head list
;
150 struct list_head enable_list
;
153 unsigned start_offset
;
154 unsigned pm4_ndwords
;
155 unsigned pm4_flush_ndwords
;
160 u32 pm4
[R600_BLOCK_MAX_REG
];
161 unsigned pm4_bo_index
[R600_BLOCK_MAX_REG
];
162 struct r600_block_reloc reloc
[R600_BLOCK_MAX_BO
];
166 struct r600_block
**blocks
;
173 struct pipe_query_data_so_statistics so
;
175 /* The kind of query */
177 /* Offset of the first result for current query */
178 unsigned results_start
;
179 /* Offset of the next free result after current query data */
180 unsigned results_end
;
181 /* Size of the result in memory for both begin_query and end_query,
182 * this can be one or two numbers, or it could even be a size of a structure. */
183 unsigned result_size
;
184 /* The buffer where query results are stored. It's used as a ring,
185 * data blocks for current query are stored sequentially from
186 * results_start to results_end, with wrapping on the buffer end */
187 struct r600_resource
*buffer
;
188 /* The number of dwords for begin_query or end_query. */
190 /* linked list of queries */
191 struct list_head list
;
194 struct r600_so_target
{
195 struct pipe_stream_output_target b
;
197 /* The buffer where BUFFER_FILLED_SIZE is stored. */
198 struct r600_resource
*filled_size
;
199 unsigned stride_in_dw
;
/* Context-wide status bit-flags (distinct single bits, OR-able). */
#define R600_CONTEXT_DRAW_PENDING	(1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY	(1 << 1)
#define R600_CONTEXT_CHECK_EVENT_FLUSH	(1 << 2)
207 struct r600_context
{
208 struct r600_screen
*screen
;
209 struct radeon_winsys
*ws
;
210 struct radeon_winsys_cs
*cs
;
211 struct pipe_context
*pipe
;
213 void (*flush
)(void *pipe
, unsigned flags
);
215 struct r600_range
*range
;
217 struct r600_block
**blocks
;
218 struct list_head dirty
;
219 struct list_head resource_dirty
;
220 struct list_head enable_list
;
221 unsigned pm4_dirty_cdwords
;
222 unsigned ctx_pm4_ndwords
;
223 unsigned init_dwords
;
226 struct r600_resource
**bo
;
229 unsigned pm4_cdwords
;
231 /* The list of active queries. Only one query of each type can be active. */
232 struct list_head active_query_list
;
233 unsigned num_cs_dw_queries_suspend
;
234 unsigned num_cs_dw_streamout_end
;
236 unsigned backend_mask
;
237 unsigned max_db
; /* for OQ */
238 unsigned num_dest_buffers
;
240 boolean predicate_drawing
;
241 struct r600_range ps_resources
;
242 struct r600_range vs_resources
;
243 struct r600_range fs_resources
;
244 int num_ps_resources
, num_vs_resources
, num_fs_resources
;
245 boolean have_depth_texture
, have_depth_fb
;
247 unsigned num_so_targets
;
248 struct r600_so_target
*so_targets
[PIPE_MAX_SO_BUFFERS
];
249 boolean streamout_start
;
250 unsigned streamout_append_bitmask
;
251 unsigned *vs_so_stride_in_dw
;
256 u32 vgt_num_instances
;
258 u32 vgt_draw_initiator
;
259 u32 indices_bo_offset
;
260 struct r600_resource
*indices
;
263 void r600_get_backend_mask(struct r600_context
*ctx
);
264 int r600_context_init(struct r600_context
*ctx
, struct r600_screen
*screen
);
265 void r600_context_fini(struct r600_context
*ctx
);
266 void r600_context_pipe_state_set(struct r600_context
*ctx
, struct r600_pipe_state
*state
);
267 void r600_context_pipe_state_set_ps_resource(struct r600_context
*ctx
, struct r600_pipe_resource_state
*state
, unsigned rid
);
268 void r600_context_pipe_state_set_vs_resource(struct r600_context
*ctx
, struct r600_pipe_resource_state
*state
, unsigned rid
);
269 void r600_context_pipe_state_set_fs_resource(struct r600_context
*ctx
, struct r600_pipe_resource_state
*state
, unsigned rid
);
270 void r600_context_pipe_state_set_ps_sampler(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned id
);
271 void r600_context_pipe_state_set_vs_sampler(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned id
);
272 void r600_context_flush(struct r600_context
*ctx
, unsigned flags
);
273 void r600_context_draw(struct r600_context
*ctx
, const struct r600_draw
*draw
);
275 struct r600_query
*r600_context_query_create(struct r600_context
*ctx
, unsigned query_type
);
276 void r600_context_query_destroy(struct r600_context
*ctx
, struct r600_query
*query
);
277 boolean
r600_context_query_result(struct r600_context
*ctx
,
278 struct r600_query
*query
,
279 boolean wait
, void *vresult
);
280 void r600_query_begin(struct r600_context
*ctx
, struct r600_query
*query
);
281 void r600_query_end(struct r600_context
*ctx
, struct r600_query
*query
);
282 void r600_context_queries_suspend(struct r600_context
*ctx
);
283 void r600_context_queries_resume(struct r600_context
*ctx
);
284 void r600_query_predication(struct r600_context
*ctx
, struct r600_query
*query
, int operation
,
/* Fence / flush / streamout entry points (declarations reconstructed from
 * the garbled extraction). */
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
			     unsigned offset, unsigned value);
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
void r600_context_flush_dest_caches(struct r600_context *ctx);

void r600_context_streamout_begin(struct r600_context *ctx);
void r600_context_streamout_end(struct r600_context *ctx);
void r600_context_draw_opaque_count(struct r600_context *ctx, struct r600_so_target *t);

/* Evergreen variants of the context entry points. */
int evergreen_context_init(struct r600_context *ctx, struct r600_screen *screen);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
304 void _r600_pipe_state_add_reg(struct r600_context
*ctx
,
305 struct r600_pipe_state
*state
,
306 u32 offset
, u32 value
, u32 mask
,
307 u32 range_id
, u32 block_id
,
308 struct r600_resource
*bo
,
309 enum radeon_bo_usage usage
);
311 void r600_pipe_state_add_reg_noblock(struct r600_pipe_state
*state
,
312 u32 offset
, u32 value
, u32 mask
,
313 struct r600_resource
*bo
,
314 enum radeon_bo_usage usage
);
/* Convenience wrapper around _r600_pipe_state_add_reg() that derives the
 * range/block hash ids from `offset`.
 * NOTE(review): the expansion references a local variable `rctx` that is
 * NOT a macro parameter — callers must have an `rctx` with a `ctx` member
 * in scope at the use site. */
#define r600_pipe_state_add_reg(state, offset, value, mask, bo, usage) \
	_r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo, usage)
318 static inline void r600_pipe_state_mod_reg(struct r600_pipe_state
*state
,
321 state
->regs
[state
->nregs
].value
= value
;
325 static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state
*state
,
326 u32 value
, struct r600_resource
*bo
,
327 enum radeon_bo_usage usage
)
329 state
->regs
[state
->nregs
].value
= value
;
330 state
->regs
[state
->nregs
].bo
= bo
;
331 state
->regs
[state
->nregs
].bo_usage
= usage
;