2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include "util/u_double_list.h"
30 #include "util/u_inlines.h"
/* Maximum size of a PM4 command buffer, in dwords (64 KiB of bytes / 4). */
#define RADEON_CTX_MAX_PM4 (64 * 1024 / 4)

/* Error-logging helper: prints "EE file:line function - <message>" to stderr.
 * Uses the GNU named-variadic extension ("args..."); the ## before args
 * swallows the comma when the macro is invoked with no extra arguments. */
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
81 struct r600_tiling_info
{
82 unsigned num_channels
;
/* Chip identification and hardware-capability queries. */
enum radeon_family r600_get_family(struct radeon *rw);
enum chip_class r600_get_family_class(struct radeon *radeon);
struct r600_tiling_info *r600_get_tiling_info(struct radeon *radeon);
/* NOTE(review): units of the returned frequency are not visible here —
 * confirm against the implementation before relying on them. */
unsigned r600_get_clock_crystal_freq(struct radeon *radeon);
/* DRM minor version, used for feature checks. */
unsigned r600_get_minor_version(struct radeon *radeon);
unsigned r600_get_num_backends(struct radeon *radeon);
unsigned r600_get_num_tile_pipes(struct radeon *radeon);
unsigned r600_get_backend_map(struct radeon *radeon);
/*
 * r600_bo buffer-object API.
 */
/* Allocate a buffer object of `size` bytes with the given `alignment`;
 * `binding` and `usage` describe the intended use of the buffer. */
struct r600_bo *r600_bo(struct radeon *radeon,
			unsigned size, unsigned alignment,
			unsigned binding, unsigned usage);
/* Import a buffer from a winsys handle; stride and array mode are returned
 * through the out parameters. */
struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
			       unsigned *stride, unsigned *array_mode);
/* Map the buffer for CPU access; returns the mapped pointer. */
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx);
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
/* Export the buffer as a winsys handle (e.g. for cross-process sharing). */
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
				  unsigned stride, struct winsys_handle *whandle);
/* Free the buffer immediately; most callers should instead drop their
 * reference via r600_bo_reference(), which calls this when the refcount
 * reaches zero. */
void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo);
111 /* this relies on the pipe_reference being the first member of r600_bo */
112 static INLINE
void r600_bo_reference(struct radeon
*radeon
, struct r600_bo
**dst
, struct r600_bo
*src
)
114 struct r600_bo
*old
= *dst
;
116 if (pipe_reference((struct pipe_reference
*)(*dst
), (struct pipe_reference
*)src
)) {
117 r600_bo_destroy(radeon
, old
);
/* R600/R700 STATES */
#define R600_GROUP_MAX 16
#define R600_BLOCK_MAX_BO 32
#define R600_BLOCK_MAX_REG 128

/* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
/* there is a block entry for each register so 512 blocks */
/* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
/* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound*/
#define RANGE_OFFSET_START 0x8000

/* NOTE(review): HASH_SHIFT is expected to be defined elsewhere in this header
 * (the definition is not visible in this chunk).
 *
 * The whole NUM_RANGES expansion and the `offset` macro arguments are now
 * parenthesized so the macros stay correct inside larger expressions and
 * when invoked with expression arguments (e.g. CTX_RANGE_ID(base + off)). */
#define NUM_RANGES ((0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT)) /* 128 << 9 = 64k */

/* Hash a register byte offset into its range id (upper hash bits)... */
#define CTX_RANGE_ID(offset) (((((offset) - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
/* ...and into its block id within the range (low HASH_SHIFT bits). */
#define CTX_BLOCK_ID(offset) ((((offset) - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
139 struct r600_pipe_reg
{
142 struct r600_block
*block
;
147 struct r600_pipe_state
{
150 struct r600_pipe_reg regs
[R600_BLOCK_MAX_REG
];
153 struct r600_pipe_resource_state
{
156 struct r600_bo
*bo
[2];
/* Status flag bits for a register block's `status` word. */
#define R600_BLOCK_STATUS_ENABLED (1 << 0)
#define R600_BLOCK_STATUS_DIRTY (1 << 1)
#define R600_BLOCK_STATUS_RESOURCE_DIRTY (1 << 2)
#define R600_BLOCK_STATUS_RESOURCE_VERTEX (1 << 3)
165 struct r600_block_reloc
{
167 unsigned flush_flags
;
169 unsigned bo_pm4_index
;
173 struct list_head list
;
174 struct list_head enable_list
;
177 unsigned start_offset
;
178 unsigned pm4_ndwords
;
179 unsigned pm4_flush_ndwords
;
184 u32 pm4
[R600_BLOCK_MAX_REG
];
185 unsigned pm4_bo_index
[R600_BLOCK_MAX_REG
];
186 struct r600_block_reloc reloc
[R600_BLOCK_MAX_BO
];
190 struct r600_block
**blocks
;
199 uint32_t read_domain
;
200 uint32_t write_domain
;
210 /* The kind of query */
212 /* Offset of the first result for current query */
213 unsigned results_start
;
214 /* Offset of the next free result after current query data */
215 unsigned results_end
;
216 /* Size of the result */
217 unsigned result_size
;
218 /* Count of new queries started in one stream without flushing */
219 unsigned queries_emitted
;
222 /* The buffer where query results are stored. It's used as a ring,
223 * data blocks for current query are stored sequentially from
224 * results_start to results_end, with wrapping on the buffer end */
225 struct r600_bo
*buffer
;
226 unsigned buffer_size
;
227 /* linked list of queries */
228 struct list_head list
;
/* Lifecycle state bits for an r600_query. */
#define R600_QUERY_STATE_STARTED (1 << 0)
#define R600_QUERY_STATE_ENDED (1 << 1)
#define R600_QUERY_STATE_SUSPENDED (1 << 2)
#define R600_QUERY_STATE_FLUSHED (1 << 3)

/* Flag bits for the r600_context `flags` word. */
#define R600_CONTEXT_DRAW_PENDING (1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY (1 << 1)
#define R600_CONTEXT_CHECK_EVENT_FLUSH (1 << 2)
240 struct r600_context
{
241 struct radeon
*radeon
;
242 struct radeon_winsys_cs
*cs
;
243 struct r600_range
*range
;
245 struct r600_block
**blocks
;
246 struct list_head dirty
;
247 struct list_head resource_dirty
;
248 struct list_head enable_list
;
249 unsigned pm4_ndwords
;
250 unsigned pm4_cdwords
;
251 unsigned pm4_dirty_cdwords
;
252 unsigned ctx_pm4_ndwords
;
253 unsigned init_dwords
;
256 struct r600_reloc
*reloc
;
257 struct radeon_bo
**bo
;
260 struct list_head query_list
;
261 unsigned num_query_running
;
262 unsigned backend_mask
;
263 unsigned max_db
; /* for OQ */
264 unsigned num_dest_buffers
;
266 boolean predicate_drawing
;
267 struct r600_range ps_resources
;
268 struct r600_range vs_resources
;
269 struct r600_range fs_resources
;
270 int num_ps_resources
, num_vs_resources
, num_fs_resources
;
271 boolean have_depth_texture
, have_depth_fb
;
276 u32 vgt_num_instances
;
278 u32 vgt_draw_initiator
;
279 u32 indices_bo_offset
;
280 struct r600_bo
*indices
;
/* R600/R700 hardware-context API. */
void r600_get_backend_mask(struct r600_context *ctx);
/* Initialize `ctx` against the given winsys; returns 0 on success,
 * non-zero on failure. NOTE(review): error-code convention inferred from
 * the int return — confirm against the implementation. */
int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
void r600_context_fini(struct r600_context *ctx);
/* Merge a pipe state object into the context's register blocks. */
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
/* Bind a resource state to slot `rid` of the respective shader stage. */
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
/* Bind a sampler state to slot `id` of the respective shader stage. */
void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
/* Submit the accumulated command stream to the kernel. */
void r600_context_flush(struct r600_context *ctx);
void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw);

/* Query API (results are written by the GPU into a ring buffer, see
 * struct r600_query above). */
struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type);
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query);
/* Fetch the query result into *vresult; if `wait` is TRUE, block until the
 * GPU has produced it. Returns TRUE when a result was available. */
boolean r600_context_query_result(struct r600_context *ctx,
				  struct r600_query *query,
				  boolean wait, void *vresult);
void r600_query_begin(struct r600_context *ctx, struct r600_query *query);
void r600_query_end(struct r600_context *ctx, struct r600_query *query);
/* Suspend/resume all currently running queries (used around flushes);
 * `flushed` tells resume whether the stream was submitted in between. */
void r600_context_queries_suspend(struct r600_context *ctx);
void r600_context_queries_resume(struct r600_context *ctx, boolean flushed);
304 void r600_query_predication(struct r600_context
*ctx
, struct r600_query
*query
, int operation
,
/* Emit a fence: the GPU writes `value` at `offset` into the fence buffer
 * when preceding commands have completed. */
void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence,
			     unsigned offset, unsigned value);
/* Emit a full flush with the given flush flags. */
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
/* Flush destination (color/depth) caches if marked dirty. */
void r600_context_flush_dest_caches(struct r600_context *ctx);
/* Evergreen-specific variants of the context entry points above; same
 * contracts, different register layout/packets for the Evergreen family. */
int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);

/* Tear down the winsys object; returns NULL for convenient assignment
 * (rw = radeon_destroy(rw)). NOTE(review): return semantics inferred from
 * the signature — confirm against the implementation. */
struct radeon *radeon_destroy(struct radeon *radeon);
322 void _r600_pipe_state_add_reg(struct r600_context
*ctx
,
323 struct r600_pipe_state
*state
,
324 u32 offset
, u32 value
, u32 mask
,
325 u32 range_id
, u32 block_id
,
328 void r600_pipe_state_add_reg_noblock(struct r600_pipe_state
*state
,
329 u32 offset
, u32 value
, u32 mask
,
/* Convenience wrapper for _r600_pipe_state_add_reg(): derives the hash
 * range/block ids from `offset` at the call site.
 * NOTE(review): this macro expands the identifier `rctx` from the caller's
 * scope — it is only usable in functions that have a local named rctx with
 * a `ctx` member. */
#define r600_pipe_state_add_reg(state, offset, value, mask, bo) _r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo)
333 static inline void r600_pipe_state_mod_reg(struct r600_pipe_state
*state
,
336 state
->regs
[state
->nregs
].value
= value
;
340 static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state
*state
,
341 u32 value
, struct r600_bo
*bo
)
343 state
->regs
[state
->nregs
].value
= value
;
344 state
->regs
[state
->nregs
].bo
= bo
;