/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
32 #include <util/u_double_list.h>
33 #include <pipe/p_compiler.h>
/* Maximum PM4 command-stream size, in dwords (64 KiB of dwords). */
#define RADEON_CTX_MAX_PM4 (64 * 1024 / 4)

/* Error-report helper: prints "EE file:line func - ..." to stderr.
 * Uses the GNU named-variadic-macro extension (args... / ##args), which the
 * surrounding driver code already relies on. */
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
/* GPU memory tiling parameters reported by the kernel/winsys.
 * NOTE(review): one interior field line was lost in extraction (original
 * numbering skips line 109); "num_banks" is reconstructed from the upstream
 * Mesa r600 header — confirm against the canonical source. */
struct r600_tiling_info {
	unsigned		num_channels;
	unsigned		num_banks;
	unsigned		group_bytes;
};
/* Chip/device queries against the winsys "radeon" handle. */
enum radeon_family r600_get_family(struct radeon *rw);
enum chip_class r600_get_family_class(struct radeon *radeon);
struct r600_tiling_info *r600_get_tiling_info(struct radeon *radeon);
unsigned r600_get_clock_crystal_freq(struct radeon *radeon);
unsigned r600_get_minor_version(struct radeon *radeon);
unsigned r600_get_num_backends(struct radeon *radeon);
122 struct r600_bo
*r600_bo(struct radeon
*radeon
,
123 unsigned size
, unsigned alignment
,
124 unsigned binding
, unsigned usage
);
125 struct r600_bo
*r600_bo_handle(struct radeon
*radeon
,
126 unsigned handle
, unsigned *array_mode
);
127 void *r600_bo_map(struct radeon
*radeon
, struct r600_bo
*bo
, unsigned usage
, void *ctx
);
128 void r600_bo_unmap(struct radeon
*radeon
, struct r600_bo
*bo
);
129 void r600_bo_reference(struct radeon
*radeon
, struct r600_bo
**dst
,
130 struct r600_bo
*src
);
131 boolean
r600_bo_get_winsys_handle(struct radeon
*radeon
, struct r600_bo
*pb_bo
,
132 unsigned stride
, struct winsys_handle
*whandle
);
133 static INLINE
unsigned r600_bo_offset(struct r600_bo
*bo
)
/* R600/R700 STATES */
#define R600_GROUP_MAX			16
#define R600_BLOCK_MAX_BO		32
#define R600_BLOCK_MAX_REG		128
144 struct r600_pipe_reg
{
151 struct r600_pipe_state
{
154 struct r600_pipe_reg regs
[R600_BLOCK_MAX_REG
];
157 static inline void r600_pipe_state_add_reg(struct r600_pipe_state
*state
,
158 u32 offset
, u32 value
, u32 mask
,
161 state
->regs
[state
->nregs
].offset
= offset
;
162 state
->regs
[state
->nregs
].value
= value
;
163 state
->regs
[state
->nregs
].mask
= mask
;
164 state
->regs
[state
->nregs
].bo
= bo
;
166 assert(state
->nregs
< R600_BLOCK_MAX_REG
);
/* Per-block state flags. */
#define R600_BLOCK_STATUS_ENABLED	(1 << 0)
#define R600_BLOCK_STATUS_DIRTY		(1 << 1)
/* Relocation bookkeeping for one buffer object referenced by a state block.
 * NOTE(review): two interior field lines were dropped in extraction
 * (original numbering skips 173 and 175); "bo" and "flush_mask" are
 * reconstructed from the upstream Mesa r600 header — confirm. */
struct r600_block_reloc {
	struct r600_bo		*bo;
	unsigned		flush_flags;
	unsigned		flush_mask;
	unsigned		bo_pm4_index;
};
180 struct list_head list
;
183 unsigned start_offset
;
184 unsigned pm4_ndwords
;
185 unsigned pm4_flush_ndwords
;
190 u32 pm4
[R600_BLOCK_MAX_REG
];
191 unsigned pm4_bo_index
[R600_BLOCK_MAX_REG
];
192 struct r600_block_reloc reloc
[R600_BLOCK_MAX_BO
];
196 unsigned start_offset
;
198 struct r600_block
**blocks
;
207 uint32_t read_domain
;
208 uint32_t write_domain
;
218 /* The kind of query. Currently only OQ is supported. */
220 /* How many results have been written, in dwords. It's incremented
221 * after end_query and flush. */
222 unsigned num_results
;
223 /* if we've flushed the query */
225 /* The buffer where query results are stored. */
226 struct r600_bo
*buffer
;
227 unsigned buffer_size
;
228 /* linked list of queries */
229 struct list_head list
;
/* Query lifecycle flags. */
#define R600_QUERY_STATE_STARTED	(1 << 0)
#define R600_QUERY_STATE_ENDED		(1 << 1)
#define R600_QUERY_STATE_SUSPENDED	(1 << 2)

/* Context-wide flags. */
#define R600_CONTEXT_DRAW_PENDING	(1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY	(1 << 1)
239 struct r600_context
{
240 struct radeon
*radeon
;
243 struct r600_range range
[256];
245 struct r600_block
**blocks
;
246 struct list_head dirty
;
247 unsigned pm4_ndwords
;
248 unsigned pm4_cdwords
;
249 unsigned pm4_dirty_cdwords
;
250 unsigned ctx_pm4_ndwords
;
253 struct r600_reloc
*reloc
;
254 struct radeon_bo
**bo
;
256 struct list_head query_list
;
257 unsigned num_query_running
;
258 struct list_head fenced_bo
;
259 unsigned max_db
; /* for OQ */
260 unsigned num_dest_buffers
;
262 boolean predicate_drawing
;
267 u32 vgt_num_instances
;
269 u32 vgt_draw_initiator
;
270 u32 indices_bo_offset
;
271 struct r600_bo
*indices
;
274 int r600_context_init(struct r600_context
*ctx
, struct radeon
*radeon
);
275 void r600_context_fini(struct r600_context
*ctx
);
276 void r600_context_pipe_state_set(struct r600_context
*ctx
, struct r600_pipe_state
*state
);
277 void r600_context_pipe_state_set_ps_resource(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned rid
);
278 void r600_context_pipe_state_set_vs_resource(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned rid
);
279 void r600_context_pipe_state_set_fs_resource(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned rid
);
280 void r600_context_pipe_state_set_ps_sampler(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned id
);
281 void r600_context_pipe_state_set_vs_sampler(struct r600_context
*ctx
, struct r600_pipe_state
*state
, unsigned id
);
282 void r600_context_flush(struct r600_context
*ctx
);
283 void r600_context_dump_bof(struct r600_context
*ctx
, const char *file
);
284 void r600_context_draw(struct r600_context
*ctx
, const struct r600_draw
*draw
);
286 struct r600_query
*r600_context_query_create(struct r600_context
*ctx
, unsigned query_type
);
287 void r600_context_query_destroy(struct r600_context
*ctx
, struct r600_query
*query
);
288 boolean
r600_context_query_result(struct r600_context
*ctx
,
289 struct r600_query
*query
,
290 boolean wait
, void *vresult
);
291 void r600_query_begin(struct r600_context
*ctx
, struct r600_query
*query
);
292 void r600_query_end(struct r600_context
*ctx
, struct r600_query
*query
);
293 void r600_context_queries_suspend(struct r600_context
*ctx
);
294 void r600_context_queries_resume(struct r600_context
*ctx
);
295 void r600_query_predication(struct r600_context
*ctx
, struct r600_query
*query
, int operation
,
297 void r600_context_emit_fence(struct r600_context
*ctx
, struct r600_bo
*fence
,
298 unsigned offset
, unsigned value
);
299 void r600_context_flush_all(struct r600_context
*ctx
, unsigned flush_flags
);
300 void r600_context_flush_dest_caches(struct r600_context
*ctx
);
/* Evergreen-family variants of the context entry points. */
int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
/* Drops one reference on the winsys handle; returns NULL once destroyed. */
struct radeon *radeon_decref(struct radeon *radeon);