r600g: set the flush callback in radeon_winsys
[mesa.git] / src / gallium / drivers / r600 / r600.h
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 */
26 #ifndef R600_H
27 #define R600_H
28
29 #include "util/u_double_list.h"
30 #include "util/u_inlines.h"
31
/* Max command-buffer size: 64 KiB of PM4 expressed in dwords. */
#define RADEON_CTX_MAX_PM4 (64 * 1024 / 4)

/* Error-logging helper: prefixes the message with file/line/function.
 * Uses the GNU named-variadic-macro extension (args...). */
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)

/* Kernel-style fixed-width integer shorthands used throughout this driver. */
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;

/* Opaque types defined elsewhere (winsys). */
struct radeon;
struct winsys_handle;
44
/* ASIC identifiers for the GPUs this driver supports.
 * CHIP_LAST is a sentinel (entry count), not a real chip. */
enum radeon_family {
	CHIP_UNKNOWN,
	CHIP_R600,
	CHIP_RV610,
	CHIP_RV630,
	CHIP_RV670,
	CHIP_RV620,
	CHIP_RV635,
	CHIP_RS780,
	CHIP_RS880,
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
	CHIP_RV740,
	CHIP_CEDAR,
	CHIP_REDWOOD,
	CHIP_JUNIPER,
	CHIP_CYPRESS,
	CHIP_HEMLOCK,
	CHIP_PALM,
	CHIP_SUMO,
	CHIP_SUMO2,
	CHIP_BARTS,
	CHIP_TURKS,
	CHIP_CAICOS,
	CHIP_CAYMAN,
	CHIP_LAST,
};
73
/* Coarse hardware generation; used to pick between the r600_* and the
 * evergreen_* context entry points declared below. */
enum chip_class {
	R600,
	R700,
	EVERGREEN,
	CAYMAN,
};
80
/* Memory-tiling configuration reported by the winsys
 * (see r600_get_tiling_info below). */
struct r600_tiling_info {
	unsigned num_channels;
	unsigned num_banks;
	unsigned group_bytes;
};
86
/* Winsys queries: chip identification and static GPU configuration. */
enum radeon_family r600_get_family(struct radeon *rw);
enum chip_class r600_get_family_class(struct radeon *radeon);
struct r600_tiling_info *r600_get_tiling_info(struct radeon *radeon);
unsigned r600_get_clock_crystal_freq(struct radeon *radeon);
unsigned r600_get_minor_version(struct radeon *radeon);
unsigned r600_get_num_backends(struct radeon *radeon);
unsigned r600_get_num_tile_pipes(struct radeon *radeon);
unsigned r600_get_backend_map(struct radeon *radeon);
95
/* r600_bo.c */
struct r600_bo;

/* Allocate a buffer object. */
struct r600_bo *r600_bo(struct radeon *radeon,
			unsigned size, unsigned alignment,
			unsigned binding, unsigned usage);
/* Wrap a buffer imported through a winsys handle (shared surfaces);
 * also returns its stride and array mode. */
struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
				unsigned *stride, unsigned *array_mode);
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx);
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
				unsigned stride, struct winsys_handle *whandle);

void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo);
110
111 /* this relies on the pipe_reference being the first member of r600_bo */
112 static INLINE void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst, struct r600_bo *src)
113 {
114 struct r600_bo *old = *dst;
115
116 if (pipe_reference((struct pipe_reference *)(*dst), (struct pipe_reference *)src)) {
117 r600_bo_destroy(radeon, old);
118 }
119 *dst = src;
120 }
121
122
/* R600/R700 STATES */
/* Limits for the register-block state tracker below: blocks[] and pm4[]
 * in struct r600_block and regs[] in struct r600_pipe_state are sized
 * by these. */
#define R600_GROUP_MAX 16
#define R600_BLOCK_MAX_BO 32
#define R600_BLOCK_MAX_REG 128
127
/* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
/* there is a block entry for each register so 512 blocks */
/* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
/* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound*/
#define RANGE_OFFSET_START 0x8000
#define HASH_SHIFT 9
/* Fully parenthesized so NUM_RANGES composes safely inside larger
 * expressions (e.g. as a divisor or array size). */
#define NUM_RANGES ((0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT))

/* Hash a register byte offset to its range/block indices.  The offset
 * argument is parenthesized so compound expressions hash correctly. */
#define CTX_RANGE_ID(offset) (((((offset) - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
#define CTX_BLOCK_ID(offset) ((((offset) - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
138
/* One tracked register write: a value/mask pair plus the hashed block it
 * belongs to and an optional BO to relocate. */
struct r600_pipe_reg {
	u32 value;
	u32 mask;               /* bits of the register this write owns */
	struct r600_block *block;
	struct r600_bo *bo;     /* BO relocation for this register, if any */
	u32 id;
};
146
/* A group of register writes applied together through
 * r600_context_pipe_state_set() and friends. */
struct r600_pipe_state {
	unsigned id;
	unsigned nregs;   /* number of valid entries in regs[] */
	struct r600_pipe_reg regs[R600_BLOCK_MAX_REG];
};
152
/* Hardware resource (texture/buffer) descriptor: 8 dwords of state plus
 * up to two backing BOs. */
struct r600_pipe_resource_state {
	unsigned id;
	u32 val[8];
	struct r600_bo *bo[2];
};
158
/* r600_block::status flags. */
#define R600_BLOCK_STATUS_ENABLED (1 << 0)
#define R600_BLOCK_STATUS_DIRTY (1 << 1)
#define R600_BLOCK_STATUS_RESOURCE_DIRTY (1 << 2)

#define R600_BLOCK_STATUS_RESOURCE_VERTEX (1 << 3)
164
/* Relocation entry for one BO referenced by a register block. */
struct r600_block_reloc {
	struct r600_bo *bo;
	unsigned flush_flags;
	unsigned flush_mask;
	unsigned bo_pm4_index;   /* position of this BO's reloc in the block's pm4[] — presumed, verify in the hw context code */
};
171
/* State for one hashed range of registers: dirty-tracking list links, a
 * prebuilt PM4 fragment for the range, and the BO relocations it needs. */
struct r600_block {
	struct list_head list;          /* link in the context's dirty list */
	struct list_head enable_list;   /* link in the context's enable list */
	unsigned status;                /* R600_BLOCK_STATUS_* flags */
	unsigned flags;
	unsigned start_offset;          /* register offset of the first entry */
	unsigned pm4_ndwords;
	unsigned pm4_flush_ndwords;
	unsigned nbo;                   /* valid entries in reloc[] */
	u16 nreg;                       /* registers covered by this block */
	u16 nreg_dirty;
	u32 *reg;
	u32 pm4[R600_BLOCK_MAX_REG];
	unsigned pm4_bo_index[R600_BLOCK_MAX_REG];
	struct r600_block_reloc reloc[R600_BLOCK_MAX_BO];
};
188
/* One hash range: an array of block pointers indexed by CTX_BLOCK_ID(). */
struct r600_range {
	struct r600_block **blocks;
};
192
193 /*
194 * query
195 */
struct r600_query {
	/* Accumulated result value */
	u64 result;
	/* The kind of query */
	unsigned type;
	/* Offset of the first result for current query */
	unsigned results_start;
	/* Offset of the next free result after current query data */
	unsigned results_end;
	/* Size of the result */
	unsigned result_size;
	/* Count of new queries started in one stream without flushing */
	unsigned queries_emitted;
	/* State flags */
	unsigned state;
	/* The buffer where query results are stored. It's used as a ring,
	 * data blocks for current query are stored sequentially from
	 * results_start to results_end, with wrapping on the buffer end */
	struct r600_bo *buffer;
	/* Size in bytes of the ring buffer above */
	unsigned buffer_size;
	/* linked list of queries */
	struct list_head list;
};
218
/* r600_query::state flags. */
#define R600_QUERY_STATE_STARTED (1 << 0)
#define R600_QUERY_STATE_ENDED (1 << 1)
#define R600_QUERY_STATE_SUSPENDED (1 << 2)
#define R600_QUERY_STATE_FLUSHED (1 << 3)

/* r600_context::flags. */
#define R600_CONTEXT_DRAW_PENDING (1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY (1 << 1)
#define R600_CONTEXT_CHECK_EVENT_FLUSH (1 << 2)
227
/* Command-stream build state: the hashed register state tracker, the PM4
 * buffer under construction, relocations, and query bookkeeping. */
struct r600_context {
	struct radeon *radeon;
	struct radeon_winsys_cs *cs;

	/* register state tracker */
	struct r600_range *range;
	unsigned nblocks;                /* total entries in blocks[] */
	struct r600_block **blocks;
	struct list_head dirty;          /* blocks with dirty registers */
	struct list_head resource_dirty; /* blocks with dirty resources */
	struct list_head enable_list;
	unsigned pm4_ndwords;
	unsigned pm4_dirty_cdwords;
	unsigned ctx_pm4_ndwords;
	unsigned init_dwords;

	/* relocations for the current command stream */
	unsigned creloc;
	struct radeon_bo **bo;

	/* command stream under construction */
	u32 *pm4;
	unsigned pm4_cdwords;            /* current dword count in pm4 */

	/* query state */
	struct list_head query_list;
	unsigned num_query_running;
	unsigned backend_mask;
	unsigned max_db; /* for OQ */
	unsigned num_dest_buffers;
	unsigned flags;                  /* R600_CONTEXT_* flags */
	boolean predicate_drawing;
	struct r600_range ps_resources;
	struct r600_range vs_resources;
	struct r600_range fs_resources;
	int num_ps_resources, num_vs_resources, num_fs_resources;
	boolean have_depth_texture, have_depth_fb;
};
262
/* Parameters for a single draw call; the vgt_* fields presumably mirror
 * the hardware VGT_* registers — verify against the emit code. */
struct r600_draw {
	u32 vgt_num_indices;
	u32 vgt_num_instances;
	u32 vgt_index_type;
	u32 vgt_draw_initiator;
	u32 indices_bo_offset;
	struct r600_bo *indices;   /* index buffer */
};
271
272 void r600_get_backend_mask(struct r600_context *ctx);
273 int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
274 void r600_context_fini(struct r600_context *ctx);
275 void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
276 void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
277 void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
278 void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
279 void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
280 void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
281 void r600_context_flush(struct r600_context *ctx, unsigned flags);
282 void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
283
284 struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type);
285 void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query);
286 boolean r600_context_query_result(struct r600_context *ctx,
287 struct r600_query *query,
288 boolean wait, void *vresult);
289 void r600_query_begin(struct r600_context *ctx, struct r600_query *query);
290 void r600_query_end(struct r600_context *ctx, struct r600_query *query);
291 void r600_context_queries_suspend(struct r600_context *ctx);
292 void r600_context_queries_resume(struct r600_context *ctx, boolean flushed);
293 void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
294 int flag_wait);
295 void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence,
296 unsigned offset, unsigned value);
297 void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
298 void r600_context_flush_dest_caches(struct r600_context *ctx);
299
300 int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
301 void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
302 void evergreen_context_flush_dest_caches(struct r600_context *ctx);
303 void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
304 void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
305 void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
306 void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
307 void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
308
309 struct radeon *radeon_destroy(struct radeon *radeon);
310
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			struct r600_pipe_state *state,
			u32 offset, u32 value, u32 mask,
			u32 range_id, u32 block_id,
			struct r600_bo *bo);

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
			u32 offset, u32 value, u32 mask,
			struct r600_bo *bo);
/* WARNING: this macro expands the identifier `rctx` from the caller's
 * scope; it can only be used inside functions that have a local variable
 * named rctx whose ->ctx member is the r600_context. */
#define r600_pipe_state_add_reg(state, offset, value, mask, bo) _r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo)
321
322 static inline void r600_pipe_state_mod_reg(struct r600_pipe_state *state,
323 u32 value)
324 {
325 state->regs[state->nregs].value = value;
326 state->nregs++;
327 }
328
329 static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state *state,
330 u32 value, struct r600_bo *bo)
331 {
332 state->regs[state->nregs].value = value;
333 state->regs[state->nregs].bo = bo;
334 state->nregs++;
335 }
336
337 #endif