/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#ifndef R600_H
#define R600_H

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <util/u_double_list.h>
#include <util/u_inlines.h>
#include <pipe/p_compiler.h>

#define RADEON_CTX_MAX_PM4 (64 * 1024 / 4)

#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
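/* Illustrative use (the variable is a placeholder): R600_ERR("unsupported format %d\n", format); */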

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;

struct radeon;
struct winsys_handle;

enum radeon_family {
	CHIP_UNKNOWN,
	CHIP_R600,
	CHIP_RV610,
	CHIP_RV630,
	CHIP_RV670,
	CHIP_RV620,
	CHIP_RV635,
	CHIP_RS780,
	CHIP_RS880,
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
	CHIP_RV740,
	CHIP_CEDAR,
	CHIP_REDWOOD,
	CHIP_JUNIPER,
	CHIP_CYPRESS,
	CHIP_HEMLOCK,
	CHIP_PALM,
	CHIP_SUMO,
	CHIP_SUMO2,
	CHIP_BARTS,
	CHIP_TURKS,
	CHIP_CAICOS,
	CHIP_CAYMAN,
	CHIP_LAST,
};

enum chip_class {
	R600,
	R700,
	EVERGREEN,
	CAYMAN,
};

struct r600_tiling_info {
	unsigned num_channels;
	unsigned num_banks;
	unsigned group_bytes;
};

enum radeon_family r600_get_family(struct radeon *rw);
enum chip_class r600_get_family_class(struct radeon *radeon);
struct r600_tiling_info *r600_get_tiling_info(struct radeon *radeon);
unsigned r600_get_clock_crystal_freq(struct radeon *radeon);
unsigned r600_get_minor_version(struct radeon *radeon);
unsigned r600_get_num_backends(struct radeon *radeon);

/* r600_bo.c */
struct r600_bo;
struct r600_bo *r600_bo(struct radeon *radeon,
			unsigned size, unsigned alignment,
			unsigned binding, unsigned usage);
struct r600_bo *r600_bo_handle(struct radeon *radeon,
			       unsigned handle, unsigned *array_mode);
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx);
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
				  unsigned stride, struct winsys_handle *whandle);
static INLINE unsigned r600_bo_offset(struct r600_bo *bo)
{
	return 0;
}
void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo);
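
/*
 * Illustrative buffer lifecycle (a sketch, not taken from the driver sources;
 * the alignment and the binding/usage flag values are placeholders):
 *
 *   struct r600_bo *bo = r600_bo(radeon, size, 4096, binding, usage);
 *   void *ptr = r600_bo_map(radeon, bo, usage, ctx);
 *   ... write vertex/constant data through ptr ...
 *   r600_bo_unmap(radeon, bo);
 *   ... reference the buffer from the command stream ...
 *   r600_bo_destroy(radeon, bo);  // creator drops its only reference
 */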

/* this relies on the pipe_reference being the first member of r600_bo */
static INLINE void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst, struct r600_bo *src)
{
	struct r600_bo *old = *dst;

	if (pipe_reference((struct pipe_reference *)(*dst), (struct pipe_reference *)src)) {
		r600_bo_destroy(radeon, old);
	}
	*dst = src;
}
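
/*
 * Example (sketch): r600_bo_reference(radeon, &surf->bo, new_bo) takes a
 * reference on new_bo, drops the one previously held through surf->bo
 * (destroying that buffer if it was the last reference) and updates the
 * pointer; r600_bo_reference(radeon, &surf->bo, NULL) just releases it.
 * "surf" is only an illustrative holder of a bo pointer here.
 */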


/* R600/R700 STATES */
#define R600_GROUP_MAX 16
#define R600_BLOCK_MAX_BO 32
#define R600_BLOCK_MAX_REG 128

/* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
/* there is a block entry for each register so 512 blocks */
/* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
/* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound */
#define RANGE_OFFSET_START 0x8000
#define HASH_SHIFT 9
#define NUM_RANGES (0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT) /* 128 << 9 = 64k */

#define CTX_RANGE_ID(offset) ((((offset - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
#define CTX_BLOCK_ID(offset) (((offset - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
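
/*
 * Worked example (illustrative offset, not a specific hardware register):
 * for a register at byte offset 0x9100,
 *   (0x9100 - RANGE_OFFSET_START) >> 2 = 0x440 (dword index 1088), so
 *   CTX_RANGE_ID(0x9100) = (1088 >> 9) & 255 = 2 and
 *   CTX_BLOCK_ID(0x9100) = 1088 & 511 = 64.
 */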

struct r600_pipe_reg {
	u32 value;
	u32 mask;
	struct r600_block *block;
	struct r600_bo *bo;
	u32 id;
};

struct r600_pipe_state {
	unsigned id;
	unsigned nregs;
	struct r600_pipe_reg regs[R600_BLOCK_MAX_REG];
};

struct r600_pipe_resource_state {
	unsigned id;
	u32 val[8];
	struct r600_bo *bo[2];
};

#define R600_BLOCK_STATUS_ENABLED (1 << 0)
#define R600_BLOCK_STATUS_DIRTY (1 << 1)
#define R600_BLOCK_STATUS_RESOURCE_DIRTY (1 << 2)

#define R600_BLOCK_STATUS_RESOURCE_VERTEX (1 << 3)

struct r600_block_reloc {
	struct r600_bo *bo;
	unsigned flush_flags;
	unsigned flush_mask;
	unsigned bo_pm4_index;
};

struct r600_block {
	struct list_head list;
	struct list_head enable_list;
	unsigned status;
	unsigned flags;
	unsigned start_offset;
	unsigned pm4_ndwords;
	unsigned pm4_flush_ndwords;
	unsigned nbo;
	u16 nreg;
	u16 nreg_dirty;
	u32 *reg;
	u32 pm4[R600_BLOCK_MAX_REG];
	unsigned pm4_bo_index[R600_BLOCK_MAX_REG];
	struct r600_block_reloc reloc[R600_BLOCK_MAX_BO];
};

struct r600_range {
	struct r600_block **blocks;
};

/*
 * relocation
 */
#pragma pack(1)
struct r600_reloc {
	uint32_t handle;
	uint32_t read_domain;
	uint32_t write_domain;
	uint32_t flags;
};
#pragma pack()
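/* The pack pragmas above keep struct r600_reloc free of compiler padding,
 * presumably so an array of these records can be handed to the kernel CS
 * interface byte-for-byte (an assumption; this header does not name the
 * consumer). */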

/*
 * query
 */
struct r600_query {
	u64 result;
	/* The kind of query. Currently only occlusion queries (OQ) are supported. */
	unsigned type;
	/* How many results have been written, in dwords. It's incremented
	 * after end_query and flush. */
	unsigned num_results;
	/* Bitmask of R600_QUERY_STATE_* flags, e.g. whether the query has been flushed. */
	unsigned state;
	/* The buffer where query results are stored. */
	struct r600_bo *buffer;
	unsigned buffer_size;
	/* linked list of queries */
	struct list_head list;
};

#define R600_QUERY_STATE_STARTED (1 << 0)
#define R600_QUERY_STATE_ENDED (1 << 1)
#define R600_QUERY_STATE_SUSPENDED (1 << 2)
#define R600_QUERY_STATE_FLUSHED (1 << 3)
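
/*
 * Rough lifecycle, inferred from the state flags and the query functions
 * declared below (a sketch, not documentation of the implementation):
 * r600_context_query_create() -> r600_query_begin() [STARTED] -> draws ->
 * r600_query_end() [ENDED] -> command stream flush [FLUSHED] ->
 * r600_context_query_result() reads back the accumulated result.
 * Active queries are suspended/resumed [SUSPENDED] around work that has to
 * interrupt them, e.g. a context flush.
 */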

#define R600_CONTEXT_DRAW_PENDING (1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY (1 << 1)
#define R600_CONTEXT_CHECK_EVENT_FLUSH (1 << 2)

struct r600_context {
	struct radeon *radeon;
	struct r600_range *range;
	unsigned nblocks;
	struct r600_block **blocks;
	struct list_head dirty;
	struct list_head resource_dirty;
	struct list_head enable_list;
	unsigned pm4_ndwords;
	unsigned pm4_cdwords;
	unsigned pm4_dirty_cdwords;
	unsigned ctx_pm4_ndwords;
	unsigned init_dwords;
	unsigned nreloc;
	unsigned creloc;
	struct r600_reloc *reloc;
	struct radeon_bo **bo;
	u32 *pm4;
	struct list_head query_list;
	unsigned num_query_running;
	struct list_head fenced_bo;
	unsigned max_db; /* for OQ */
	unsigned num_dest_buffers;
	unsigned flags;
	boolean predicate_drawing;
	struct r600_range ps_resources;
	struct r600_range vs_resources;
	struct r600_range fs_resources;
	int num_ps_resources, num_vs_resources, num_fs_resources;
	boolean have_depth_texture, have_depth_fb;
};

struct r600_draw {
	u32 vgt_num_indices;
	u32 vgt_num_instances;
	u32 vgt_index_type;
	u32 vgt_draw_initiator;
	u32 indices_bo_offset;
	struct r600_bo *indices;
};

int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_flush(struct r600_context *ctx);
void r600_context_dump_bof(struct r600_context *ctx, const char *file);
void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw);

struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type);
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query);
boolean r600_context_query_result(struct r600_context *ctx,
				  struct r600_query *query,
				  boolean wait, void *vresult);
void r600_query_begin(struct r600_context *ctx, struct r600_query *query);
void r600_query_end(struct r600_context *ctx, struct r600_query *query);
void r600_context_queries_suspend(struct r600_context *ctx);
void r600_context_queries_resume(struct r600_context *ctx, boolean flushed);
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
			    int flag_wait);
void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence,
			     unsigned offset, unsigned value);
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
void r600_context_flush_dest_caches(struct r600_context *ctx);

int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);

struct radeon *radeon_decref(struct radeon *radeon);

void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      u32 offset, u32 value, u32 mask,
			      u32 range_id, u32 block_id,
			      struct r600_bo *bo);

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     u32 offset, u32 value, u32 mask,
				     struct r600_bo *bo);
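
/*
 * Convenience wrapper around _r600_pipe_state_add_reg(): note that it expands
 * to "&rctx->ctx", so it can only be used where a local variable named rctx
 * (whose ->ctx member is the struct r600_context) is in scope at the call
 * site.
 */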
#define r600_pipe_state_add_reg(state, offset, value, mask, bo) _r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo)

static inline void r600_pipe_state_mod_reg(struct r600_pipe_state *state,
					   u32 value)
{
	state->regs[state->nregs].value = value;
	state->nregs++;
}

static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state *state,
					      u32 value, struct r600_bo *bo)
{
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->nregs++;
}

#endif