/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#ifndef R600_H
#define R600_H

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <util/u_double_list.h>
#include <pipe/p_compiler.h>

#define RADEON_CTX_MAX_PM4 (64 * 1024 / 4)

#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
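
/*
 * R600_ERR() is a printf-style wrapper that prefixes the message with the
 * file, line, and function of the call site; a typical (illustrative) use:
 *
 *   R600_ERR("failed to create buffer object of size %u\n", size);
 */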

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;

struct radeon;
struct winsys_handle;

enum radeon_family {
	CHIP_UNKNOWN,
	CHIP_R600,
	CHIP_RV610,
	CHIP_RV630,
	CHIP_RV670,
	CHIP_RV620,
	CHIP_RV635,
	CHIP_RS780,
	CHIP_RS880,
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
	CHIP_RV740,
	CHIP_CEDAR,
	CHIP_REDWOOD,
	CHIP_JUNIPER,
	CHIP_CYPRESS,
	CHIP_HEMLOCK,
	CHIP_PALM,
	CHIP_SUMO,
	CHIP_SUMO2,
	CHIP_BARTS,
	CHIP_TURKS,
	CHIP_CAICOS,
	CHIP_CAYMAN,
	CHIP_LAST,
};

enum chip_class {
	R600,
	R700,
	EVERGREEN,
	CAYMAN,
};

struct r600_tiling_info {
	unsigned num_channels;
	unsigned num_banks;
	unsigned group_bytes;
};

enum radeon_family r600_get_family(struct radeon *rw);
enum chip_class r600_get_family_class(struct radeon *radeon);
struct r600_tiling_info *r600_get_tiling_info(struct radeon *radeon);
unsigned r600_get_clock_crystal_freq(struct radeon *radeon);
unsigned r600_get_minor_version(struct radeon *radeon);
unsigned r600_get_num_backends(struct radeon *radeon);
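
/*
 * r600_get_family_class() is what lets callers choose between the R600/R700
 * code paths and the Evergreen ones declared further below; a hedged sketch
 * of that kind of dispatch (error handling omitted):
 *
 *   if (r600_get_family_class(radeon) >= EVERGREEN)
 *           evergreen_context_init(&ctx, radeon);
 *   else
 *           r600_context_init(&ctx, radeon);
 */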

/* r600_bo.c */
struct r600_bo;
struct r600_bo *r600_bo(struct radeon *radeon,
			unsigned size, unsigned alignment,
			unsigned binding, unsigned usage);
struct r600_bo *r600_bo_handle(struct radeon *radeon,
			       unsigned handle, unsigned *array_mode);
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx);
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst,
		       struct r600_bo *src);
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
				  unsigned stride, struct winsys_handle *whandle);
static INLINE unsigned r600_bo_offset(struct r600_bo *bo)
{
	return 0;
}
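
/*
 * A hedged sketch of the usual buffer-object lifetime (values illustrative;
 * binding and usage are the gallium PIPE_BIND_ and PIPE_USAGE_ flags of the
 * call site).  Passing NULL as src to r600_bo_reference() drops the reference:
 *
 *   struct r600_bo *bo = r600_bo(radeon, size, 4096,
 *                                PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_DEFAULT);
 *   void *ptr = r600_bo_map(radeon, bo, 0, NULL);
 *   memcpy(ptr, data, size);
 *   r600_bo_unmap(radeon, bo);
 *   r600_bo_reference(radeon, &bo, NULL);
 */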


/* R600/R700 STATES */
#define R600_GROUP_MAX			16
#define R600_BLOCK_MAX_BO		32
#define R600_BLOCK_MAX_REG		128

/* Each range covers 9 bits of dword space = 512 dwords = 2KB of register space.
 * There is a block entry for each register, so 512 blocks per range.
 * No registers are read or written below 0x8000 (0x2000 in dword space).
 * Fake offsets at 0x40000 are used for the evergreen sampler borders,
 * so 0x42000 is taken as the upper bound. */
#define RANGE_OFFSET_START 0x8000
#define HASH_SHIFT 9
#define NUM_RANGES ((0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT)) /* (0x42000 - 0x8000) / 2KB = 116 ranges */

#define CTX_RANGE_ID(offset) (((((offset) - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
#define CTX_BLOCK_ID(offset) ((((offset) - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
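
/*
 * Worked example for the two macros above, using an arbitrary register
 * offset of 0x28350: (0x28350 - 0x8000) >> 2 = 0x80d4, so
 * CTX_RANGE_ID(0x28350) = (0x80d4 >> 9) & 255 = 0x40 and
 * CTX_BLOCK_ID(0x28350) = 0x80d4 & 0x1ff = 0xd4.
 */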

struct r600_pipe_reg {
	u32 value;
	u32 mask;
	struct r600_block *block;
	struct r600_bo *bo;
	u32 id;
};

struct r600_pipe_state {
	unsigned id;
	unsigned nregs;
	struct r600_pipe_reg regs[R600_BLOCK_MAX_REG];
};

struct r600_pipe_resource_state {
	unsigned id;
	u32 val[8];
	struct r600_bo *bo[2];
};

#define R600_BLOCK_STATUS_ENABLED	(1 << 0)
#define R600_BLOCK_STATUS_DIRTY		(1 << 1)

struct r600_block_reloc {
	struct r600_bo *bo;
	unsigned flush_flags;
	unsigned flush_mask;
	unsigned bo_pm4_index;
};

struct r600_block {
	struct list_head list;
	struct list_head enable_list;
	unsigned status;
	unsigned flags;
	unsigned start_offset;
	unsigned pm4_ndwords;
	unsigned pm4_flush_ndwords;
	unsigned nbo;
	u16 nreg;
	u16 nreg_dirty;
	u32 *reg;
	u32 pm4[R600_BLOCK_MAX_REG];
	unsigned pm4_bo_index[R600_BLOCK_MAX_REG];
	struct r600_block_reloc reloc[R600_BLOCK_MAX_BO];
};

struct r600_range {
	struct r600_block **blocks;
};

/*
 * relocation
 */
#pragma pack(1)
struct r600_reloc {
	uint32_t handle;
	uint32_t read_domain;
	uint32_t write_domain;
	uint32_t flags;
};
#pragma pack()

/*
 * query
 */
struct r600_query {
	u64 result;
	/* The kind of query. Currently only occlusion queries (OQ) are supported. */
	unsigned type;
	/* How many results have been written, in dwords. Incremented
	 * after end_query and flush. */
	unsigned num_results;
	/* R600_QUERY_STATE_* flags tracking where the query is in its lifecycle. */
	unsigned state;
	/* The buffer where query results are stored. */
	struct r600_bo *buffer;
	unsigned buffer_size;
	/* linked list of queries */
	struct list_head list;
};

#define R600_QUERY_STATE_STARTED	(1 << 0)
#define R600_QUERY_STATE_ENDED		(1 << 1)
#define R600_QUERY_STATE_SUSPENDED	(1 << 2)

#define R600_CONTEXT_DRAW_PENDING	(1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY	(1 << 1)
#define R600_CONTEXT_CHECK_EVENT_FLUSH	(1 << 2)

struct r600_context {
	struct radeon *radeon;
	struct r600_range *range;
	unsigned nblocks;
	struct r600_block **blocks;
	struct list_head dirty;
	struct list_head enable_list;
	unsigned pm4_ndwords;
	unsigned pm4_cdwords;
	unsigned pm4_dirty_cdwords;
	unsigned ctx_pm4_ndwords;
	unsigned nreloc;
	unsigned creloc;
	struct r600_reloc *reloc;
	struct radeon_bo **bo;
	u32 *pm4;
	struct list_head query_list;
	unsigned num_query_running;
	struct list_head fenced_bo;
	unsigned max_db; /* for occlusion queries (number of DB backends) */
	unsigned num_dest_buffers;
	unsigned flags;
	boolean predicate_drawing;
	struct r600_range ps_resources;
	struct r600_range vs_resources;
	struct r600_range fs_resources;
	int num_ps_resources, num_vs_resources, num_fs_resources;
};

struct r600_draw {
	u32 vgt_num_indices;
	u32 vgt_num_instances;
	u32 vgt_index_type;
	u32 vgt_draw_initiator;
	u32 indices_bo_offset;
	struct r600_bo *indices;
};

int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_flush(struct r600_context *ctx);
void r600_context_dump_bof(struct r600_context *ctx, const char *file);
void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw);

struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type);
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query);
boolean r600_context_query_result(struct r600_context *ctx,
				  struct r600_query *query,
				  boolean wait, void *vresult);
void r600_query_begin(struct r600_context *ctx, struct r600_query *query);
void r600_query_end(struct r600_context *ctx, struct r600_query *query);
void r600_context_queries_suspend(struct r600_context *ctx);
void r600_context_queries_resume(struct r600_context *ctx);
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
			    int flag_wait);
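
/*
 * Query lifetime, sketched for a single occlusion query (error handling
 * omitted; the query type and boolean constants come from the gallium
 * headers p_defines.h / p_compiler.h):
 *
 *   struct r600_query *q = r600_context_query_create(ctx, PIPE_QUERY_OCCLUSION_COUNTER);
 *   u64 samples;
 *   r600_query_begin(ctx, q);
 *   ... emit draws ...
 *   r600_query_end(ctx, q);
 *   r600_context_flush(ctx);
 *   if (r600_context_query_result(ctx, q, TRUE, &samples))
 *           ... samples now holds the result ...
 *   r600_context_query_destroy(ctx, q);
 */
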
void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence,
			     unsigned offset, unsigned value);
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
void r600_context_flush_dest_caches(struct r600_context *ctx);

int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);

struct radeon *radeon_decref(struct radeon *radeon);

void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      u32 offset, u32 value, u32 mask,
			      u32 range_id, u32 block_id,
			      struct r600_bo *bo);

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     u32 offset, u32 value, u32 mask,
				     struct r600_bo *bo);
#define r600_pipe_state_add_reg(state, offset, value, mask, bo) _r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo)
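
/*
 * Note that the r600_pipe_state_add_reg() wrapper expands to a reference to a
 * local variable named rctx (the r600 pipe context), so it can only be used
 * where such an rctx is in scope.  A hedged sketch, with the register and
 * values purely illustrative:
 *
 *   struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
 *   r600_pipe_state_add_reg(rstate, R_028810_PA_CL_CLIP_CNTL,
 *                           0x00000000, 0xFFFFFFFF, NULL);
 *   r600_context_pipe_state_set(&rctx->ctx, rstate);
 */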

static inline void r600_pipe_state_mod_reg(struct r600_pipe_state *state,
					   u32 value)
{
	state->regs[state->nregs].value = value;
	state->nregs++;
}

static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state *state,
					      u32 value, struct r600_bo *bo)
{
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->nregs++;
}

#endif