#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H

#include "math/m_vector.h"
#include "tnl/t_context.h"
#include "main/colormac.h"

#include "radeon_screen.h"
#include "radeon_debug.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "tnl/t_vertex.h"
#include "swrast/s_context.h"

struct radeon_context;

#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"

/* This union is used to avoid warnings/miscompilation
   with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
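
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * punning a float through float_ui32_type yields the raw 32-bit pattern
 * that register-level code writes into the command stream, without the
 * strict-aliasing pitfalls of a pointer cast.
 */
static inline uint32_t radeonFloatAsUI32Example(GLfloat f)
{
   float_ui32_type u;
   u.f = f;
   return u.ui32;
}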

struct radeon_context;
typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;


#define TEX_0   0x1
#define TEX_1   0x2
#define TEX_2   0x4
#define TEX_3   0x8
#define TEX_4   0x10
#define TEX_5   0x20

/* Rasterizing fallbacks */
/* See corresponding strings in r200_swtcl.c */
#define RADEON_FALLBACK_TEXTURE         0x0001
#define RADEON_FALLBACK_DRAW_BUFFER     0x0002
#define RADEON_FALLBACK_STENCIL         0x0004
#define RADEON_FALLBACK_RENDER_MODE     0x0008
#define RADEON_FALLBACK_BLEND_EQ        0x0010
#define RADEON_FALLBACK_BLEND_FUNC      0x0020
#define RADEON_FALLBACK_DISABLE         0x0040
#define RADEON_FALLBACK_BORDER_MODE     0x0080
#define RADEON_FALLBACK_DEPTH_BUFFER    0x0100
#define RADEON_FALLBACK_STENCIL_BUFFER  0x0200

#define R200_FALLBACK_TEXTURE           0x01
#define R200_FALLBACK_DRAW_BUFFER       0x02
#define R200_FALLBACK_STENCIL           0x04
#define R200_FALLBACK_RENDER_MODE       0x08
#define R200_FALLBACK_DISABLE           0x10
#define R200_FALLBACK_BORDER_MODE       0x20

#define RADEON_TCL_FALLBACK_RASTER         0x1   /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED       0x2   /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE  0x4   /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL       0x8   /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0       0x10  /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1       0x20  /* texgen, unit 1 */
#define RADEON_TCL_FALLBACK_TEXGEN_2       0x40  /* texgen, unit 2 */
#define RADEON_TCL_FALLBACK_TCL_DISABLE    0x80  /* user disable */
#define RADEON_TCL_FALLBACK_FOGCOORDSPEC   0x100 /* fogcoord, sep. spec light */

/* The blit width for texture uploads
 */
#define BLIT_WIDTH_BYTES 1024

/* Use the templated vertex format:
 */
#define COLOR_IS_RGBA
#define TAG(x) radeon##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define RADEON_RB_CLASS 0xdeadbeef

struct radeon_renderbuffer
{
   struct swrast_renderbuffer base;

   struct radeon_bo *bo;
   unsigned int cpp;
   /* unsigned int offset; */
   unsigned int pitch;

   struct radeon_bo *map_bo;
   GLbitfield map_mode;
   int map_x, map_y, map_w, map_h;
   int map_pitch;
   void *map_buffer;

   uint32_t draw_offset;  /* FBO */
   /* boo Xorg 6.8.2 compat */
   int has_surface;

   GLuint pf_pending;  /**< sequence number of pending flip */
   __DRIdrawable *dPriv;
};
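
/*
 * Illustrative sketch (hypothetical helper, based on the assumption that
 * map_buffer/map_pitch describe a CPU mapping whose origin is (map_x, map_y)
 * and whose row stride is map_pitch bytes): addressing one mapped pixel.
 */
static inline void *radeon_renderbuffer_pixel_example(struct radeon_renderbuffer *rrb,
                                                      int x, int y)
{
   return (char *) rrb->map_buffer
          + (y - rrb->map_y) * rrb->map_pitch
          + (x - rrb->map_x) * rrb->cpp;
}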

struct radeon_framebuffer
{
   struct gl_framebuffer base;

   struct radeon_renderbuffer *color_rb[2];
};


struct radeon_colorbuffer_state {
   int roundEnable;
   struct gl_renderbuffer *rb;
   uint32_t draw_offset;  /* offset into color renderbuffer - FBOs */
};

struct radeon_depthbuffer_state {
   struct gl_renderbuffer *rb;
};

struct radeon_scissor_state {
   drm_clip_rect_t rect;
   GLboolean enabled;
};

struct radeon_state_atom {
   struct radeon_state_atom *next, *prev;
   const char *name;   /* for debug */
   int cmd_size;       /* size in bytes */
   GLuint idx;
   GLuint is_tcl;
   GLuint *cmd;        /* one or more cmd's */
   GLuint *lastcmd;    /* one or more cmd's */
   GLboolean dirty;    /* dirty-mark in emit_state_list */
   int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
   void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
};
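
/*
 * Illustrative sketch (hypothetical check callback, not one of the driver's
 * real atoms): check() reports whether the atom is active, returning a
 * non-zero emit size when it should be written and 0 when it can be skipped.
 */
static inline int radeon_atom_always_active_example(struct gl_context *ctx,
                                                    struct radeon_state_atom *atom)
{
   (void) ctx;             /* this example never looks at GL state */
   return atom->cmd_size;  /* always emit the whole command block */
}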

struct radeon_hw_state {
   /* Head of the linked list of state atoms. */
   struct radeon_state_atom atomlist;
   int max_state_size;       /* Number of bytes necessary for a full state emit. */
   int max_post_flush_size;  /* Number of bytes necessary for post flushing emits */
   GLboolean is_dirty, all_dirty;
};
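
/*
 * Illustrative sketch (hypothetical helper, assuming atomlist acts as the
 * sentinel head of a circular, doubly linked list of atoms): counting the
 * registered atoms.
 */
static inline int radeon_count_state_atoms_example(struct radeon_hw_state *hw)
{
   struct radeon_state_atom *atom;
   int count = 0;

   for (atom = hw->atomlist.next; atom != &hw->atomlist; atom = atom->next)
      count++;
   return count;
}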


/* Texture related */
typedef struct _radeon_texture_image radeon_texture_image;


/**
 * This is a subclass of swrast_texture_image since we use swrast
 * for software fallback rendering.
 */
struct _radeon_texture_image {
   struct swrast_texture_image base;

   /**
    * If mt != 0, the image is stored in hardware format in the
    * given mipmap tree. In this case, base.Data may point into the
    * mapping of the buffer object that contains the mipmap tree.
    *
    * If mt == 0, the image is stored in normal memory pointed to
    * by base.Data.
    */
   struct _radeon_mipmap_tree *mt;
   struct radeon_bo *bo;
   GLboolean used_as_render_target;
};


static inline radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
{
   return (radeon_texture_image*)image;
}


typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;

#define RADEON_TXO_MICRO_TILE   (1 << 3)

/* Texture object in locally shared texture space.
 */
struct radeon_tex_obj {
   struct gl_texture_object base;
   struct _radeon_mipmap_tree *mt;

   /**
    * This is true if we've verified that the mipmap tree above is complete
    * and so on.
    */
   GLboolean validated;
   /* Minimum LOD to be used during rendering */
   unsigned minLod;
   /* Maximum LOD to be used during rendering */
   unsigned maxLod;

   GLuint override_offset;
   GLboolean image_override;  /* Image overridden by GLX_EXT_tfp */
   GLuint tile_bits;          /* hw texture tile bits used on this texture */
   struct radeon_bo *bo;

   GLuint pp_txfilter;        /* hardware register values */
   GLuint pp_txformat;
   GLuint pp_txformat_x;
   GLuint pp_txsize;          /* npot only */
   GLuint pp_txpitch;         /* npot only */
   GLuint pp_border_color;
   GLuint pp_cubic_faces;     /* cube face 1,2,3,4 log2 sizes */

   GLboolean border_fallback;
};

static inline radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
{
   return (radeonTexObj*)texObj;
}

/* occlusion query */
struct radeon_query_object {
   struct gl_query_object Base;
   struct radeon_bo *bo;
   int curr_offset;
   GLboolean emitted_begin;

   /* Doubly linked list of not-yet-flushed query objects */
   struct radeon_query_object *prev, *next;
};

/* Need refcounting on dma buffers:
 */
struct radeon_dma_buffer {
   int refcount;  /* the number of retained regions in buf */
   drmBufPtr buf;
};

struct radeon_aos {
   struct radeon_bo *bo;  /** Buffer object where vertex data is stored */
   int offset;            /** Offset into buffer object, in bytes */
   int components;        /** Number of components per vertex */
   int stride;            /** Stride in dwords (may be 0 for repeating) */
   int count;             /** Number of vertices */
};

#define DMA_BO_FREE_TIME 100

struct radeon_dma_bo {
   struct radeon_dma_bo *next, *prev;
   struct radeon_bo *bo;
   int expire_counter;
};

struct radeon_dma {
   /* Active dma region. Allocations for vertices and retained
    * regions come from here. Also used for emitting random vertices;
    * these may be flushed by calling flush_current().
    */
   struct radeon_dma_bo free;
   struct radeon_dma_bo wait;
   struct radeon_dma_bo reserved;
   size_t current_used;       /** Number of bytes allocated and forgotten about */
   size_t current_vertexptr;  /** End of active vertex region */
   size_t minimum_size;

   /**
    * If current_vertexptr != current_used then flush must be non-zero.
    * flush must be called before non-active vertex allocations can be
    * performed.
    */
   void (*flush) (struct gl_context *);
};
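
/*
 * Illustrative sketch (hypothetical helper): honoring the contract documented
 * above, callers invoke the flush hook before allocating outside the active
 * vertex region.
 */
static inline void radeon_dma_flush_example(struct gl_context *ctx,
                                            struct radeon_dma *dma)
{
   if (dma->flush)
      dma->flush(ctx);  /* closes out the vertices emitted so far */
}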

/* radeon_swtcl.c
 */
struct radeon_swtcl_info {

   GLuint RenderIndex;
   GLuint vertex_size;
   GLubyte *verts;

   /* Fallback rasterization functions
    */
   GLuint hw_primitive;
   GLenum render_primitive;
   GLuint numverts;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLuint emit_prediction;
   struct radeon_bo *bo;
};

#define RADEON_MAX_AOS_ARRAYS 16
struct radeon_tcl_info {
   struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
   GLuint aos_count;
   struct radeon_bo *elt_dma_bo;  /** Buffer object that contains element indices */
   int elt_dma_offset;            /** Offset into this buffer object, in bytes */
};

struct radeon_ioctl {
   GLuint vertex_offset;
   GLuint vertex_max;
   struct radeon_bo *bo;
   GLuint vertex_size;
};

#define RADEON_MAX_PRIMS 64

struct radeon_prim {
   GLuint start;
   GLuint end;
   GLuint prim;
};

static inline GLuint radeonPackColor(GLuint cpp,
                                     GLubyte r, GLubyte g,
                                     GLubyte b, GLubyte a)
{
   switch (cpp) {
   case 2:
      return PACK_COLOR_565(r, g, b);
   case 4:
      return PACK_COLOR_8888(a, r, g, b);
   default:
      return 0;
   }
}
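
/*
 * Illustrative sketch (hypothetical values): packing an opaque red clear
 * color for a 32 bpp (cpp == 4) color buffer and for a 16 bpp (cpp == 2) one.
 */
static inline void radeon_pack_color_example(GLuint *argb8888, GLuint *rgb565)
{
   *argb8888 = radeonPackColor(4, 0xff, 0x00, 0x00, 0xff);
   *rgb565   = radeonPackColor(2, 0xff, 0x00, 0x00, 0xff);
}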

#define MAX_CMD_BUF_SZ (16*1024)

#define MAX_DMA_BUF_SZ (64*1024)

struct radeon_store {
   GLuint statenr;
   GLuint primnr;
   char cmd_buf[MAX_CMD_BUF_SZ];
   int cmd_used;
   int elts_start;
};

typedef void (*radeon_tri_func) (radeonContextPtr,
                                 radeonVertex *,
                                 radeonVertex *, radeonVertex *);

typedef void (*radeon_line_func) (radeonContextPtr,
                                  radeonVertex *, radeonVertex *);

typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);

#define RADEON_MAX_BOS 32
struct radeon_state {
   struct radeon_colorbuffer_state color;
   struct radeon_depthbuffer_state depth;
   struct radeon_scissor_state scissor;
};

/**
 * This structure holds the command buffer while it is being constructed.
 *
 * The first batch of commands in the buffer is always the state that needs
 * to be re-emitted when the context is lost. This batch can be skipped
 * otherwise.
 */
struct radeon_cmdbuf {
   struct radeon_cs_manager *csm;
   struct radeon_cs *cs;
   int size;                 /** # of dwords total */
   unsigned int flushing:1;  /** whether we're currently in FlushCmdBufLocked */
};

struct radeon_context {
   struct gl_context glCtx;       /**< base class, must be first */
   __DRIcontext *driContext;      /* DRI context */
   radeonScreenPtr radeonScreen;  /* Screen private DRI data */

   /* Texture object bookkeeping
    */
   int texture_depth;
   float initialMaxAnisotropy;
   uint32_t texture_row_align;
   uint32_t texture_rect_row_align;
   uint32_t texture_compressed_row_align;

   struct radeon_dma dma;
   struct radeon_hw_state hw;
   /* Rasterization and vertex state:
    */
   GLuint TclFallback;
   GLuint Fallback;
   GLuint NewGLState;
   GLbitfield64 tnl_index_bitset;  /* index of bits for last tnl_install_attrs */

   /* Drawable information */
   unsigned int lastStamp;

   /* Busy waiting */
   GLuint do_usleeps;
   GLuint do_irqs;
   GLuint irqsEmitted;
   drm_radeon_irq_wait_t iw;

   /* Derived state - for r300 only */
   struct radeon_state state;

   struct radeon_swtcl_info swtcl;
   struct radeon_tcl_info tcl;
   /* Configuration cache
    */
   driOptionCache optionCache;

   struct radeon_cmdbuf cmdbuf;

   struct radeon_debug debug;

   drm_clip_rect_t fboRect;
   GLboolean front_cliprects;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   GLboolean front_buffer_dirty;

   struct {
      struct radeon_query_object *current;
      struct radeon_state_atom queryobj;
   } query;

   struct {
      void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
      void (*pre_emit_state)(radeonContextPtr rmesa);
      void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
      void (*free_context)(struct gl_context *ctx);
      void (*emit_query_finish)(radeonContextPtr radeon);
      void (*update_scissor)(struct gl_context *ctx);
      unsigned (*check_blit)(mesa_format mesa_format, uint32_t dst_pitch);
      unsigned (*blit)(struct gl_context *ctx,
                       struct radeon_bo *src_bo,
                       intptr_t src_offset,
                       mesa_format src_mesaformat,
                       unsigned src_pitch,
                       unsigned src_width,
                       unsigned src_height,
                       unsigned src_x_offset,
                       unsigned src_y_offset,
                       struct radeon_bo *dst_bo,
                       intptr_t dst_offset,
                       mesa_format dst_mesaformat,
                       unsigned dst_pitch,
                       unsigned dst_width,
                       unsigned dst_height,
                       unsigned dst_x_offset,
                       unsigned dst_y_offset,
                       unsigned reg_width,
                       unsigned reg_height,
                       unsigned flip_y);
      unsigned (*is_format_renderable)(mesa_format mesa_format);
      GLboolean (*revalidate_all_buffers)(struct gl_context *ctx);
   } vtbl;
};
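
/*
 * Illustrative sketch (hypothetical wrapper): hardware-independent code
 * reaches the chip-specific backend through the vtbl, e.g. to ask whether a
 * given format/pitch combination can be handled by the blitter.
 */
static inline unsigned radeon_check_blit_example(radeonContextPtr rmesa,
                                                 mesa_format format,
                                                 uint32_t dst_pitch)
{
   return rmesa->vtbl.check_blit(format, dst_pitch);
}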

static inline radeonContextPtr RADEON_CONTEXT(struct gl_context *ctx)
{
   return (radeonContextPtr) ctx;
}

static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
{
   return radeon->driContext->driDrawablePriv;
}

static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
{
   return radeon->driContext->driReadablePriv;
}

extern const char *const radeonVendorString;

const char *radeonGetRendererString(radeonScreenPtr radeonScreen);

GLboolean radeonInitContext(radeonContextPtr radeon,
                            gl_api api,
                            struct dd_function_table *functions,
                            const struct gl_config *glVisual,
                            __DRIcontext *driContextPriv,
                            void *sharedContextPrivate);

void radeonCleanupContext(radeonContextPtr radeon);
GLboolean radeonUnbindContext(__DRIcontext *driContextPriv);
void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
                                 GLboolean front_only);
GLboolean radeonMakeCurrent(__DRIcontext *driContextPriv,
                            __DRIdrawable *driDrawPriv,
                            __DRIdrawable *driReadPriv);
extern void radeonDestroyContext(__DRIcontext *driContextPriv);
void radeon_prepare_render(radeonContextPtr radeon);

#endif