radeon: Add protection against recursive DRM locking.
[mesa.git] src/mesa/drivers/dri/radeon/radeon_common_context.h

#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H

#include "main/mm.h"
#include "math/m_vector.h"
#include "texmem.h"
#include "tnl/t_context.h"
#include "main/colormac.h"

#include "radeon_screen.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "tnl/t_vertex.h"

#include "dri_metaops.h"
struct radeon_context;

#include "radeon_bocs_wrapper.h"

/* This union is used to avoid warnings/miscompilation
   with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;

struct radeon_context;
typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;


#define TEX_0 0x1
#define TEX_1 0x2
#define TEX_2 0x4
#define TEX_3 0x8
#define TEX_4 0x10
#define TEX_5 0x20

/* Rasterizing fallbacks */
/* See corresponding strings in r200_swtcl.c */
#define RADEON_FALLBACK_TEXTURE        0x0001
#define RADEON_FALLBACK_DRAW_BUFFER    0x0002
#define RADEON_FALLBACK_STENCIL        0x0004
#define RADEON_FALLBACK_RENDER_MODE    0x0008
#define RADEON_FALLBACK_BLEND_EQ       0x0010
#define RADEON_FALLBACK_BLEND_FUNC     0x0020
#define RADEON_FALLBACK_DISABLE        0x0040
#define RADEON_FALLBACK_BORDER_MODE    0x0080
#define RADEON_FALLBACK_DEPTH_BUFFER   0x0100
#define RADEON_FALLBACK_STENCIL_BUFFER 0x0200

#define R200_FALLBACK_TEXTURE     0x01
#define R200_FALLBACK_DRAW_BUFFER 0x02
#define R200_FALLBACK_STENCIL     0x04
#define R200_FALLBACK_RENDER_MODE 0x08
#define R200_FALLBACK_DISABLE     0x10
#define R200_FALLBACK_BORDER_MODE 0x20

#define RADEON_TCL_FALLBACK_RASTER        0x1   /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED      0x2   /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4   /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL      0x8   /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0      0x10  /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1      0x20  /* texgen, unit 1 */
#define RADEON_TCL_FALLBACK_TEXGEN_2      0x40  /* texgen, unit 2 */
#define RADEON_TCL_FALLBACK_TCL_DISABLE   0x80  /* user disable */
#define RADEON_TCL_FALLBACK_FOGCOORDSPEC  0x100 /* fogcoord, sep. spec light */

/* The blit width for texture uploads
 */
#define BLIT_WIDTH_BYTES 1024

/* Use the templated vertex format:
 */
#define COLOR_IS_RGBA
#define TAG(x) radeon##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define RADEON_RB_CLASS 0xdeadbeef

struct radeon_renderbuffer
{
    struct gl_renderbuffer base;
    struct radeon_bo *bo;
    unsigned int cpp;
    /* unsigned int offset; */
    unsigned int pitch;

    uint32_t draw_offset; /* FBO */
    /* boo Xorg 6.8.2 compat */
    int has_surface;

    GLuint pf_pending;  /**< sequence number of pending flip */
    GLuint vbl_pending; /**< vblank sequence number of pending flip */
    __DRIdrawablePrivate *dPriv;
};

struct radeon_framebuffer
{
    struct gl_framebuffer base;

    struct radeon_renderbuffer *color_rb[2];

    GLuint vbl_waited;

    /* buffer swap */
    int64_t swap_ust;
    int64_t swap_missed_ust;

    GLuint swap_count;
    GLuint swap_missed_count;

    /* Drawable page flipping state */
    GLboolean pf_active;
    GLint pf_current_page;
    GLint pf_num_pages;

};


struct radeon_colorbuffer_state {
    GLuint clear;
    int roundEnable;
    struct gl_renderbuffer *rb;
    uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
};

struct radeon_depthbuffer_state {
    GLuint clear;
    struct gl_renderbuffer *rb;
};

struct radeon_scissor_state {
    drm_clip_rect_t rect;
    GLboolean enabled;

    GLuint numClipRects;        /* Cliprects active */
    GLuint numAllocedClipRects; /* Cliprects available */
    drm_clip_rect_t *pClipRects;
};

struct radeon_stencilbuffer_state {
    GLuint clear; /* rb3d_stencilrefmask value */
};

struct radeon_stipple_state {
    GLuint mask[32];
};

struct radeon_state_atom {
    struct radeon_state_atom *next, *prev;
    const char *name; /* for debug */
    int cmd_size;     /* size in bytes */
    GLuint idx;
    GLuint is_tcl;
    GLuint *cmd;      /* one or more cmd's */
    GLuint *lastcmd;  /* one or more cmd's */
    GLboolean dirty;  /* dirty-mark in emit_state_list */
    int (*check) (GLcontext *, struct radeon_state_atom *atom); /* is this state active? */
    void (*emit) (GLcontext *, struct radeon_state_atom *atom);
};
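
/* Illustrative sketch (not a literal copy of the driver code): the common
 * code keeps these atoms on a linked list and, when flushing state, does
 * roughly
 *
 *     foreach atom on hw.atomlist:
 *         if (atom->dirty && atom->check(ctx, atom))
 *             atom->emit(ctx, atom);
 *
 * i.e. check() decides whether (and how much) an atom contributes, and
 * emit() writes its command dwords into the command stream.
 */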

struct radeon_hw_state {
    /* Head of the linked list of state atoms. */
    struct radeon_state_atom atomlist;
    int max_state_size; /* Number of bytes necessary for a full state emit. */
    GLboolean is_dirty, all_dirty;
};


/* Texture related */
typedef struct _radeon_texture_image radeon_texture_image;

struct _radeon_texture_image {
    struct gl_texture_image base;

    /**
     * If mt != 0, the image is stored in hardware format in the
     * given mipmap tree. In this case, base.Data may point into the
     * mapping of the buffer object that contains the mipmap tree.
     *
     * If mt == 0, the image is stored in normal memory pointed to
     * by base.Data.
     */
    struct _radeon_mipmap_tree *mt;
    struct radeon_bo *bo;

    int mtlevel; /** if mt != 0, this is the image's level in the mipmap tree */
    int mtface;  /** if mt != 0, this is the image's face in the mipmap tree */
};


static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
{
    return (radeon_texture_image*)image;
}
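
/* Illustrative sketch (not driver API): code that needs an image's backing
 * store is expected to consult mt first, e.g.
 *
 *     radeon_texture_image *rimage = get_radeon_texture_image(texImage);
 *     if (rimage->mt) {
 *         // stored in the mipmap tree's buffer object; base.Data, if set,
 *         // points into a mapping of that BO
 *     } else {
 *         // stored in ordinary system memory at base.Data
 *     }
 */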


typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;

#define RADEON_TXO_MICRO_TILE (1 << 3)

/* Texture object in locally shared texture space.
 */
struct radeon_tex_obj {
    struct gl_texture_object base;
    struct _radeon_mipmap_tree *mt;

    /**
     * This is true if we've verified that the mipmap tree above is complete
     * and so on.
     */
    GLboolean validated;

    GLuint override_offset;
    GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
    GLuint tile_bits;         /* hw texture tile bits used on this texture */
    struct radeon_bo *bo;

    GLuint pp_txfilter;       /* hardware register values */
    GLuint pp_txformat;
    GLuint pp_txformat_x;
    GLuint pp_txsize;         /* npot only */
    GLuint pp_txpitch;        /* npot only */
    GLuint pp_border_color;
    GLuint pp_cubic_faces;    /* cube face 1,2,3,4 log2 sizes */

    GLuint pp_txfilter_1;     /* r300 */

    /* r700 texture states */
    GLuint SQ_TEX_RESOURCE0;
    GLuint SQ_TEX_RESOURCE1;
    GLuint SQ_TEX_RESOURCE2;
    GLuint SQ_TEX_RESOURCE3;
    GLuint SQ_TEX_RESOURCE4;
    GLuint SQ_TEX_RESOURCE5;
    GLuint SQ_TEX_RESOURCE6;

    GLuint SQ_TEX_SAMPLER0;
    GLuint SQ_TEX_SAMPLER1;
    GLuint SQ_TEX_SAMPLER2;

    GLuint TD_PS_SAMPLER0_BORDER_RED;
    GLuint TD_PS_SAMPLER0_BORDER_GREEN;
    GLuint TD_PS_SAMPLER0_BORDER_BLUE;
    GLuint TD_PS_SAMPLER0_BORDER_ALPHA;

    GLboolean border_fallback;
};

static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
{
    return (radeonTexObj*)texObj;
}

/* Need refcounting on dma buffers:
 */
struct radeon_dma_buffer {
    int refcount; /* the number of retained regions in buf */
    drmBufPtr buf;
};

struct radeon_aos {
    struct radeon_bo *bo; /** Buffer object where vertex data is stored */
    int offset;           /** Offset into buffer object, in bytes */
    int components;       /** Number of components per vertex */
    int stride;           /** Stride in dwords (may be 0 for repeating) */
    int count;            /** Number of vertices */
};

struct radeon_dma {
    /* Active dma region. Allocations for vertices and retained
     * regions come from here. Also used for emitting random vertices;
     * these may be flushed by calling flush_current().
     */
    struct radeon_bo *current; /** Buffer that DMA memory is allocated from */
    int current_used;          /** Number of bytes allocated and forgotten about */
    int current_vertexptr;     /** End of active vertex region */

    /**
     * If current_vertexptr != current_used then flush must be non-zero.
     * flush must be called before non-active vertex allocations can be
     * performed.
     */
    void (*flush) (GLcontext *);

    /* Number of "in-flight" DMA buffers, i.e. the number of buffers
     * for which a DISCARD command is currently queued in the command buffer.
     */
    GLuint nr_released_bufs;
};
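
/* Illustrative sketch: callers that need to allocate outside the active
 * vertex region are expected to flush first; the usual idiom is something
 * like
 *
 *     if (rmesa->dma.flush)
 *         rmesa->dma.flush(rmesa->glCtx);
 *
 * (rmesa here stands for any radeonContextPtr.)
 */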

/* radeon_swtcl.c
 */
struct radeon_swtcl_info {

    GLuint RenderIndex;
    GLuint vertex_size;
    GLubyte *verts;

    /* Fallback rasterization functions
     */
    GLuint hw_primitive;
    GLenum render_primitive;
    GLuint numverts;

    struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
    GLuint vertex_attr_count;

};

#define RADEON_MAX_AOS_ARRAYS 16
struct radeon_tcl_info {
    struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
    GLuint aos_count;
    struct radeon_bo *elt_dma_bo; /** Buffer object that contains element indices */
    int elt_dma_offset;           /** Offset into this buffer object, in bytes */
};

struct radeon_ioctl {
    GLuint vertex_offset;
    struct radeon_bo *bo;
    GLuint vertex_size;
};

#define RADEON_MAX_PRIMS 64

struct radeon_prim {
    GLuint start;
    GLuint end;
    GLuint prim;
};

static INLINE GLuint radeonPackColor(GLuint cpp,
                                     GLubyte r, GLubyte g,
                                     GLubyte b, GLubyte a)
{
    switch (cpp) {
    case 2:
        return PACK_COLOR_565(r, g, b);
    case 4:
        return PACK_COLOR_8888(a, r, g, b);
    default:
        return 0;
    }
}
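
/* Worked example, assuming the usual PACK_COLOR_565/PACK_COLOR_8888 layouts
 * from main/colormac.h:
 *
 *     radeonPackColor(2, 255, 0, 0,   0) == 0x0000f800   (RGB565 red)
 *     radeonPackColor(4, 255, 0, 0, 255) == 0xffff0000   (ARGB8888 opaque red)
 */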

#define MAX_CMD_BUF_SZ (16*1024)

#define MAX_DMA_BUF_SZ (64*1024)

struct radeon_store {
    GLuint statenr;
    GLuint primnr;
    char cmd_buf[MAX_CMD_BUF_SZ];
    int cmd_used;
    int elts_start;
};

struct radeon_dri_mirror {
    __DRIcontextPrivate *context; /* DRI context */
    __DRIscreenPrivate *screen;   /* DRI screen */

    drm_context_t hwContext;
    drm_hw_lock_t *hwLock;
    int hwLockCount;
    int fd;
    int drmMinor;
};
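
/* hwLockCount is the counter behind the "protection against recursive DRM
 * locking" this change introduces. A minimal sketch of the intended idiom
 * (illustrative only; the real helpers live in the common locking code and
 * the lock/unlock calls below are placeholders):
 *
 *     if (rmesa->dri.hwLockCount++ == 0)
 *         take the DRM hardware lock;      // outermost caller only
 *     ...
 *     if (--rmesa->dri.hwLockCount == 0)
 *         release the DRM hardware lock;
 */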

#define DEBUG_TEXTURE   0x001
#define DEBUG_STATE     0x002
#define DEBUG_IOCTL     0x004
#define DEBUG_PRIMS     0x008
#define DEBUG_VERTS     0x010
#define DEBUG_FALLBACKS 0x020
#define DEBUG_VFMT      0x040
#define DEBUG_CODEGEN   0x080
#define DEBUG_VERBOSE   0x100
#define DEBUG_DRI       0x200
#define DEBUG_DMA       0x400
#define DEBUG_SANITY    0x800
#define DEBUG_SYNC      0x1000
#define DEBUG_PIXEL     0x2000
#define DEBUG_MEMORY    0x4000


typedef void (*radeon_tri_func) (radeonContextPtr,
                                 radeonVertex *,
                                 radeonVertex *, radeonVertex *);

typedef void (*radeon_line_func) (radeonContextPtr,
                                  radeonVertex *, radeonVertex *);

typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);

#define RADEON_MAX_BOS 32
struct radeon_state {
    struct radeon_colorbuffer_state color;
    struct radeon_depthbuffer_state depth;
    struct radeon_scissor_state scissor;
    struct radeon_stencilbuffer_state stencil;

    struct radeon_cs_space_check bos[RADEON_MAX_BOS];
    int validated_bo_count;
};

/**
 * This structure holds the command buffer while it is being constructed.
 *
 * The first batch of commands in the buffer is always the state that needs
 * to be re-emitted when the context is lost. This batch can be skipped
 * otherwise.
 */
struct radeon_cmdbuf {
    struct radeon_cs_manager *csm;
    struct radeon_cs *cs;
    int size;                /** # of dwords total */
    unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
};
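
/* Illustrative sketch, assuming the BEGIN_BATCH/OUT_BATCH/END_BATCH wrappers
 * from radeon_cmdbuf.h: command dwords are normally written into cs through
 * those helpers rather than by poking the struct directly, e.g.
 *
 *     BEGIN_BATCH(2);
 *     OUT_BATCH(header_dword);   // header_dword/value_dword are placeholders
 *     OUT_BATCH(value_dword);
 *     END_BATCH();
 */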

struct radeon_context {
    GLcontext *glCtx;             /* Mesa context */
    radeonScreenPtr radeonScreen; /* Screen private DRI data */

    /* Texture object bookkeeping
     */
    int texture_depth;
    float initialMaxAnisotropy;
    uint32_t texture_row_align;
    uint32_t texture_rect_row_align;
    uint32_t texture_compressed_row_align;

    struct radeon_dma dma;
    struct radeon_hw_state hw;
    /* Rasterization and vertex state:
     */
    GLuint TclFallback;
    GLuint Fallback;
    GLuint NewGLState;
    DECLARE_RENDERINPUTS(tnl_index_bitset); /* index of bits for last tnl_install_attrs */

    /* Drawable, cliprect and scissor information */
    GLuint numClipRects; /* Cliprects for the draw buffer */
    drm_clip_rect_t *pClipRects;
    unsigned int lastStamp;
    drm_radeon_sarea_t *sarea; /* Private SAREA data */

    /* Mirrors of some DRI state */
    struct radeon_dri_mirror dri;

    /* Busy waiting */
    GLuint do_usleeps;
    GLuint do_irqs;
    GLuint irqsEmitted;
    drm_radeon_irq_wait_t iw;

    /* Derived state - for r300 only */
    struct radeon_state state;

    struct radeon_swtcl_info swtcl;
    struct radeon_tcl_info tcl;
    /* Configuration cache
     */
    driOptionCache optionCache;

    struct radeon_cmdbuf cmdbuf;

    drm_clip_rect_t fboRect;
    GLboolean constant_cliprect; /* used for FBO or DRI2 rendering */
    GLboolean front_cliprects;

    /**
     * Set if rendering has occurred to the drawable's front buffer.
     *
     * This is used in the DRI2 case to detect that glFlush should also copy
     * the contents of the fake front buffer to the real front buffer.
     */
    GLboolean front_buffer_dirty;

    /**
     * Track whether front-buffer rendering is currently enabled.
     *
     * A separate flag is used to track this in order to support MRT more
     * easily.
     */
    GLboolean is_front_buffer_rendering;

    /**
     * Track whether the front buffer is the current read target.
     *
     * This is closely associated with is_front_buffer_rendering, but may
     * be set separately. The DRI2 fake front buffer must be referenced
     * either way.
     */
    GLboolean is_front_buffer_reading;

    struct dri_metaops meta;

    struct {
        void (*get_lock)(radeonContextPtr radeon);
        void (*update_viewport_offset)(GLcontext *ctx);
        void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
        void (*swtcl_flush)(GLcontext *ctx, uint32_t offset);
        void (*pre_emit_atoms)(radeonContextPtr rmesa);
        void (*pre_emit_state)(radeonContextPtr rmesa);
        void (*fallback)(GLcontext *ctx, GLuint bit, GLboolean mode);
        void (*free_context)(GLcontext *ctx);
    } vtbl;
};

#define RADEON_CONTEXT(glctx) ((radeonContextPtr)((glctx)->DriverCtx))
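
/* Typical use: recover the driver context from a GL context, e.g.
 *
 *     radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
 */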

static inline __DRIdrawablePrivate* radeon_get_drawable(radeonContextPtr radeon)
{
    return radeon->dri.context->driDrawablePriv;
}

static inline __DRIdrawablePrivate* radeon_get_readable(radeonContextPtr radeon)
{
    return radeon->dri.context->driReadablePriv;
}


/**
 * This function takes a float and packs it into a uint32_t
 */
static INLINE uint32_t radeonPackFloat32(float fl)
{
    union {
        float fl;
        uint32_t u;
    } u;

    u.fl = fl;
    return u.u;
}
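
/* In other words, it returns the IEEE-754 single-precision bit pattern of its
 * argument, e.g. radeonPackFloat32(1.0f) == 0x3f800000. */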

/* This is probably wrong for some values; I need to test this
 * some more. Range checking would be a good idea, too.
 *
 * But it works for most things. I'll fix it later if someone
 * else with a better clue doesn't.
 */
static INLINE uint32_t radeonPackFloat24(float f)
{
    float mantissa;
    int exponent;
    uint32_t float24 = 0;

    if (f == 0.0)
        return 0;

    mantissa = frexpf(f, &exponent);

    /* Handle negative values */
    if (mantissa < 0) {
        float24 |= (1 << 23);
        mantissa = mantissa * -1.0;
    }
    /* Handle exponent, bias of 63 (frexpf returns a mantissa in [0.5, 1),
     * so only 62 is added here) */
    exponent += 62;
    float24 |= (exponent << 16);
    /* Kill 7 LSB of mantissa */
    float24 |= (radeonPackFloat32(mantissa) & 0x7FFFFF) >> 7;

    return float24;
}
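
/* Worked example of the packing above: radeonPackFloat24(1.0f) should yield
 * 0x003f0000, i.e. sign bit (bit 23) clear, biased exponent 63 in bits 22:16,
 * and a zero 16-bit mantissa field in bits 15:0. */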

GLboolean radeonInitContext(radeonContextPtr radeon,
                            struct dd_function_table* functions,
                            const __GLcontextModes * glVisual,
                            __DRIcontextPrivate * driContextPriv,
                            void *sharedContextPrivate);

void radeonCleanupContext(radeonContextPtr radeon);
GLboolean radeonUnbindContext(__DRIcontextPrivate * driContextPriv);
void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable);
GLboolean radeonMakeCurrent(__DRIcontextPrivate * driContextPriv,
                            __DRIdrawablePrivate * driDrawPriv,
                            __DRIdrawablePrivate * driReadPriv);
extern void radeonDestroyContext(__DRIcontextPrivate * driContextPriv);

/* ================================================================
 * Debugging:
 */
#define DO_DEBUG 1

#if DO_DEBUG
extern int RADEON_DEBUG;
#else
#define RADEON_DEBUG 0
#endif

#ifndef HAVE_LIBDRM_RADEON
#ifndef RADEON_DEBUG_BO
#define RADEON_DEBUG_BO 1
#endif
#endif

#endif /* COMMON_CONTEXT_H */