radeon: derive radeon_renderbuffer from swrast_renderbuffer
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common_context.h
1
2 #ifndef COMMON_CONTEXT_H
3 #define COMMON_CONTEXT_H
4
5 #include "main/mm.h"
6 #include "math/m_vector.h"
7 #include "tnl/t_context.h"
8 #include "main/colormac.h"
9
10 #include "radeon_debug.h"
11 #include "radeon_screen.h"
12 #include "radeon_drm.h"
13 #include "dri_util.h"
14 #include "tnl/t_vertex.h"
15 #include "swrast/s_context.h"
16
17 struct radeon_context;
18
19 #include "radeon_bo_gem.h"
20 #include "radeon_cs_gem.h"
21
22 /* This union is used to avoid warnings/miscompilation
23 with float to uint32_t casts due to strict-aliasing */
24 typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
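/* Typical use (illustrative sketch; OUT_BATCH stands in for whatever
 * command-stream emit macro the caller has available): copy the float
 * through the union instead of casting its address, so the reinterpretation
 * cannot be miscompiled under strict aliasing.
 *
 *     float_ui32_type depth_clear;
 *     depth_clear.f = 1.0f;
 *     OUT_BATCH(depth_clear.ui32);   // emit the raw float bits as a dword
 */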
25
26 struct radeon_context;
27 typedef struct radeon_context radeonContextRec;
28 typedef struct radeon_context *radeonContextPtr;
29
30
31 #define TEX_0 0x1
32 #define TEX_1 0x2
33 #define TEX_2 0x4
34 #define TEX_3 0x8
35 #define TEX_4 0x10
36 #define TEX_5 0x20
37
38 /* Rasterizing fallbacks */
39 /* See corresponding strings in r200_swtcl.c */
40 #define RADEON_FALLBACK_TEXTURE 0x0001
41 #define RADEON_FALLBACK_DRAW_BUFFER 0x0002
42 #define RADEON_FALLBACK_STENCIL 0x0004
43 #define RADEON_FALLBACK_RENDER_MODE 0x0008
44 #define RADEON_FALLBACK_BLEND_EQ 0x0010
45 #define RADEON_FALLBACK_BLEND_FUNC 0x0020
46 #define RADEON_FALLBACK_DISABLE 0x0040
47 #define RADEON_FALLBACK_BORDER_MODE 0x0080
48 #define RADEON_FALLBACK_DEPTH_BUFFER 0x0100
49 #define RADEON_FALLBACK_STENCIL_BUFFER 0x0200
50
51 #define R200_FALLBACK_TEXTURE 0x01
52 #define R200_FALLBACK_DRAW_BUFFER 0x02
53 #define R200_FALLBACK_STENCIL 0x04
54 #define R200_FALLBACK_RENDER_MODE 0x08
55 #define R200_FALLBACK_DISABLE 0x10
56 #define R200_FALLBACK_BORDER_MODE 0x20
57
58 #define RADEON_TCL_FALLBACK_RASTER 0x1 /* rasterization */
59 #define RADEON_TCL_FALLBACK_UNFILLED 0x2 /* unfilled tris */
60 #define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4 /* twoside tris */
61 #define RADEON_TCL_FALLBACK_MATERIAL 0x8 /* material in vb */
62 #define RADEON_TCL_FALLBACK_TEXGEN_0 0x10 /* texgen, unit 0 */
63 #define RADEON_TCL_FALLBACK_TEXGEN_1 0x20 /* texgen, unit 1 */
64 #define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */
65 #define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */
66 #define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */
67
68 /* The blit width for texture uploads
69 */
70 #define BLIT_WIDTH_BYTES 1024
71
72 /* Use the templated vertex format:
73 */
74 #define COLOR_IS_RGBA
75 #define TAG(x) radeon##x
76 #include "tnl_dd/t_dd_vertex.h"
77 #undef TAG
78
79 #define RADEON_RB_CLASS 0xdeadbeef
80
81 struct radeon_renderbuffer
82 {
83 struct swrast_renderbuffer base;
84
85 struct radeon_bo *bo;
86 unsigned int cpp;
87 /* unsigned int offset; */
88 unsigned int pitch;
89
90 struct radeon_bo *map_bo;
91 GLbitfield map_mode;
92 int map_x, map_y, map_w, map_h;
93 int map_pitch;
94 void *map_buffer;
95
96 uint32_t draw_offset; /* FBO */
97 /* boo Xorg 6.8.2 compat */
98 int has_surface;
99
100 GLuint pf_pending; /**< sequence number of pending flip */
101 __DRIdrawable *dPriv;
102 };
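/* With swrast_renderbuffer as the base class, a gl_renderbuffer handed in by
 * core Mesa is downcast by checking its ClassID against RADEON_RB_CLASS
 * first.  Minimal sketch of such a checked downcast (the driver defines its
 * own helper for this elsewhere; the swrast base member name is assumed):
 *
 *     static INLINE struct radeon_renderbuffer *
 *     radeon_renderbuffer(struct gl_renderbuffer *rb)
 *     {
 *        struct radeon_renderbuffer *rrb = (struct radeon_renderbuffer *) rb;
 *        if (rrb && rrb->base.Base.ClassID == RADEON_RB_CLASS)
 *           return rrb;
 *        return NULL;
 *     }
 */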
103
104 struct radeon_framebuffer
105 {
106 struct gl_framebuffer base;
107
108 struct radeon_renderbuffer *color_rb[2];
109 };
110
111
112 struct radeon_colorbuffer_state {
113 GLuint clear;
114 int roundEnable;
115 struct gl_renderbuffer *rb;
116 uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
117 };
118
119 struct radeon_depthbuffer_state {
120 GLuint clear;
121 struct gl_renderbuffer *rb;
122 };
123
124 struct radeon_scissor_state {
125 drm_clip_rect_t rect;
126 GLboolean enabled;
127
128 GLuint numClipRects; /* Cliprects active */
129 GLuint numAllocedClipRects; /* Cliprects available */
130 drm_clip_rect_t *pClipRects;
131 };
132
133 struct radeon_stencilbuffer_state {
134 GLuint clear; /* rb3d_stencilrefmask value */
135 };
136
137 struct radeon_state_atom {
138 struct radeon_state_atom *next, *prev;
139 const char *name; /* for debug */
140 int cmd_size; /* size in bytes */
141 GLuint idx;
142 GLuint is_tcl;
143 GLuint *cmd; /* one or more cmd's */
144 GLuint *lastcmd; /* one or more cmd's */
145 GLboolean dirty; /* dirty-mark in emit_state_list */
146 int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
147 void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
148 };
149
150 struct radeon_hw_state {
151 /* Head of the linked list of state atoms. */
152 struct radeon_state_atom atomlist;
153 int max_state_size; /* Number of bytes necessary for a full state emit. */
154 int max_post_flush_size; /* Number of bytes necessary for post flushing emits */
155 GLboolean is_dirty, all_dirty;
156 };
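/* Illustrative sketch of how the atom list headed at atomlist is typically
 * walked when state is flushed (the real walk also reserves command-buffer
 * space via max_state_size; the foreach list macro and variable names here
 * are assumed):
 *
 *     struct radeon_state_atom *atom;
 *     foreach(atom, &rmesa->hw.atomlist) {
 *        if ((rmesa->hw.all_dirty || atom->dirty) && atom->check(ctx, atom))
 *           atom->emit(ctx, atom);
 *        atom->dirty = GL_FALSE;
 *     }
 *     rmesa->hw.is_dirty = rmesa->hw.all_dirty = GL_FALSE;
 */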
157
158
159 /* Texture related */
160 typedef struct _radeon_texture_image radeon_texture_image;
161
162
163 /**
164 * This is a subclass of swrast_texture_image since we use swrast
165 * for software fallback rendering.
166 */
167 struct _radeon_texture_image {
168 struct swrast_texture_image base;
169
170 /**
171 * If mt != 0, the image is stored in hardware format in the
172 * given mipmap tree. In this case, base.Data may point into the
173 * mapping of the buffer object that contains the mipmap tree.
174 *
175 * If mt == 0, the image is stored in normal memory pointed to
176 * by base.Data.
177 */
178 struct _radeon_mipmap_tree *mt;
179 struct radeon_bo *bo;
180 GLboolean used_as_render_target;
181 };
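/* Illustrative sketch of how consumers pick the texel source based on mt
 * (helper and field names assumed): when a mipmap tree is attached, base.Data
 * is only valid while the tree's buffer object is mapped.
 *
 *     radeon_texture_image *rimage = get_radeon_texture_image(texImage);
 *     GLubyte *texels;
 *     if (rimage->mt) {
 *        radeon_bo_map(rimage->mt->bo, 0);   // hw layout: map the miptree BO
 *        texels = rimage->base.Data;         // points into that mapping
 *     } else {
 *        texels = rimage->base.Data;         // plain system memory
 *     }
 */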
182
183
184 static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
185 {
186 return (radeon_texture_image*)image;
187 }
188
189
190 typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;
191
192 #define RADEON_TXO_MICRO_TILE (1 << 3)
193
194 /* Texture object in locally shared texture space.
195 */
196 struct radeon_tex_obj {
197 struct gl_texture_object base;
198 struct _radeon_mipmap_tree *mt;
199
200 /**
201 * This is true if we've verified that the mipmap tree above is complete
202 * and so on.
203 */
204 GLboolean validated;
205 /* Minimum LOD to be used during rendering */
206 unsigned minLod;
207 /* Maximum LOD to be used during rendering */
208 unsigned maxLod;
209
210 GLuint override_offset;
211 GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
212 GLuint tile_bits; /* hw texture tile bits used on this texture */
213 struct radeon_bo *bo;
214
215 GLuint pp_txfilter; /* hardware register values */
216 GLuint pp_txformat;
217 GLuint pp_txformat_x;
218 GLuint pp_txsize; /* npot only */
219 GLuint pp_txpitch; /* npot only */
220 GLuint pp_border_color;
221 GLuint pp_cubic_faces; /* cube face 1,2,3,4 log2 sizes */
222
223 GLboolean border_fallback;
224 };
225
226 static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
227 {
228 return (radeonTexObj*)texObj;
229 }
230
231 /* occlusion query */
232 struct radeon_query_object {
233 struct gl_query_object Base;
234 struct radeon_bo *bo;
235 int curr_offset;
236 GLboolean emitted_begin;
237
238 /* Double linked list of not flushed query objects */
239 struct radeon_query_object *prev, *next;
240 };
241
242 /* Need refcounting on dma buffers:
243 */
244 struct radeon_dma_buffer {
245 int refcount; /* the number of retained regions in buf */
246 drmBufPtr buf;
247 };
248
249 struct radeon_aos {
250 struct radeon_bo *bo; /**< Buffer object where vertex data is stored */
251 int offset; /**< Offset into buffer object, in bytes */
252 int components; /**< Number of components per vertex */
253 int stride; /**< Stride in dwords (may be 0 for repeating) */
254 int count; /**< Number of vertices */
255 };
256
257 #define DMA_BO_FREE_TIME 100
258
259 struct radeon_dma_bo {
260 struct radeon_dma_bo *next, *prev;
261 struct radeon_bo *bo;
262 int expire_counter;
263 };
264
265 struct radeon_dma {
266 /* Active dma region. Allocations for vertices and retained
267 * regions come from here. Also used for emitting random vertices;
268 * these may be flushed by calling flush_current().
269 */
270 struct radeon_dma_bo free;
271 struct radeon_dma_bo wait;
272 struct radeon_dma_bo reserved;
273 size_t current_used; /**< Number of bytes allocated and forgotten about */
274 size_t current_vertexptr; /**< End of active vertex region */
275 size_t minimum_size;
276
277 /**
278 * If current_vertexptr != current_used then flush must be non-zero.
279 * flush must be called before non-active vertex allocations can be
280 * performed.
281 */
282 void (*flush) (struct gl_context *);
283 };
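/* Illustrative sketch of the flush contract documented above (variable and
 * function names assumed): code that wants DMA space outside the active
 * vertex region flushes pending vertices first, restoring the invariant
 * current_used == current_vertexptr.
 *
 *     if (rmesa->dma.flush)
 *        rmesa->dma.flush(ctx);
 *     assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
 *     // ... a non-vertex allocation may now be carved out safely ...
 */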
284
285 /* radeon_swtcl.c
286 */
287 struct radeon_swtcl_info {
288
289 GLuint RenderIndex;
290 GLuint vertex_size;
291 GLubyte *verts;
292
293 /* Fallback rasterization functions
294 */
295 GLuint hw_primitive;
296 GLenum render_primitive;
297 GLuint numverts;
298
299 struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
300 GLuint vertex_attr_count;
301
302 GLuint emit_prediction;
303 struct radeon_bo *bo;
304 };
305
306 #define RADEON_MAX_AOS_ARRAYS 16
307 struct radeon_tcl_info {
308 struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
309 GLuint aos_count;
310 struct radeon_bo *elt_dma_bo; /**< Buffer object that contains element indices */
311 int elt_dma_offset; /**< Offset into this buffer object, in bytes */
312 };
313
314 struct radeon_ioctl {
315 GLuint vertex_offset;
316 GLuint vertex_max;
317 struct radeon_bo *bo;
318 GLuint vertex_size;
319 };
320
321 #define RADEON_MAX_PRIMS 64
322
323 struct radeon_prim {
324 GLuint start;
325 GLuint end;
326 GLuint prim;
327 };
328
329 static INLINE GLuint radeonPackColor(GLuint cpp,
330 GLubyte r, GLubyte g,
331 GLubyte b, GLubyte a)
332 {
333 switch (cpp) {
334 case 2:
335 return PACK_COLOR_565(r, g, b);
336 case 4:
337 return PACK_COLOR_8888(a, r, g, b);
338 default:
339 return 0;
340 }
341 }
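/* Usage example (illustrative; follows the pattern of the ClearColor driver
 * hooks, variable names assumed): the cpp of the bound color renderbuffer
 * selects between the RGB565 and ARGB8888 layouts handled above.
 *
 *     GLubyte c[4];
 *     CLAMPED_FLOAT_TO_UBYTE(c[0], color[0]);
 *     CLAMPED_FLOAT_TO_UBYTE(c[1], color[1]);
 *     CLAMPED_FLOAT_TO_UBYTE(c[2], color[2]);
 *     CLAMPED_FLOAT_TO_UBYTE(c[3], color[3]);
 *     rmesa->state.color.clear = radeonPackColor(rrb->cpp,
 *                                                c[0], c[1], c[2], c[3]);
 */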
342
343 #define MAX_CMD_BUF_SZ (16*1024)
344
345 #define MAX_DMA_BUF_SZ (64*1024)
346
347 struct radeon_store {
348 GLuint statenr;
349 GLuint primnr;
350 char cmd_buf[MAX_CMD_BUF_SZ];
351 int cmd_used;
352 int elts_start;
353 };
354
355 struct radeon_dri_mirror {
356 __DRIcontext *context; /* DRI context */
357 __DRIscreen *screen; /* DRI screen */
358
359 drm_context_t hwContext;
360 drm_hw_lock_t *hwLock;
361 int hwLockCount;
362 int fd;
363 int drmMinor;
364 };
365
366 typedef void (*radeon_tri_func) (radeonContextPtr,
367 radeonVertex *,
368 radeonVertex *, radeonVertex *);
369
370 typedef void (*radeon_line_func) (radeonContextPtr,
371 radeonVertex *, radeonVertex *);
372
373 typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);
374
375 #define RADEON_MAX_BOS 32
376 struct radeon_state {
377 struct radeon_colorbuffer_state color;
378 struct radeon_depthbuffer_state depth;
379 struct radeon_scissor_state scissor;
380 struct radeon_stencilbuffer_state stencil;
381 };
382
383 /**
384 * This structure holds the command buffer while it is being constructed.
385 *
386 * The first batch of commands in the buffer is always the state that needs
387 * to be re-emitted when the context is lost. This batch can be skipped
388 * otherwise.
389 */
390 struct radeon_cmdbuf {
391 struct radeon_cs_manager *csm;
392 struct radeon_cs *cs;
393 int size; /**< Size in dwords */
394 unsigned int flushing:1; /**< whether we're currently in FlushCmdBufLocked */
395 };
396
397 struct radeon_context {
398 struct gl_context *glCtx;
399 radeonScreenPtr radeonScreen; /* Screen private DRI data */
400
401 /* Texture object bookkeeping
402 */
403 int texture_depth;
404 float initialMaxAnisotropy;
405 uint32_t texture_row_align;
406 uint32_t texture_rect_row_align;
407 uint32_t texture_compressed_row_align;
408
409 struct radeon_dma dma;
410 struct radeon_hw_state hw;
411 /* Rasterization and vertex state:
412 */
413 GLuint TclFallback;
414 GLuint Fallback;
415 GLuint NewGLState;
416 GLbitfield64 tnl_index_bitset; /* index of bits for last tnl_install_attrs */
417
418 /* Drawable information */
419 unsigned int lastStamp;
420 drm_radeon_sarea_t *sarea; /* Private SAREA data */
421
422 /* Mirrors of some DRI state */
423 struct radeon_dri_mirror dri;
424
425 /* Busy waiting */
426 GLuint do_usleeps;
427 GLuint do_irqs;
428 GLuint irqsEmitted;
429 drm_radeon_irq_wait_t iw;
430
431 /* Derived state - for r300 only */
432 struct radeon_state state;
433
434 struct radeon_swtcl_info swtcl;
435 struct radeon_tcl_info tcl;
436 /* Configuration cache
437 */
438 driOptionCache optionCache;
439
440 struct radeon_cmdbuf cmdbuf;
441
442 struct radeon_debug debug;
443
444 drm_clip_rect_t fboRect;
445 GLboolean front_cliprects;
446
447 /**
448 * Set if rendering has occurred to the drawable's front buffer.
449 *
450 * This is used in the DRI2 case to detect that glFlush should also copy
451 * the contents of the fake front buffer to the real front buffer.
452 */
453 GLboolean front_buffer_dirty;
454
455 /**
456 * Track whether front-buffer rendering is currently enabled
457 *
458 * A separate flag is used to track this in order to support MRT more
459 * easily.
460 */
461 GLboolean is_front_buffer_rendering;
462
463 /**
464 * Track whether front-buffer is the current read target.
465 *
466 * This is closely associated with is_front_buffer_rendering, but may
467 * be set separately. The DRI2 fake front buffer must be referenced
468 * either way.
469 */
470 GLboolean is_front_buffer_reading;
471
472 struct {
473 struct radeon_query_object *current;
474 struct radeon_state_atom queryobj;
475 } query;
476
477 struct {
478 void (*get_lock)(radeonContextPtr radeon);
479 void (*update_viewport_offset)(struct gl_context *ctx);
480 void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
481 void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
482 void (*pre_emit_atoms)(radeonContextPtr rmesa);
483 void (*pre_emit_state)(radeonContextPtr rmesa);
484 void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
485 void (*free_context)(struct gl_context *ctx);
486 void (*emit_query_finish)(radeonContextPtr radeon);
487 void (*update_scissor)(struct gl_context *ctx);
488 unsigned (*check_blit)(gl_format mesa_format, uint32_t dst_pitch);
489 unsigned (*blit)(struct gl_context *ctx,
490 struct radeon_bo *src_bo,
491 intptr_t src_offset,
492 gl_format src_mesaformat,
493 unsigned src_pitch,
494 unsigned src_width,
495 unsigned src_height,
496 unsigned src_x_offset,
497 unsigned src_y_offset,
498 struct radeon_bo *dst_bo,
499 intptr_t dst_offset,
500 gl_format dst_mesaformat,
501 unsigned dst_pitch,
502 unsigned dst_width,
503 unsigned dst_height,
504 unsigned dst_x_offset,
505 unsigned dst_y_offset,
506 unsigned reg_width,
507 unsigned reg_height,
508 unsigned flip_y);
509 unsigned (*is_format_renderable)(gl_format mesa_format);
510 } vtbl;
511 };
512
513 #define RADEON_CONTEXT(glctx) ((radeonContextPtr)((glctx)->DriverCtx))
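/* Typical use at the top of a driver callback (sketch; any gl_context
 * pointer name works since the macro expands its argument):
 *
 *     static void radeonInvalidateState(struct gl_context *ctx, GLuint new_state)
 *     {
 *        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
 *        radeon->NewGLState |= new_state;
 *     }
 */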
514
515 static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
516 {
517 return radeon->dri.context->driDrawablePriv;
518 }
519
520 static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
521 {
522 return radeon->dri.context->driReadablePriv;
523 }
524
525 GLboolean radeonInitContext(radeonContextPtr radeon,
526 struct dd_function_table* functions,
527 const struct gl_config * glVisual,
528 __DRIcontext * driContextPriv,
529 void *sharedContextPrivate);
530
531 void radeonCleanupContext(radeonContextPtr radeon);
532 GLboolean radeonUnbindContext(__DRIcontext * driContextPriv);
533 void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
534 GLboolean front_only);
535 GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
536 __DRIdrawable * driDrawPriv,
537 __DRIdrawable * driReadPriv);
538 extern void radeonDestroyContext(__DRIcontext * driContextPriv);
539 void radeon_prepare_render(radeonContextPtr radeon);
540
541 #endif