dri: Remove unused dri texmem.c
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_common_context.h
1
2 #ifndef COMMON_CONTEXT_H
3 #define COMMON_CONTEXT_H
4
5 #include "main/mm.h"
6 #include "math/m_vector.h"
7 #include "tnl/t_context.h"
8 #include "main/colormac.h"
9
10 #include "radeon_debug.h"
11 #include "radeon_screen.h"
12 #include "radeon_drm.h"
13 #include "dri_util.h"
14 #include "tnl/t_vertex.h"
15 #include "swrast/s_context.h"
16
17 struct radeon_context;
18
19 #include "radeon_bo_gem.h"
20 #include "radeon_cs_gem.h"
21
22 /* This union is used to avoid warnings/miscompilation
23 with float to uint32_t casts due to strict-aliasing */
24 typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
25
26 struct radeon_context;
27 typedef struct radeon_context radeonContextRec;
28 typedef struct radeon_context *radeonContextPtr;
29
30
31 #define TEX_0 0x1
32 #define TEX_1 0x2
33 #define TEX_2 0x4
34 #define TEX_3 0x8
35 #define TEX_4 0x10
36 #define TEX_5 0x20
37
38 /* Rasterizing fallbacks */
/* See corresponding strings in r200_swtcl.c */
40 #define RADEON_FALLBACK_TEXTURE 0x0001
41 #define RADEON_FALLBACK_DRAW_BUFFER 0x0002
42 #define RADEON_FALLBACK_STENCIL 0x0004
43 #define RADEON_FALLBACK_RENDER_MODE 0x0008
44 #define RADEON_FALLBACK_BLEND_EQ 0x0010
45 #define RADEON_FALLBACK_BLEND_FUNC 0x0020
46 #define RADEON_FALLBACK_DISABLE 0x0040
47 #define RADEON_FALLBACK_BORDER_MODE 0x0080
48 #define RADEON_FALLBACK_DEPTH_BUFFER 0x0100
49 #define RADEON_FALLBACK_STENCIL_BUFFER 0x0200
50
51 #define R200_FALLBACK_TEXTURE 0x01
52 #define R200_FALLBACK_DRAW_BUFFER 0x02
53 #define R200_FALLBACK_STENCIL 0x04
54 #define R200_FALLBACK_RENDER_MODE 0x08
55 #define R200_FALLBACK_DISABLE 0x10
56 #define R200_FALLBACK_BORDER_MODE 0x20
57
58 #define RADEON_TCL_FALLBACK_RASTER 0x1 /* rasterization */
59 #define RADEON_TCL_FALLBACK_UNFILLED 0x2 /* unfilled tris */
60 #define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4 /* twoside tris */
61 #define RADEON_TCL_FALLBACK_MATERIAL 0x8 /* material in vb */
62 #define RADEON_TCL_FALLBACK_TEXGEN_0 0x10 /* texgen, unit 0 */
63 #define RADEON_TCL_FALLBACK_TEXGEN_1 0x20 /* texgen, unit 1 */
64 #define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */
65 #define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */
66 #define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */
67
68 /* The blit width for texture uploads
69 */
70 #define BLIT_WIDTH_BYTES 1024
71
72 /* Use the templated vertex format:
73 */
74 #define COLOR_IS_RGBA
75 #define TAG(x) radeon##x
76 #include "tnl_dd/t_dd_vertex.h"
77 #undef TAG
78
79 #define RADEON_RB_CLASS 0xdeadbeef
80
/**
 * gl_renderbuffer subclass backed by a radeon buffer object.
 */
struct radeon_renderbuffer
{
	struct gl_renderbuffer base;
	struct radeon_bo *bo;		/* buffer object providing the pixel storage */
	unsigned int cpp;		/* bytes per pixel */
	/* unsigned int offset; */
	unsigned int pitch;		/* row pitch -- presumably in bytes; TODO confirm against users */

	/* Fields describing an active mapping of the renderbuffer:
	 * staging BO (if any) and the mapped rectangle. */
	struct radeon_bo *map_bo;
	GLbitfield map_mode;
	int map_x, map_y, map_w, map_h;

	uint32_t draw_offset; /* FBO */
	/* Xorg 6.8.2 compat (original comment said "boo") */
	int has_surface;

	GLuint pf_pending;  /**< sequence number of pending flip */
	__DRIdrawable *dPriv;		/* DRI drawable this renderbuffer belongs to */
};
100
/**
 * gl_framebuffer subclass holding up to two color renderbuffers
 * (presumably front/back -- confirm against the window-system code).
 */
struct radeon_framebuffer
{
	struct gl_framebuffer base;

	struct radeon_renderbuffer *color_rb[2];
};
107
108
/* Cached color-buffer state. */
struct radeon_colorbuffer_state {
	GLuint clear;		/* clear color -- presumably packed via radeonPackColor; confirm */
	int roundEnable;
	struct gl_renderbuffer *rb;	/* current color renderbuffer */
	uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
};
115
/* Cached depth-buffer state. */
struct radeon_depthbuffer_state {
	GLuint clear;		/* depth clear value */
	struct gl_renderbuffer *rb;	/* current depth renderbuffer */
};
120
/* Scissor state plus the cliprect list derived from it. */
struct radeon_scissor_state {
	drm_clip_rect_t rect;		/* current scissor rectangle */
	GLboolean enabled;		/* scissor test enabled */

	GLuint numClipRects;		/* Cliprects active */
	GLuint numAllocedClipRects;	/* Cliprects available */
	drm_clip_rect_t *pClipRects;
};
129
/* Cached stencil-buffer state. */
struct radeon_stencilbuffer_state {
	GLuint clear;		/* rb3d_stencilrefmask value */
};
133
/**
 * One group of hardware state ("atom") kept on a doubly linked list.
 * Each atom knows how to test whether it is active and how to emit
 * its commands to the command stream.
 */
struct radeon_state_atom {
	struct radeon_state_atom *next, *prev;
	const char *name;	/* for debug */
	int cmd_size;		/* size in bytes */
	GLuint idx;
	GLuint is_tcl;
	GLuint *cmd;		/* one or more cmd's */
	GLuint *lastcmd;	/* one or more cmd's */
	GLboolean dirty;	/* dirty-mark in emit_state_list */
	int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
	void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
};
146
/* Aggregate hardware state: the atom list plus emit-size bookkeeping. */
struct radeon_hw_state {
	/* Head of the linked list of state atoms. */
	struct radeon_state_atom atomlist;
	int max_state_size;	/* Number of bytes necessary for a full state emit. */
	int max_post_flush_size; /* Number of bytes necessary for post flushing emits */
	GLboolean is_dirty, all_dirty;
};
154
155
/* Texture related */
typedef struct _radeon_texture_image radeon_texture_image;


/**
 * This is a subclass of swrast_texture_image since we use swrast
 * for software fallback rendering.
 */
struct _radeon_texture_image {
	struct swrast_texture_image base;

	/**
	 * If mt != 0, the image is stored in hardware format in the
	 * given mipmap tree. In this case, base.Data may point into the
	 * mapping of the buffer object that contains the mipmap tree.
	 *
	 * If mt == 0, the image is stored in normal memory pointed to
	 * by base.Data.
	 */
	struct _radeon_mipmap_tree *mt;
	struct radeon_bo *bo;	/* standalone BO -- presumably used when not in a miptree; confirm */

	int mtlevel; /**< if mt != 0, this is the image's level in the mipmap tree */
	int mtface; /**< if mt != 0, this is the image's face in the mipmap tree */
};
181
182
183 static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
184 {
185 return (radeon_texture_image*)image;
186 }
187
188
typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;

#define RADEON_TXO_MICRO_TILE (1 << 3)

/* Texture object in locally shared texture space.
 */
struct radeon_tex_obj {
	struct gl_texture_object base;
	struct _radeon_mipmap_tree *mt;		/* mipmap tree holding the image data */

	/**
	 * This is true if we've verified that the mipmap tree above is complete
	 * and so on.
	 */
	GLboolean validated;
	/* Minimum LOD to be used during rendering */
	unsigned minLod;
	/* Maximum LOD to be used during rendering */
	unsigned maxLod;

	GLuint override_offset;
	GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
	GLuint tile_bits;	/* hw texture tile bits used on this texture */
	struct radeon_bo *bo;

	GLuint pp_txfilter;	/* hardware register values */
	GLuint pp_txformat;
	GLuint pp_txformat_x;
	GLuint pp_txsize;	/* npot only */
	GLuint pp_txpitch;	/* npot only */
	GLuint pp_border_color;
	GLuint pp_cubic_faces;	/* cube face 1,2,3,4 log2 sizes */

	GLuint pp_txfilter_1;	/* r300 */

	GLboolean border_fallback;	/* border-color fallback flag -- see RADEON_FALLBACK_BORDER_MODE */
};
226
227 static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
228 {
229 return (radeonTexObj*)texObj;
230 }
231
/* occlusion query */
struct radeon_query_object {
	struct gl_query_object Base;
	struct radeon_bo *bo;		/* BO the hardware writes query results into -- presumably */
	int curr_offset;		/* current offset within bo */
	GLboolean emitted_begin;	/* whether the query-begin has been emitted */

	/* Double linked list of not flushed query objects */
	struct radeon_query_object *prev, *next;
};
242
/* Need refcounting on dma buffers:
 */
struct radeon_dma_buffer {
	int refcount;	/* the number of retained regions in buf */
	drmBufPtr buf;
};
249
/* One vertex array stream handed to the hardware. */
struct radeon_aos {
	struct radeon_bo *bo; /**< Buffer object where vertex data is stored */
	int offset; /**< Offset into buffer object, in bytes */
	int components; /**< Number of components per vertex */
	int stride; /**< Stride in dwords (may be 0 for repeating) */
	int count; /**< Number of vertices */
};
257
/* Expiry threshold for idle DMA BOs -- units presumably flush cycles; confirm */
#define DMA_BO_FREE_TIME 100

/* Doubly linked list node tracking one DMA buffer object. */
struct radeon_dma_bo {
	struct radeon_dma_bo *next, *prev;
	struct radeon_bo *bo;
	int expire_counter;	/* aged toward DMA_BO_FREE_TIME -- presumably; confirm in dma code */
};
265
struct radeon_dma {
	/* Active dma region. Allocations for vertices and retained
	 * regions come from here. Also used for emitting random vertices,
	 * these may be flushed by calling flush_current();
	 */
	struct radeon_dma_bo free;	/* list heads for BO recycling */
	struct radeon_dma_bo wait;
	struct radeon_dma_bo reserved;
	size_t current_used; /**< Number of bytes allocated and forgotten about */
	size_t current_vertexptr; /**< End of active vertex region */
	size_t minimum_size;

	/**
	 * If current_vertexptr != current_used then flush must be non-zero.
	 * flush must be called before non-active vertex allocations can be
	 * performed.
	 */
	void (*flush) (struct gl_context *);
};
285
/* radeon_swtcl.c
 */
struct radeon_swtcl_info {

	GLuint RenderIndex;
	GLuint vertex_size;	/* size of one vertex -- presumably in dwords or bytes; confirm */
	GLubyte *verts;

	/* Fallback rasterization functions
	 */
	GLuint hw_primitive;	/* hardware primitive type currently set up */
	GLenum render_primitive; /* GL primitive currently being rendered */
	GLuint numverts;

	struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
	GLuint vertex_attr_count;

	GLuint emit_prediction;
	struct radeon_bo *bo;
};
306
#define RADEON_MAX_AOS_ARRAYS 16
/* Hardware TCL vertex-array state. */
struct radeon_tcl_info {
	struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
	GLuint aos_count;	/* number of valid entries in aos[] */
	struct radeon_bo *elt_dma_bo; /**< Buffer object that contains element indices */
	int elt_dma_offset; /**< Offset into this buffer object, in bytes */
};
314
/* Vertex-buffer bookkeeping used when emitting draws. */
struct radeon_ioctl {
	GLuint vertex_offset;
	GLuint vertex_max;
	struct radeon_bo *bo;
	GLuint vertex_size;
};
321
#define RADEON_MAX_PRIMS 64

/* One recorded primitive run: a vertex range plus its primitive type. */
struct radeon_prim {
	GLuint start;
	GLuint end;
	GLuint prim;
};
329
330 static INLINE GLuint radeonPackColor(GLuint cpp,
331 GLubyte r, GLubyte g,
332 GLubyte b, GLubyte a)
333 {
334 switch (cpp) {
335 case 2:
336 return PACK_COLOR_565(r, g, b);
337 case 4:
338 return PACK_COLOR_8888(a, r, g, b);
339 default:
340 return 0;
341 }
342 }
343
#define MAX_CMD_BUF_SZ (16*1024)

#define MAX_DMA_BUF_SZ (64*1024)

/* Command-buffer staging store. */
struct radeon_store {
	GLuint statenr;
	GLuint primnr;
	char cmd_buf[MAX_CMD_BUF_SZ];	/* staged command bytes */
	int cmd_used;			/* bytes of cmd_buf currently in use */
	int elts_start;
};
355
/* Mirror of per-context DRI/DRM state. */
struct radeon_dri_mirror {
	__DRIcontext *context;	/* DRI context */
	__DRIscreen *screen;	/* DRI screen */

	drm_context_t hwContext;
	drm_hw_lock_t *hwLock;
	int hwLockCount;	/* lock recursion count -- presumably; confirm in lock code */
	int fd;			/* DRM file descriptor */
	int drmMinor;		/* DRM interface minor version */
};
366
/* Callbacks for emitting a single triangle / line / point
 * (see the *_swtcl.c rasterization-fallback code). */
typedef void (*radeon_tri_func) (radeonContextPtr,
				 radeonVertex *,
				 radeonVertex *, radeonVertex *);

typedef void (*radeon_line_func) (radeonContextPtr,
				  radeonVertex *, radeonVertex *);

typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);
#define RADEON_MAX_BOS 32

/* Derived per-context buffer state. */
struct radeon_state {
	struct radeon_colorbuffer_state color;
	struct radeon_depthbuffer_state depth;
	struct radeon_scissor_state scissor;
	struct radeon_stencilbuffer_state stencil;
};
383
/**
 * This structure holds the command buffer while it is being constructed.
 *
 * The first batch of commands in the buffer is always the state that needs
 * to be re-emitted when the context is lost. This batch can be skipped
 * otherwise.
 */
struct radeon_cmdbuf {
	struct radeon_cs_manager *csm;
	struct radeon_cs *cs;
	int size; /**< # of dwords total */
	unsigned int flushing:1; /**< whether we're currently in FlushCmdBufLocked */
};
397
/**
 * Common radeon driver context, shared across the classic radeon-family
 * drivers; chip-specific contexts presumably embed it -- confirm in the
 * per-chip headers.
 */
struct radeon_context {
	struct gl_context *glCtx;	/* core Mesa context */
	radeonScreenPtr radeonScreen;	/* Screen private DRI data */

	/* Texture object bookkeeping
	 */
	int texture_depth;
	float initialMaxAnisotropy;
	uint32_t texture_row_align;
	uint32_t texture_rect_row_align;
	uint32_t texture_compressed_row_align;

	struct radeon_dma dma;
	struct radeon_hw_state hw;
	/* Rasterization and vertex state:
	 */
	GLuint TclFallback;	/* bitmask of RADEON_TCL_FALLBACK_* */
	GLuint Fallback;	/* bitmask of RADEON_FALLBACK_* */
	GLuint NewGLState;
	DECLARE_RENDERINPUTS(tnl_index_bitset);	/* index of bits for last tnl_install_attrs */

	/* Drawable information */
	unsigned int lastStamp;
	drm_radeon_sarea_t *sarea;	/* Private SAREA data */

	/* Mirrors of some DRI state */
	struct radeon_dri_mirror dri;

	/* Busy waiting */
	GLuint do_usleeps;
	GLuint do_irqs;
	GLuint irqsEmitted;
	drm_radeon_irq_wait_t iw;

	/* Derived state - for r300 only */
	struct radeon_state state;

	struct radeon_swtcl_info swtcl;
	struct radeon_tcl_info tcl;
	/* Configuration cache
	 */
	driOptionCache optionCache;

	struct radeon_cmdbuf cmdbuf;

	struct radeon_debug debug;

	drm_clip_rect_t fboRect;
	GLboolean constant_cliprect;	/* use for FBO or DRI2 rendering */
	GLboolean front_cliprects;

	/**
	 * Set if rendering has occured to the drawable's front buffer.
	 *
	 * This is used in the DRI2 case to detect that glFlush should also copy
	 * the contents of the fake front buffer to the real front buffer.
	 */
	GLboolean front_buffer_dirty;

	/**
	 * Track whether front-buffer rendering is currently enabled
	 *
	 * A separate flag is used to track this in order to support MRT more
	 * easily.
	 */
	GLboolean is_front_buffer_rendering;

	/**
	 * Track whether front-buffer is the current read target.
	 *
	 * This is closely associated with is_front_buffer_rendering, but may
	 * be set separately. The DRI2 fake front buffer must be referenced
	 * either way.
	 */
	GLboolean is_front_buffer_reading;

	/* Occlusion-query bookkeeping. */
	struct {
		struct radeon_query_object *current;	/* query in progress, if any */
		struct radeon_state_atom queryobj;
	} query;

	/* Per-chip driver hooks, filled in by the hardware-specific code. */
	struct {
		void (*get_lock)(radeonContextPtr radeon);
		void (*update_viewport_offset)(struct gl_context *ctx);
		void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
		void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
		void (*pre_emit_atoms)(radeonContextPtr rmesa);
		void (*pre_emit_state)(radeonContextPtr rmesa);
		void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
		void (*free_context)(struct gl_context *ctx);
		void (*emit_query_finish)(radeonContextPtr radeon);
		void (*update_scissor)(struct gl_context *ctx);
		unsigned (*check_blit)(gl_format mesa_format);
		unsigned (*blit)(struct gl_context *ctx,
				 struct radeon_bo *src_bo,
				 intptr_t src_offset,
				 gl_format src_mesaformat,
				 unsigned src_pitch,
				 unsigned src_width,
				 unsigned src_height,
				 unsigned src_x_offset,
				 unsigned src_y_offset,
				 struct radeon_bo *dst_bo,
				 intptr_t dst_offset,
				 gl_format dst_mesaformat,
				 unsigned dst_pitch,
				 unsigned dst_width,
				 unsigned dst_height,
				 unsigned dst_x_offset,
				 unsigned dst_y_offset,
				 unsigned reg_width,
				 unsigned reg_height,
				 unsigned flip_y);
		unsigned (*is_format_renderable)(gl_format mesa_format);
	} vtbl;
};
514
/**
 * Recover the radeon context from a struct gl_context pointer.
 *
 * Fixed: the original body expanded the literal identifier 'ctx' instead
 * of the macro parameter 'glctx', so it only worked when the caller's
 * variable happened to be named 'ctx' (and silently bound to the wrong
 * variable otherwise). Now uses the parameter, fully parenthesized.
 */
#define RADEON_CONTEXT(glctx) ((radeonContextPtr)((glctx)->DriverCtx))
516
517 static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
518 {
519 return radeon->dri.context->driDrawablePriv;
520 }
521
522 static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
523 {
524 return radeon->dri.context->driReadablePriv;
525 }
526
/* Initialize the common radeon context state; returns GL_FALSE on failure. */
GLboolean radeonInitContext(radeonContextPtr radeon,
			    struct dd_function_table* functions,
			    const struct gl_config * glVisual,
			    __DRIcontext * driContextPriv,
			    void *sharedContextPrivate);

/* Release resources owned by the common context. */
void radeonCleanupContext(radeonContextPtr radeon);
GLboolean radeonUnbindContext(__DRIcontext * driContextPriv);
/* Refresh the renderbuffers backing a drawable. */
void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
				 GLboolean front_only);
GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
			    __DRIdrawable * driDrawPriv,
			    __DRIdrawable * driReadPriv);
extern void radeonDestroyContext(__DRIcontext * driContextPriv);
void radeon_prepare_render(radeonContextPtr radeon);
542
543 #endif