/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef INTELCONTEXT_INC
#define INTELCONTEXT_INC


#include <stdbool.h>
#include <string.h>
#include "main/mtypes.h"
#include "main/mm.h"

#ifdef __cplusplus
extern "C" {
/* Evil hack for using libdrm in a c++ compiler. */
#define virtual virt
#endif

#include "drm.h"
#include "intel_bufmgr.h"

#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "i915_drm.h"

#ifdef __cplusplus
#undef virtual
#endif

#include "tnl/t_vertex.h"

#define TAG(x) intel##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define DV_PF_555 (1<<8)
#define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8)
#define DV_PF_4444 (8<<8)
#define DV_PF_1555 (9<<8)

struct intel_region;
struct intel_context;

typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
                                intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
                                 intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);

/**
 * Bits for intel->Fallback field
 */
/*@{*/
#define INTEL_FALLBACK_DRAW_BUFFER 0x1
#define INTEL_FALLBACK_READ_BUFFER 0x2
#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
#define INTEL_FALLBACK_USER 0x10
#define INTEL_FALLBACK_RENDERMODE 0x20
#define INTEL_FALLBACK_TEXTURE 0x40
#define INTEL_FALLBACK_DRIVER 0x1000 /**< first for drivers */
/*@}*/

extern void intelFallback(struct intel_context *intel, GLbitfield bit,
                          bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )

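/* Usage sketch (illustrative): state code that hits a case the hardware
 * cannot handle raises a software-rasterization fallback with
 *
 *    FALLBACK(intel, INTEL_FALLBACK_TEXTURE, true);
 *
 * and clears it again with the same bit and false once the condition goes
 * away; rendering is routed through swrast while any Fallback bit is set.
 */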

#define INTEL_WRITE_PART 0x1
#define INTEL_WRITE_FULL 0x2
#define INTEL_READ 0x4

#define INTEL_MAX_FIXUP 64

#ifndef likely
#ifdef __GNUC__
#define likely(expr) (__builtin_expect(expr, 1))
#define unlikely(expr) (__builtin_expect(expr, 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#endif
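
/* Branch-prediction hints; debug-only paths in this file are wrapped as,
 * e.g.,
 *
 *    if (unlikely(INTEL_DEBUG & DEBUG_PERF))
 *       printf(...);
 *
 * so the common case stays cheap (see the DBG()/perf_debug() macros below).
 */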

struct intel_sync_object {
   struct gl_sync_object Base;

   /** Batch associated with this sync object */
   drm_intel_bo *bo;
};

struct brw_context;

struct intel_batchbuffer {
   /** Current batchbuffer being queued up. */
   drm_intel_bo *bo;
   /** Last BO submitted to the hardware. Used for glFinish(). */
   drm_intel_bo *last_bo;
   /** BO for post-sync nonzero writes for gen6 workaround. */
   drm_intel_bo *workaround_bo;
   bool need_workaround_flush;

   struct cached_batch_item *cached_items;

   uint16_t emit, total;
   uint16_t used, reserved_space;
   uint32_t map[8192];
#define BATCH_SZ (8192*sizeof(uint32_t))

   uint32_t state_batch_offset;
   bool is_blit;
   bool needs_sol_reset;

   struct {
      uint16_t used;
      int reloc_count;
   } saved;
};
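
/* Note: map[] holds 8192 32-bit dwords, so BATCH_SZ works out to
 * 8192 * 4 = 32768 bytes (32 KB) of batch command space.
 */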

/**
 * intel_context is derived from Mesa's context class: struct gl_context.
 */
struct intel_context
{
   struct gl_context ctx;  /**< base class, must be first field */

   struct
   {
      void (*destroy) (struct intel_context * intel);
      void (*emit_state) (struct intel_context * intel);
      void (*finish_batch) (struct intel_context * intel);
      void (*new_batch) (struct intel_context * intel);
      void (*emit_invarient_state) (struct intel_context * intel);
      void (*update_texture_state) (struct intel_context * intel);

      void (*render_start) (struct intel_context * intel);
      void (*render_prevalidate) (struct intel_context * intel);
      void (*set_draw_region) (struct intel_context * intel,
                               struct intel_region * draw_regions[],
                               struct intel_region * depth_region,
                               GLuint num_regions);
      void (*update_draw_buffer)(struct intel_context *intel);

      void (*reduced_primitive_state) (struct intel_context * intel,
                                       GLenum rprim);

      bool (*check_vertex_size) (struct intel_context * intel,
                                 GLuint expected);
      void (*invalidate_state) (struct intel_context *intel,
                                GLuint new_state);

      void (*assert_not_dirty) (struct intel_context *intel);

      void (*debug_batch)(struct intel_context *intel);
      void (*annotate_aub)(struct intel_context *intel);
      bool (*render_target_supported)(struct intel_context *intel,
                                      struct gl_renderbuffer *rb);

      /** Can HiZ be enabled on a depthbuffer of the given format? */
      bool (*is_hiz_depth_format)(struct intel_context *intel,
                                  gl_format format);

      /**
       * Surface state operations (i965+ only)
       * \{
       */
      void (*update_texture_surface)(struct gl_context *ctx,
                                     unsigned unit,
                                     uint32_t *binding_table,
                                     unsigned surf_index);
      void (*update_renderbuffer_surface)(struct brw_context *brw,
                                          struct gl_renderbuffer *rb,
                                          unsigned unit);
      void (*update_null_renderbuffer_surface)(struct brw_context *brw,
                                               unsigned unit);
      void (*create_constant_surface)(struct brw_context *brw,
                                      drm_intel_bo *bo,
                                      uint32_t offset,
                                      int width,
                                      uint32_t *out_offset);
      /** \} */
   } vtbl;
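
   /* Illustrative sketch only (hypothetical function names): each chipset
    * backend fills in this vtable when it creates its context, roughly
    * along the lines of
    *
    *    static void xxxInitVtbl(struct intel_context *intel)
    *    {
    *       intel->vtbl.destroy = xxxDestroyContext;
    *       intel->vtbl.emit_state = xxxEmitState;
    *       ...
    *    }
    *
    * so the shared intel_* code never needs to know which chipset it is
    * driving.
    */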

   GLbitfield Fallback;  /**< mask of INTEL_FALLBACK_x bits */
   GLuint NewGLState;

   dri_bufmgr *bufmgr;
   unsigned int maxBatchSize;

   /**
    * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
    */
   int gen;
   int gt;
   bool needs_ff_sync;
   bool is_haswell;
   bool is_g4x;
   bool is_945;
   bool has_separate_stencil;
   bool must_use_separate_stencil;
   bool has_hiz;
   bool has_llc;
   bool has_swizzling;

   int urb_size;

   drm_intel_context *hw_ctx;

   struct intel_batchbuffer batch;

   drm_intel_bo *first_post_swapbuffers_batch;
   bool need_throttle;
   bool no_batch_wrap;
   bool tnl_pipeline_running;  /**< Set while i915 runs _tnl_run_pipeline(). */

   struct
   {
      GLuint id;
      uint32_t start_ptr;          /**< for i8xx */
      uint32_t primitive;          /**< Current hardware primitive type */
      void (*flush) (struct intel_context *);
      drm_intel_bo *vb_bo;
      uint8_t *vb;
      unsigned int start_offset;   /**< Byte offset of primitive sequence */
      unsigned int current_offset; /**< Byte offset of next vertex */
      unsigned int count;          /**< Number of vertices in current primitive */
   } prim;

   struct {
      drm_intel_bo *bo;
      GLuint offset;
      uint32_t buffer_len;
      uint32_t buffer_offset;
      char buffer[4096];
   } upload;

   GLuint stats_wm;

   /* Offsets of fields within the current vertex:
    */
   GLuint coloroffset;
   GLuint specoffset;
   GLuint wpos_offset;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLfloat polygon_offset_scale;  /* dependent on depth_scale, bpp */

   bool hw_stencil;
   bool hw_stipple;
   bool no_rast;
   bool always_flush_batch;
   bool always_flush_cache;

   /* State for intelvb.c and inteltris.c.
    */
   GLuint RenderIndex;
   GLmatrix ViewportMatrix;
   GLenum render_primitive;
   GLenum reduced_primitive;  /**< Only gen < 6 */
   GLuint vertex_size;
   GLubyte *verts;            /* points to tnl->clipspace.vertex_buf */

   /* Fallback rasterization functions
    */
   intel_point_func draw_point;
   intel_line_func draw_line;
   intel_tri_func draw_tri;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   bool front_buffer_dirty;

   /**
    * Track whether front-buffer rendering is currently enabled
    *
    * A separate flag is used to track this in order to support MRT more
    * easily.
    */
   bool is_front_buffer_rendering;
   /**
    * Track whether front-buffer is the current read target.
    *
    * This is closely associated with is_front_buffer_rendering, but may
    * be set separately. The DRI2 fake front buffer must be referenced
    * either way.
    */
   bool is_front_buffer_reading;

   /**
    * Count of intel_regions that are mapped.
    *
    * This allows us to assert that no batch buffer is emitted if a
    * region is mapped.
    */
   int num_mapped_regions;

   bool use_texture_tiling;
   bool use_early_z;

   int driFd;

   __DRIcontext *driContext;
   struct intel_screen *intelScreen;
   void (*saved_viewport)(struct gl_context * ctx,
                          GLint x, GLint y, GLsizei width, GLsizei height);

   /**
    * Configuration cache
    */
   driOptionCache optionCache;
};

extern char *__progname;


#define SUBPIXEL_X 0.125
#define SUBPIXEL_Y 0.125

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))

/**
 * Align a value up to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded up.
 *
 * \param value Value to be rounded
 * \param alignment Alignment value to be used. This must be a power of two.
 *
 * \sa ROUND_DOWN_TO()
 */
#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))

/**
 * Align a value down to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value Value to be rounded
 * \param alignment Alignment value to be used. This must be a power of two.
 *
 * \sa ALIGN()
 */
#define ROUND_DOWN_TO(value, alignment) ((value) & ~((alignment) - 1))

#define IS_POWER_OF_TWO(val) (((val) & ((val) - 1)) == 0)
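
/* Worked examples (power-of-two alignment assumed, as documented above):
 *
 *    ALIGN(13, 8)         == (13 + 7) & ~7 == 16
 *    ROUND_DOWN_TO(13, 8) ==  13      & ~7 ==  8
 *    IS_POWER_OF_TWO(8)   == true,  IS_POWER_OF_TWO(12) == false
 *
 * Note that IS_POWER_OF_TWO(0) also evaluates to true.
 */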

static INLINE uint32_t
U_FIXED(float value, uint32_t frac_bits)
{
   value *= (1 << frac_bits);
   return value < 0 ? 0 : value;
}

static INLINE uint32_t
S_FIXED(float value, uint32_t frac_bits)
{
   return value * (1 << frac_bits);
}
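
/* These convert a float into a fixed-point integer with frac_bits fractional
 * bits; U_FIXED() additionally clamps negative inputs to 0. For example:
 *
 *    S_FIXED(1.5f, 4)  == 1.5 * 16 == 24   (1.5 in 4-bit fixed point)
 *    U_FIXED(-0.5f, 4) == 0                (clamped)
 */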

#define INTEL_FIREVERTICES(intel)		\
do {						\
   if ((intel)->prim.flush)			\
      (intel)->prim.flush(intel);		\
} while (0)

/* ================================================================
 * From linux kernel i386 header files, copes with odd sizes better
 * than COPY_DWORDS would:
 * XXX Put this in src/mesa/main/imports.h ???
 */
#if defined(i386) || defined(__i386__)
static INLINE void * __memcpy(void * to, const void * from, size_t n)
{
   int d0, d1, d2;
   __asm__ __volatile__(
      "rep ; movsl\n\t"
      "testb $2,%b4\n\t"
      "je 1f\n\t"
      "movsw\n"
      "1:\ttestb $1,%b4\n\t"
      "je 2f\n\t"
      "movsb\n"
      "2:"
      : "=&c" (d0), "=&D" (d1), "=&S" (d2)
      : "0" (n/4), "q" (n), "1" ((long) to), "2" ((long) from)
      : "memory");
   return (to);
}
#else
#define __memcpy(a,b,c) memcpy(a,b,c)
#endif


/* ================================================================
 * Debugging:
 */
extern int INTEL_DEBUG;

#define DEBUG_TEXTURE 0x1
#define DEBUG_STATE 0x2
#define DEBUG_IOCTL 0x4
#define DEBUG_BLIT 0x8
#define DEBUG_MIPTREE 0x10
#define DEBUG_PERF 0x20
#define DEBUG_VERBOSE 0x40
#define DEBUG_BATCH 0x80
#define DEBUG_PIXEL 0x100
#define DEBUG_BUFMGR 0x200
#define DEBUG_REGION 0x400
#define DEBUG_FBO 0x800
#define DEBUG_GS 0x1000
#define DEBUG_SYNC 0x2000
#define DEBUG_PRIMS 0x4000
#define DEBUG_VERTS 0x8000
#define DEBUG_DRI 0x10000
#define DEBUG_SF 0x20000
#define DEBUG_SANITY 0x40000
#define DEBUG_SLEEP 0x80000
#define DEBUG_STATS 0x100000
#define DEBUG_TILE 0x200000
#define DEBUG_WM 0x400000
#define DEBUG_URB 0x800000
#define DEBUG_VS 0x1000000
#define DEBUG_CLIP 0x2000000
#define DEBUG_AUB 0x4000000

#define DBG(...) do {						\
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))			\
      printf(__VA_ARGS__);					\
} while(0)

#define fallback_debug(...) do {				\
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))			\
      printf(__VA_ARGS__);					\
} while(0)

#define perf_debug(...) do {					\
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))			\
      printf(__VA_ARGS__);					\
} while(0)
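
/* DBG() filters on FILE_DEBUG_FLAG, which each .c file is expected to
 * define to one of the DEBUG_* bits above before using the macro. A
 * typical (illustrative) pattern:
 *
 *    #define FILE_DEBUG_FLAG DEBUG_BLIT
 *    ...
 *    DBG("%s: %dx%d\n", __FUNCTION__, width, height);
 *
 * INTEL_DEBUG itself is a bitmask, normally populated from the INTEL_DEBUG
 * environment variable at screen initialization.
 */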

#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_I855_GM 0x3582
#define PCI_CHIP_I865_G 0x2572
#define PCI_CHIP_I915_G 0x2582
#define PCI_CHIP_I915_GM 0x2592
#define PCI_CHIP_I945_G 0x2772
#define PCI_CHIP_I945_GM 0x27A2
#define PCI_CHIP_I945_GME 0x27AE
#define PCI_CHIP_G33_G 0x29C2
#define PCI_CHIP_Q35_G 0x29B2
#define PCI_CHIP_Q33_G 0x29D2


/* ================================================================
 * intel_context.c:
 */

extern bool intelInitContext(struct intel_context *intel,
                             int api,
                             const struct gl_config * mesaVis,
                             __DRIcontext * driContextPriv,
                             void *sharedContextPrivate,
                             struct dd_function_table *functions);

extern void intelFinish(struct gl_context * ctx);
extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
extern void _intel_flush(struct gl_context * ctx, const char *file, int line);

#define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)

extern void intelInitDriverFunctions(struct dd_function_table *functions);

void intel_init_syncobj_functions(struct dd_function_table *functions);


/* ================================================================
 * intel_state.c:
 */
extern void intelInitStateFuncs(struct dd_function_table *functions);

#define COMPAREFUNC_ALWAYS 0
#define COMPAREFUNC_NEVER 0x1
#define COMPAREFUNC_LESS 0x2
#define COMPAREFUNC_EQUAL 0x3
#define COMPAREFUNC_LEQUAL 0x4
#define COMPAREFUNC_GREATER 0x5
#define COMPAREFUNC_NOTEQUAL 0x6
#define COMPAREFUNC_GEQUAL 0x7

#define STENCILOP_KEEP 0
#define STENCILOP_ZERO 0x1
#define STENCILOP_REPLACE 0x2
#define STENCILOP_INCRSAT 0x3
#define STENCILOP_DECRSAT 0x4
#define STENCILOP_INCR 0x5
#define STENCILOP_DECR 0x6
#define STENCILOP_INVERT 0x7

#define LOGICOP_CLEAR 0
#define LOGICOP_NOR 0x1
#define LOGICOP_AND_INV 0x2
#define LOGICOP_COPY_INV 0x3
#define LOGICOP_AND_RVRSE 0x4
#define LOGICOP_INV 0x5
#define LOGICOP_XOR 0x6
#define LOGICOP_NAND 0x7
#define LOGICOP_AND 0x8
#define LOGICOP_EQUIV 0x9
#define LOGICOP_NOOP 0xa
#define LOGICOP_OR_INV 0xb
#define LOGICOP_COPY 0xc
#define LOGICOP_OR_RVRSE 0xd
#define LOGICOP_OR 0xe
#define LOGICOP_SET 0xf

#define BLENDFACT_ZERO 0x01
#define BLENDFACT_ONE 0x02
#define BLENDFACT_SRC_COLR 0x03
#define BLENDFACT_INV_SRC_COLR 0x04
#define BLENDFACT_SRC_ALPHA 0x05
#define BLENDFACT_INV_SRC_ALPHA 0x06
#define BLENDFACT_DST_ALPHA 0x07
#define BLENDFACT_INV_DST_ALPHA 0x08
#define BLENDFACT_DST_COLR 0x09
#define BLENDFACT_INV_DST_COLR 0x0a
#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
#define BLENDFACT_CONST_COLOR 0x0c
#define BLENDFACT_INV_CONST_COLOR 0x0d
#define BLENDFACT_CONST_ALPHA 0x0e
#define BLENDFACT_INV_CONST_ALPHA 0x0f
#define BLENDFACT_MASK 0x0f

enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};

extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
extern int intel_translate_logic_op(GLenum opcode);

void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);

void
intel_downsample_for_dri2_flush(struct intel_context *intel,
                                __DRIdrawable *drawable);

void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
                                  uint32_t buffer_id);
void intel_init_texture_formats(struct gl_context *ctx);

/*======================================================================
 * Inline conversion functions.
 * These are better-typed than the macros used previously:
 */
static INLINE struct intel_context *
intel_context(struct gl_context * ctx)
{
   return (struct intel_context *) ctx;
}
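
/* Typical use (illustrative, hypothetical function name) in a driver
 * callback that receives the core Mesa context:
 *
 *    static void intelExampleHook(struct gl_context *ctx)
 *    {
 *       struct intel_context *intel = intel_context(ctx);
 *       ...
 *    }
 *
 * The cast is safe because ctx is the first field of struct intel_context.
 */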

static INLINE bool
is_power_of_two(uint32_t value)
{
   return (value & (value - 1)) == 0;
}
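
/* Note: like IS_POWER_OF_TWO() above, this also returns true for 0. */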

#ifdef __cplusplus
}
#endif

#endif