intel: Hook up the WARN_ONCE macro to GL_ARB_debug_output.
[mesa.git] src/mesa/drivers/dri/intel/intel_context.h
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef INTELCONTEXT_INC
#define INTELCONTEXT_INC


#include <stdbool.h>
#include <string.h>
#include "main/mtypes.h"
#include "main/mm.h"

#ifdef __cplusplus
extern "C" {
/* Evil hack for using libdrm in a c++ compiler. */
#define virtual virt
#endif

#include "drm.h"
#include "intel_bufmgr.h"

#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "i915_drm.h"

#ifdef __cplusplus
#undef virtual
#endif

#include "tnl/t_vertex.h"

#define TAG(x) intel##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define DV_PF_555  (1<<8)
#define DV_PF_565  (2<<8)
#define DV_PF_8888 (3<<8)
#define DV_PF_4444 (8<<8)
#define DV_PF_1555 (9<<8)

struct intel_region;
struct intel_context;

typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
                                intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
                                 intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);

/**
 * Bits for intel->Fallback field
 */
/*@{*/
#define INTEL_FALLBACK_DRAW_BUFFER       0x1
#define INTEL_FALLBACK_READ_BUFFER       0x2
#define INTEL_FALLBACK_DEPTH_BUFFER      0x4
#define INTEL_FALLBACK_STENCIL_BUFFER    0x8
#define INTEL_FALLBACK_USER              0x10
#define INTEL_FALLBACK_RENDERMODE        0x20
#define INTEL_FALLBACK_TEXTURE           0x40
#define INTEL_FALLBACK_DRIVER            0x1000  /**< first for drivers */
/*@}*/

extern void intelFallback(struct intel_context *intel, GLbitfield bit,
                          bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )

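/*
 * Hypothetical usage sketch (not part of the original header): a state
 * validation routine in a driver .c file would raise and later clear a
 * fallback bit roughly like this, assuming an `intel` context pointer is
 * in scope and `texture_setup_unsupported` is a made-up condition:
 *
 *    if (texture_setup_unsupported)
 *       FALLBACK(intel, INTEL_FALLBACK_TEXTURE, true);
 *    else
 *       FALLBACK(intel, INTEL_FALLBACK_TEXTURE, false);
 *
 * While any bit in intel->Fallback is set, rendering takes the software
 * (swrast) fallback path instead of the hardware path.
 */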

#define INTEL_WRITE_PART  0x1
#define INTEL_WRITE_FULL  0x2
#define INTEL_READ        0x4

#define INTEL_MAX_FIXUP 64

#ifndef likely
#ifdef __GNUC__
#define likely(expr) (__builtin_expect(expr, 1))
#define unlikely(expr) (__builtin_expect(expr, 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#endif

struct intel_sync_object {
   struct gl_sync_object Base;

   /** Batch associated with this sync object */
   drm_intel_bo *bo;
};

struct brw_context;

struct intel_batchbuffer {
   /** Current batchbuffer being queued up. */
   drm_intel_bo *bo;
   /** Last BO submitted to the hardware.  Used for glFinish(). */
   drm_intel_bo *last_bo;
   /** BO for post-sync nonzero writes for gen6 workaround. */
   drm_intel_bo *workaround_bo;
   bool need_workaround_flush;

   struct cached_batch_item *cached_items;

   uint16_t emit, total;
   uint16_t used, reserved_space;
   uint32_t *map;
   uint32_t *cpu_map;
#define BATCH_SZ (8192*sizeof(uint32_t))

   uint32_t state_batch_offset;
   bool is_blit;
   bool needs_sol_reset;

   struct {
      uint16_t used;
      int reloc_count;
   } saved;
};

/**
 * intel_context is derived from Mesa's context class: struct gl_context.
 */
struct intel_context
{
   struct gl_context ctx;  /**< base class, must be first field */

   struct
   {
      void (*destroy) (struct intel_context * intel);
      void (*emit_state) (struct intel_context * intel);
      void (*finish_batch) (struct intel_context * intel);
      void (*new_batch) (struct intel_context * intel);
      void (*emit_invarient_state) (struct intel_context * intel);
      void (*update_texture_state) (struct intel_context * intel);

      void (*render_start) (struct intel_context * intel);
      void (*render_prevalidate) (struct intel_context * intel);
      void (*set_draw_region) (struct intel_context * intel,
                               struct intel_region * draw_regions[],
                               struct intel_region * depth_region,
                               GLuint num_regions);
      void (*update_draw_buffer)(struct intel_context *intel);

      void (*reduced_primitive_state) (struct intel_context * intel,
                                       GLenum rprim);

      bool (*check_vertex_size) (struct intel_context * intel,
                                 GLuint expected);
      void (*invalidate_state) (struct intel_context *intel,
                                GLuint new_state);

      void (*assert_not_dirty) (struct intel_context *intel);

      void (*debug_batch)(struct intel_context *intel);
      void (*annotate_aub)(struct intel_context *intel);
      bool (*render_target_supported)(struct intel_context *intel,
                                      struct gl_renderbuffer *rb);

      /** Can HiZ be enabled on a depthbuffer of the given format? */
      bool (*is_hiz_depth_format)(struct intel_context *intel,
                                  gl_format format);

      /**
       * Surface state operations (i965+ only)
       * \{
       */
      void (*update_texture_surface)(struct gl_context *ctx,
                                     unsigned unit,
                                     uint32_t *binding_table,
                                     unsigned surf_index);
      void (*update_renderbuffer_surface)(struct brw_context *brw,
                                          struct gl_renderbuffer *rb,
                                          unsigned unit);
      void (*update_null_renderbuffer_surface)(struct brw_context *brw,
                                               unsigned unit);
      void (*create_constant_surface)(struct brw_context *brw,
                                      drm_intel_bo *bo,
                                      uint32_t offset,
                                      int width,
                                      uint32_t *out_offset);
      /** \} */
   } vtbl;

   GLbitfield Fallback;  /**< mask of INTEL_FALLBACK_x bits */
   GLuint NewGLState;

   dri_bufmgr *bufmgr;
   unsigned int maxBatchSize;

   /**
    * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
    */
   int gen;
   int gt;
   bool needs_ff_sync;
   bool is_haswell;
   bool is_g4x;
   bool is_945;
   bool has_separate_stencil;
   bool must_use_separate_stencil;
   bool has_hiz;
   bool has_llc;
   bool has_swizzling;

   int urb_size;

   drm_intel_context *hw_ctx;

   struct intel_batchbuffer batch;

   drm_intel_bo *first_post_swapbuffers_batch;
   bool need_throttle;
   bool no_batch_wrap;
   bool tnl_pipeline_running; /**< Set while running i915's _tnl_run_pipeline(). */

   struct
   {
      GLuint id;
      uint32_t start_ptr;          /**< for i8xx */
      uint32_t primitive;          /**< Current hardware primitive type */
      void (*flush) (struct intel_context *);
      drm_intel_bo *vb_bo;
      uint8_t *vb;
      unsigned int start_offset;   /**< Byte offset of primitive sequence */
      unsigned int current_offset; /**< Byte offset of next vertex */
      unsigned int count;          /**< Number of vertices in current primitive */
   } prim;

   struct {
      drm_intel_bo *bo;
      GLuint offset;
      uint32_t buffer_len;
      uint32_t buffer_offset;
      char buffer[4096];
   } upload;

   GLuint stats_wm;

   /* Offsets of fields within the current vertex:
    */
   GLuint coloroffset;
   GLuint specoffset;
   GLuint wpos_offset;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLfloat polygon_offset_scale;   /* dependent on depth_scale, bpp */

   bool hw_stencil;
   bool hw_stipple;
   bool no_rast;
   bool always_flush_batch;
   bool always_flush_cache;

   /* State for intelvb.c and inteltris.c.
    */
   GLuint RenderIndex;
   GLmatrix ViewportMatrix;
   GLenum render_primitive;
   GLenum reduced_primitive; /*< Only gen < 6 */
   GLuint vertex_size;
   GLubyte *verts;           /* points to tnl->clipspace.vertex_buf */

   /* Fallback rasterization functions
    */
   intel_point_func draw_point;
   intel_line_func draw_line;
   intel_tri_func draw_tri;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   bool front_buffer_dirty;

   /**
    * Track whether front-buffer rendering is currently enabled
    *
    * A separate flag is used to track this in order to support MRT more
    * easily.
    */
   bool is_front_buffer_rendering;
   /**
    * Track whether front-buffer is the current read target.
    *
    * This is closely associated with is_front_buffer_rendering, but may
    * be set separately.  The DRI2 fake front buffer must be referenced
    * either way.
    */
   bool is_front_buffer_reading;

   /**
    * Count of intel_regions that are mapped.
    *
    * This allows us to assert that no batch buffer is emitted if a
    * region is mapped.
    */
   int num_mapped_regions;

   bool use_texture_tiling;
   bool use_early_z;

   int driFd;

   __DRIcontext *driContext;
   struct intel_screen *intelScreen;
   void (*saved_viewport)(struct gl_context * ctx,
                          GLint x, GLint y, GLsizei width, GLsizei height);

   /**
    * Configuration cache
    */
   driOptionCache optionCache;
};

extern char *__progname;


#define SUBPIXEL_X 0.125
#define SUBPIXEL_Y 0.125

/**
 * Align a value down to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value  Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 *
 * \sa ALIGN()
 */
#define ROUND_DOWN_TO(value, alignment) ((value) & ~((alignment) - 1))

#define IS_POWER_OF_TWO(val) (((val) & ((val) - 1)) == 0)

/**
 * Convert a float to an unsigned fixed-point value with \c frac_bits
 * fractional bits.  Negative inputs clamp to zero.
 */
static INLINE uint32_t
U_FIXED(float value, uint32_t frac_bits)
{
   value *= (1 << frac_bits);
   return value < 0 ? 0 : value;
}

/**
 * Convert a float to a signed fixed-point value with \c frac_bits
 * fractional bits.
 */
static INLINE uint32_t
S_FIXED(float value, uint32_t frac_bits)
{
   return value * (1 << frac_bits);
}
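
/*
 * Worked examples (illustrative only, not part of the original header):
 *
 *    ROUND_DOWN_TO(29, 8)  == 24       (29 & ~7)
 *    IS_POWER_OF_TWO(64)   != 0
 *    U_FIXED(-1.5f, 4)     == 0        (negative input clamps to zero)
 *    S_FIXED(0.5f, 16)     == 0x8000   (0.5 * 2^16)
 */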

#define INTEL_FIREVERTICES(intel)               \
do {                                            \
   if ((intel)->prim.flush)                     \
      (intel)->prim.flush(intel);               \
} while (0)

/* ================================================================
 * From linux kernel i386 header files, copes with odd sizes better
 * than COPY_DWORDS would:
 * XXX Put this in src/mesa/main/imports.h ???
 */
#if defined(i386) || defined(__i386__)
static INLINE void * __memcpy(void * to, const void * from, size_t n)
{
   int d0, d1, d2;
   /* Copy n/4 dwords with "rep movsl", then pick up the trailing word
    * and/or byte when n is not a multiple of four.
    */
   __asm__ __volatile__(
      "rep ; movsl\n\t"
      "testb $2,%b4\n\t"
      "je 1f\n\t"
      "movsw\n"
      "1:\ttestb $1,%b4\n\t"
      "je 2f\n\t"
      "movsb\n"
      "2:"
      : "=&c" (d0), "=&D" (d1), "=&S" (d2)
      : "0" (n/4), "q" (n), "1" ((long) to), "2" ((long) from)
      : "memory");
   return (to);
}
#else
#define __memcpy(a,b,c) memcpy(a,b,c)
#endif


/* ================================================================
 * Debugging:
 */
extern int INTEL_DEBUG;

#define DEBUG_TEXTURE     0x1
#define DEBUG_STATE       0x2
#define DEBUG_IOCTL       0x4
#define DEBUG_BLIT        0x8
#define DEBUG_MIPTREE     0x10
#define DEBUG_PERF        0x20
#define DEBUG_VERBOSE     0x40
#define DEBUG_BATCH       0x80
#define DEBUG_PIXEL       0x100
#define DEBUG_BUFMGR      0x200
#define DEBUG_REGION      0x400
#define DEBUG_FBO         0x800
#define DEBUG_GS          0x1000
#define DEBUG_SYNC        0x2000
#define DEBUG_PRIMS       0x4000
#define DEBUG_VERTS       0x8000
#define DEBUG_DRI         0x10000
#define DEBUG_SF          0x20000
#define DEBUG_SANITY      0x40000
#define DEBUG_SLEEP       0x80000
#define DEBUG_STATS       0x100000
#define DEBUG_TILE        0x200000
#define DEBUG_WM          0x400000
#define DEBUG_URB         0x800000
#define DEBUG_VS          0x1000000
#define DEBUG_CLIP        0x2000000
#define DEBUG_AUB         0x4000000
#define DEBUG_SHADER_TIME 0x8000000
#define DEBUG_NO16        0x20000000

#ifdef HAVE_ANDROID_PLATFORM
#define LOG_TAG "INTEL-MESA"
#include <cutils/log.h>
#ifndef ALOGW
#define ALOGW LOGW
#endif
#define dbg_printf(...) ALOGW(__VA_ARGS__)
#else
#define dbg_printf(...) printf(__VA_ARGS__)
#endif /* HAVE_ANDROID_PLATFORM */

#define DBG(...) do {                                           \
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))                 \
      dbg_printf(__VA_ARGS__);                                  \
} while(0)

#define fallback_debug(...) do {                                \
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))                      \
      dbg_printf(__VA_ARGS__);                                  \
} while(0)

#define perf_debug(...) do {                                    \
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))                      \
      dbg_printf(__VA_ARGS__);                                  \
} while(0)
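
/*
 * Usage sketch (illustrative, not part of the original header): DBG()
 * expects the including .c file to define FILE_DEBUG_FLAG to one of the
 * DEBUG_* bits above, so its output can be enabled at runtime via the
 * INTEL_DEBUG flag word, e.g.:
 *
 *    #define FILE_DEBUG_FLAG DEBUG_BLIT
 *    ...
 *    DBG("%s: %dx%d blit\n", __FUNCTION__, width, height);
 *
 * (width and height here are hypothetical local variables.)
 */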

/**
 * Emit a warning to stderr the first time \c cond is true at a given call
 * site, and also report it to the application through GL_ARB_debug_output
 * via _mesa_gl_debug().
 *
 * Note that this expects a \c ctx (struct gl_context *) variable to be in
 * scope at the call site.
 */
#define WARN_ONCE(cond, fmt...) do {                            \
   if (unlikely(cond)) {                                        \
      static bool _warned = false;                              \
      static GLuint msg_id = 0;                                 \
      if (!_warned) {                                           \
         fprintf(stderr, "WARNING: ");                          \
         fprintf(stderr, fmt);                                  \
         _warned = true;                                        \
                                                                \
         _mesa_gl_debug(ctx, &msg_id,                           \
                        MESA_DEBUG_TYPE_OTHER,                  \
                        MESA_DEBUG_SEVERITY_HIGH, fmt);         \
      }                                                         \
   }                                                            \
} while (0)
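
/*
 * Hypothetical call site (illustrative only), in a driver .c file where
 * `ctx` is the current struct gl_context pointer; `stride` and
 * `hw_max_stride` are made-up names:
 *
 *    WARN_ONCE(stride > hw_max_stride,
 *              "vertex stride %d exceeds hardware limit\n", stride);
 *
 * The message is printed to stderr only once per call site, but each
 * occurrence is also reported through the GL_ARB_debug_output mechanism.
 */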

#define PCI_CHIP_845_G      0x2562
#define PCI_CHIP_I830_M     0x3577
#define PCI_CHIP_I855_GM    0x3582
#define PCI_CHIP_I865_G     0x2572
#define PCI_CHIP_I915_G     0x2582
#define PCI_CHIP_I915_GM    0x2592
#define PCI_CHIP_I945_G     0x2772
#define PCI_CHIP_I945_GM    0x27A2
#define PCI_CHIP_I945_GME   0x27AE
#define PCI_CHIP_G33_G      0x29C2
#define PCI_CHIP_Q35_G      0x29B2
#define PCI_CHIP_Q33_G      0x29D2


/* ================================================================
 * intel_context.c:
 */

extern bool intelInitContext(struct intel_context *intel,
                             int api,
                             unsigned major_version,
                             unsigned minor_version,
                             const struct gl_config * mesaVis,
                             __DRIcontext * driContextPriv,
                             void *sharedContextPrivate,
                             struct dd_function_table *functions,
                             unsigned *dri_ctx_error);

extern void intelFinish(struct gl_context * ctx);
extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
extern void _intel_flush(struct gl_context * ctx, const char *file, int line);

#define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)

extern void intelInitDriverFunctions(struct dd_function_table *functions);

void intel_init_syncobj_functions(struct dd_function_table *functions);


/* ================================================================
 * intel_state.c:
 */

#define COMPAREFUNC_ALWAYS     0
#define COMPAREFUNC_NEVER      0x1
#define COMPAREFUNC_LESS       0x2
#define COMPAREFUNC_EQUAL      0x3
#define COMPAREFUNC_LEQUAL     0x4
#define COMPAREFUNC_GREATER    0x5
#define COMPAREFUNC_NOTEQUAL   0x6
#define COMPAREFUNC_GEQUAL     0x7

#define STENCILOP_KEEP         0
#define STENCILOP_ZERO         0x1
#define STENCILOP_REPLACE      0x2
#define STENCILOP_INCRSAT      0x3
#define STENCILOP_DECRSAT      0x4
#define STENCILOP_INCR         0x5
#define STENCILOP_DECR         0x6
#define STENCILOP_INVERT       0x7

#define LOGICOP_CLEAR          0
#define LOGICOP_NOR            0x1
#define LOGICOP_AND_INV        0x2
#define LOGICOP_COPY_INV       0x3
#define LOGICOP_AND_RVRSE      0x4
#define LOGICOP_INV            0x5
#define LOGICOP_XOR            0x6
#define LOGICOP_NAND           0x7
#define LOGICOP_AND            0x8
#define LOGICOP_EQUIV          0x9
#define LOGICOP_NOOP           0xa
#define LOGICOP_OR_INV         0xb
#define LOGICOP_COPY           0xc
#define LOGICOP_OR_RVRSE       0xd
#define LOGICOP_OR             0xe
#define LOGICOP_SET            0xf

#define BLENDFACT_ZERO                 0x01
#define BLENDFACT_ONE                  0x02
#define BLENDFACT_SRC_COLR             0x03
#define BLENDFACT_INV_SRC_COLR         0x04
#define BLENDFACT_SRC_ALPHA            0x05
#define BLENDFACT_INV_SRC_ALPHA        0x06
#define BLENDFACT_DST_ALPHA            0x07
#define BLENDFACT_INV_DST_ALPHA        0x08
#define BLENDFACT_DST_COLR             0x09
#define BLENDFACT_INV_DST_COLR         0x0a
#define BLENDFACT_SRC_ALPHA_SATURATE   0x0b
#define BLENDFACT_CONST_COLOR          0x0c
#define BLENDFACT_INV_CONST_COLOR      0x0d
#define BLENDFACT_CONST_ALPHA          0x0e
#define BLENDFACT_INV_CONST_ALPHA      0x0f
#define BLENDFACT_MASK                 0x0f

enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};

extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
extern int intel_translate_logic_op(GLenum opcode);

void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);

void
intel_downsample_for_dri2_flush(struct intel_context *intel,
                                __DRIdrawable *drawable);

void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
                                  uint32_t buffer_id);
void intel_init_texture_formats(struct gl_context *ctx);

/*======================================================================
 * Inline conversion functions.
 * These are better-typed than the macros used previously:
 */
static INLINE struct intel_context *
intel_context(struct gl_context * ctx)
{
   return (struct intel_context *) ctx;
}
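
/*
 * Illustrative only (not part of the original header): a dd_function_table
 * hook receives a struct gl_context and recovers the driver context with
 * this helper, which is valid because ctx is the first field of
 * struct intel_context:
 *
 *    static void example_hook(struct gl_context *ctx)    -- hypothetical
 *    {
 *       struct intel_context *intel = intel_context(ctx);
 *       INTEL_FIREVERTICES(intel);
 *    }
 */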

static INLINE bool
is_power_of_two(uint32_t value)
{
   return (value & (value - 1)) == 0;
}

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* INTELCONTEXT_INC */