/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef INTELCONTEXT_INC
#define INTELCONTEXT_INC


#include <stdbool.h>
#include <string.h>
#include "main/mtypes.h"
#include "main/mm.h"

#ifdef __cplusplus
extern "C" {
/* Evil hack for using libdrm in a c++ compiler. */
#define virtual virt
#endif

#include "drm.h"
#include "intel_bufmgr.h"

#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "i915_drm.h"

#ifdef __cplusplus
#undef virtual
#endif

#include "tnl/t_vertex.h"

#define TAG(x) intel##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define DV_PF_555  (1<<8)
#define DV_PF_565  (2<<8)
#define DV_PF_8888 (3<<8)
#define DV_PF_4444 (8<<8)
#define DV_PF_1555 (9<<8)

struct intel_region;
struct intel_context;

typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
                                intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
                                 intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);

/**
 * Bits for intel->Fallback field
 */
/*@{*/
#define INTEL_FALLBACK_DRAW_BUFFER     0x1
#define INTEL_FALLBACK_READ_BUFFER     0x2
#define INTEL_FALLBACK_DEPTH_BUFFER    0x4
#define INTEL_FALLBACK_STENCIL_BUFFER  0x8
#define INTEL_FALLBACK_USER            0x10
#define INTEL_FALLBACK_RENDERMODE      0x20
#define INTEL_FALLBACK_TEXTURE         0x40
#define INTEL_FALLBACK_DRIVER          0x1000  /**< first for drivers */
/*@}*/

extern void intelFallback(struct intel_context *intel, GLbitfield bit,
                          bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )

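/*
 * Illustrative usage (hypothetical call sites, not declared in this header):
 * a driver raises one of these bits when it hits state it cannot render in
 * hardware, and clears it again once the state becomes renderable:
 *
 *    FALLBACK(intel, INTEL_FALLBACK_TEXTURE, true);    // drop to swrast
 *    ...
 *    FALLBACK(intel, INTEL_FALLBACK_TEXTURE, false);   // back to hardware
 */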

#define INTEL_WRITE_PART  0x1
#define INTEL_WRITE_FULL  0x2
#define INTEL_READ        0x4

#define INTEL_MAX_FIXUP 64

#ifndef likely
#ifdef __GNUC__
#define likely(expr) (__builtin_expect(expr, 1))
#define unlikely(expr) (__builtin_expect(expr, 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#endif

struct intel_sync_object {
   struct gl_sync_object Base;

   /** Batch associated with this sync object */
   drm_intel_bo *bo;
};

struct brw_context;

struct intel_batchbuffer {
   /** Current batchbuffer being queued up. */
   drm_intel_bo *bo;
   /** Last BO submitted to the hardware.  Used for glFinish(). */
   drm_intel_bo *last_bo;
   /** BO for post-sync nonzero writes for gen6 workaround. */
   drm_intel_bo *workaround_bo;
   bool need_workaround_flush;

   struct cached_batch_item *cached_items;

   uint16_t emit, total;
   uint16_t used, reserved_space;
   uint32_t map[8192];
#define BATCH_SZ (8192*sizeof(uint32_t))

   uint32_t state_batch_offset;
   bool is_blit;
   bool needs_sol_reset;

   struct {
      uint16_t used;
      int reloc_count;
   } saved;
};
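
/*
 * Note on sizes: map[] above is the CPU-side staging area for batch commands,
 * and BATCH_SZ is simply its size in bytes (8192 dwords).  The accumulated
 * dwords are presumably uploaded into 'bo' when the batch is flushed; see
 * intel_batchbuffer.c.
 */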

/**
 * intel_context is derived from Mesa's context class: struct gl_context.
 */
struct intel_context
{
   struct gl_context ctx;  /**< base class, must be first field */

   struct
   {
      void (*destroy) (struct intel_context * intel);
      void (*emit_state) (struct intel_context * intel);
      void (*finish_batch) (struct intel_context * intel);
      void (*new_batch) (struct intel_context * intel);
      void (*emit_invarient_state) (struct intel_context * intel);
      void (*update_texture_state) (struct intel_context * intel);

      void (*render_start) (struct intel_context * intel);
      void (*render_prevalidate) (struct intel_context * intel);
      void (*set_draw_region) (struct intel_context * intel,
                               struct intel_region * draw_regions[],
                               struct intel_region * depth_region,
                               GLuint num_regions);
      void (*update_draw_buffer)(struct intel_context *intel);

      void (*reduced_primitive_state) (struct intel_context * intel,
                                       GLenum rprim);

      bool (*check_vertex_size) (struct intel_context * intel,
                                 GLuint expected);
      void (*invalidate_state) (struct intel_context *intel,
                                GLuint new_state);

      void (*assert_not_dirty) (struct intel_context *intel);

      void (*debug_batch)(struct intel_context *intel);
      void (*annotate_aub)(struct intel_context *intel);
      bool (*render_target_supported)(struct intel_context *intel,
                                      struct gl_renderbuffer *rb);

      /** Can HiZ be enabled on a depthbuffer of the given format? */
      bool (*is_hiz_depth_format)(struct intel_context *intel,
                                  gl_format format);

      /**
       * Surface state operations (i965+ only)
       * \{
       */
      void (*update_texture_surface)(struct gl_context *ctx,
                                     unsigned unit,
                                     uint32_t *binding_table,
                                     unsigned surf_index);
      void (*update_renderbuffer_surface)(struct brw_context *brw,
                                          struct gl_renderbuffer *rb,
                                          unsigned unit);
      void (*update_null_renderbuffer_surface)(struct brw_context *brw,
                                               unsigned unit);
      void (*create_constant_surface)(struct brw_context *brw,
                                      drm_intel_bo *bo,
                                      uint32_t offset,
                                      int width,
                                      uint32_t *out_offset);
      /** \} */
   } vtbl;
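
   /*
    * The function pointers above are installed by the chipset-specific
    * backend (i830/i915 for gen2/gen3, i965 for gen4+) when the context is
    * created; this header only defines the interface.
    */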

   GLbitfield Fallback;  /**< mask of INTEL_FALLBACK_x bits */
   GLuint NewGLState;

   dri_bufmgr *bufmgr;
   unsigned int maxBatchSize;

   /**
    * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
    */
   int gen;
   int gt;
   bool needs_ff_sync;
   bool is_haswell;
   bool is_g4x;
   bool is_945;
   bool has_separate_stencil;
   bool must_use_separate_stencil;
   bool has_hiz;
   bool has_llc;
   bool has_swizzling;

   int urb_size;

   drm_intel_context *hw_ctx;

   struct intel_batchbuffer batch;

   drm_intel_bo *first_post_swapbuffers_batch;
   bool need_throttle;
   bool no_batch_wrap;
   bool tnl_pipeline_running; /**< Set while i915 is inside _tnl_run_pipeline(). */

   struct
   {
      GLuint id;
      uint32_t start_ptr;          /**< for i8xx */
      uint32_t primitive;          /**< Current hardware primitive type */
      void (*flush) (struct intel_context *);
      drm_intel_bo *vb_bo;
      uint8_t *vb;
      unsigned int start_offset;   /**< Byte offset of primitive sequence */
      unsigned int current_offset; /**< Byte offset of next vertex */
      unsigned int count;          /**< Number of vertices in current primitive */
   } prim;

   struct {
      drm_intel_bo *bo;
      GLuint offset;
      uint32_t buffer_len;
      uint32_t buffer_offset;
      char buffer[4096];
   } upload;

   GLuint stats_wm;

   /* Offsets of fields within the current vertex:
    */
   GLuint coloroffset;
   GLuint specoffset;
   GLuint wpos_offset;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLfloat polygon_offset_scale;  /* dependent on depth_scale, bpp */

   bool hw_stencil;
   bool hw_stipple;
   bool no_rast;
   bool always_flush_batch;
   bool always_flush_cache;

   /* State for intelvb.c and inteltris.c.
    */
   GLuint RenderIndex;
   GLmatrix ViewportMatrix;
   GLenum render_primitive;
   GLenum reduced_primitive;  /**< Only gen < 6 */
   GLuint vertex_size;
   GLubyte *verts;  /* points to tnl->clipspace.vertex_buf */

   /* Fallback rasterization functions
    */
   intel_point_func draw_point;
   intel_line_func draw_line;
   intel_tri_func draw_tri;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   bool front_buffer_dirty;

   /**
    * Track whether front-buffer rendering is currently enabled
    *
    * A separate flag is used to track this in order to support MRT more
    * easily.
    */
   bool is_front_buffer_rendering;
   /**
    * Track whether front-buffer is the current read target.
    *
    * This is closely associated with is_front_buffer_rendering, but may
    * be set separately.  The DRI2 fake front buffer must be referenced
    * either way.
    */
   bool is_front_buffer_reading;

   /**
    * Count of intel_regions that are mapped.
    *
    * This allows us to assert that no batch buffer is emitted if a
    * region is mapped.
    */
   int num_mapped_regions;

   bool use_texture_tiling;
   bool use_early_z;

   int driFd;

   __DRIcontext *driContext;
   struct intel_screen *intelScreen;
   void (*saved_viewport)(struct gl_context * ctx,
                          GLint x, GLint y, GLsizei width, GLsizei height);

   /**
    * Configuration cache
    */
   driOptionCache optionCache;
};

extern char *__progname;


#define SUBPIXEL_X 0.125
#define SUBPIXEL_Y 0.125

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
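/*
 * ARRAY_SIZE() only works on objects of true array type; applied to a
 * pointer it silently yields sizeof(pointer) / sizeof(element).  For
 * example, ARRAY_SIZE(intel->vertex_attrs) evaluates to VERT_ATTRIB_MAX.
 */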

/**
 * Align a value down to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value  Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 *
 * \sa ALIGN()
 */
#define ROUND_DOWN_TO(value, alignment) ((value) & ~((alignment) - 1))
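/*
 * Worked examples (the alignment must be a power of two):
 *
 *    ROUND_DOWN_TO(37, 16)         == 32
 *    ROUND_DOWN_TO(0x1234, 0x1000) == 0x1000
 *    ROUND_DOWN_TO(64, 64)         == 64   (already aligned, unchanged)
 */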

#define IS_POWER_OF_TWO(val) (((val) & ((val) - 1)) == 0)

static INLINE uint32_t
U_FIXED(float value, uint32_t frac_bits)
{
   value *= (1 << frac_bits);
   return value < 0 ? 0 : value;
}

static INLINE uint32_t
S_FIXED(float value, uint32_t frac_bits)
{
   return value * (1 << frac_bits);
}
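
/*
 * Worked examples: the result has a precision of 1/(1 << frac_bits), so
 *
 *    U_FIXED(2.5f, 6)  == 160   (2.5 * 64)
 *    U_FIXED(-0.5f, 6) == 0     (negative input clamps to zero)
 *    S_FIXED(1.5f, 4)  == 24    (1.5 * 16; negative inputs are not clamped)
 */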

#define INTEL_FIREVERTICES(intel)              \
do {                                           \
   if ((intel)->prim.flush)                    \
      (intel)->prim.flush(intel);              \
} while (0)

/* ================================================================
 * From linux kernel i386 header files, copes with odd sizes better
 * than COPY_DWORDS would:
 * XXX Put this in src/mesa/main/imports.h ???
 */
#if defined(i386) || defined(__i386__)
static INLINE void * __memcpy(void * to, const void * from, size_t n)
{
   int d0, d1, d2;
   __asm__ __volatile__(
      "rep ; movsl\n\t"
      "testb $2,%b4\n\t"
      "je 1f\n\t"
      "movsw\n"
      "1:\ttestb $1,%b4\n\t"
      "je 2f\n\t"
      "movsb\n"
      "2:"
      : "=&c" (d0), "=&D" (d1), "=&S" (d2)
      : "0" (n/4), "q" (n), "1" ((long) to), "2" ((long) from)
      : "memory");
   return (to);
}
#else
#define __memcpy(a,b,c) memcpy(a,b,c)
#endif


/* ================================================================
 * Debugging:
 */
extern int INTEL_DEBUG;

#define DEBUG_TEXTURE     0x1
#define DEBUG_STATE       0x2
#define DEBUG_IOCTL       0x4
#define DEBUG_BLIT        0x8
#define DEBUG_MIPTREE     0x10
#define DEBUG_PERF        0x20
#define DEBUG_VERBOSE     0x40
#define DEBUG_BATCH       0x80
#define DEBUG_PIXEL       0x100
#define DEBUG_BUFMGR      0x200
#define DEBUG_REGION      0x400
#define DEBUG_FBO         0x800
#define DEBUG_GS          0x1000
#define DEBUG_SYNC        0x2000
#define DEBUG_PRIMS       0x4000
#define DEBUG_VERTS       0x8000
#define DEBUG_DRI         0x10000
#define DEBUG_SF          0x20000
#define DEBUG_SANITY      0x40000
#define DEBUG_SLEEP       0x80000
#define DEBUG_STATS       0x100000
#define DEBUG_TILE        0x200000
#define DEBUG_WM          0x400000
#define DEBUG_URB         0x800000
#define DEBUG_VS          0x1000000
#define DEBUG_CLIP        0x2000000
#define DEBUG_AUB         0x4000000
#define DEBUG_SHADER_TIME 0x8000000
#define DEBUG_NO16        0x20000000

#ifdef HAVE_ANDROID_PLATFORM
#define LOG_TAG "INTEL-MESA"
#include <cutils/log.h>
#ifndef ALOGW
#define ALOGW LOGW
#endif
#define dbg_printf(...) ALOGW(__VA_ARGS__)
#else
#define dbg_printf(...) printf(__VA_ARGS__)
#endif /* HAVE_ANDROID_PLATFORM */

#define DBG(...) do {                                   \
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))         \
      dbg_printf(__VA_ARGS__);                          \
} while(0)
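
/*
 * DBG() relies on each driver source file defining FILE_DEBUG_FLAG to one of
 * the DEBUG_* bits above before using it, e.g. (illustrative):
 *
 *    #define FILE_DEBUG_FLAG DEBUG_BLIT
 *    ...
 *    DBG("%s: %dx%d blit\n", __FUNCTION__, width, height);
 */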

#define fallback_debug(...) do {                        \
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))              \
      dbg_printf(__VA_ARGS__);                          \
} while(0)

#define perf_debug(...) do {                            \
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))              \
      dbg_printf(__VA_ARGS__);                          \
} while(0)

#define WARN_ONCE(cond, fmt...) do {                    \
   if (unlikely(cond)) {                                \
      static bool _warned = false;                      \
      if (!_warned) {                                   \
         fprintf(stderr, "WARNING: ");                  \
         fprintf(stderr, fmt);                          \
         _warned = true;                                \
      }                                                 \
   }                                                    \
} while (0)
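
/*
 * WARN_ONCE() takes a condition plus printf-style arguments (GNU named
 * variadic macro syntax) and prints only the first time the condition is
 * seen true, e.g. (illustrative, hypothetical variable):
 *
 *    WARN_ONCE(num_samples > 4, "unsupported sample count %d\n", num_samples);
 */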

#define PCI_CHIP_845_G    0x2562
#define PCI_CHIP_I830_M   0x3577
#define PCI_CHIP_I855_GM  0x3582
#define PCI_CHIP_I865_G   0x2572
#define PCI_CHIP_I915_G   0x2582
#define PCI_CHIP_I915_GM  0x2592
#define PCI_CHIP_I945_G   0x2772
#define PCI_CHIP_I945_GM  0x27A2
#define PCI_CHIP_I945_GME 0x27AE
#define PCI_CHIP_G33_G    0x29C2
#define PCI_CHIP_Q35_G    0x29B2
#define PCI_CHIP_Q33_G    0x29D2


/* ================================================================
 * intel_context.c:
 */

extern bool intelInitContext(struct intel_context *intel,
                             int api,
                             unsigned major_version,
                             unsigned minor_version,
                             const struct gl_config * mesaVis,
                             __DRIcontext * driContextPriv,
                             void *sharedContextPrivate,
                             struct dd_function_table *functions,
                             unsigned *dri_ctx_error);

extern void intelFinish(struct gl_context * ctx);
extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
extern void _intel_flush(struct gl_context * ctx, const char *file, int line);

#define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)

extern void intelInitDriverFunctions(struct dd_function_table *functions);

void intel_init_syncobj_functions(struct dd_function_table *functions);


/* ================================================================
 * intel_state.c:
 */

#define COMPAREFUNC_ALWAYS   0
#define COMPAREFUNC_NEVER    0x1
#define COMPAREFUNC_LESS     0x2
#define COMPAREFUNC_EQUAL    0x3
#define COMPAREFUNC_LEQUAL   0x4
#define COMPAREFUNC_GREATER  0x5
#define COMPAREFUNC_NOTEQUAL 0x6
#define COMPAREFUNC_GEQUAL   0x7

#define STENCILOP_KEEP    0
#define STENCILOP_ZERO    0x1
#define STENCILOP_REPLACE 0x2
#define STENCILOP_INCRSAT 0x3
#define STENCILOP_DECRSAT 0x4
#define STENCILOP_INCR    0x5
#define STENCILOP_DECR    0x6
#define STENCILOP_INVERT  0x7

#define LOGICOP_CLEAR     0
#define LOGICOP_NOR       0x1
#define LOGICOP_AND_INV   0x2
#define LOGICOP_COPY_INV  0x3
#define LOGICOP_AND_RVRSE 0x4
#define LOGICOP_INV       0x5
#define LOGICOP_XOR       0x6
#define LOGICOP_NAND      0x7
#define LOGICOP_AND       0x8
#define LOGICOP_EQUIV     0x9
#define LOGICOP_NOOP      0xa
#define LOGICOP_OR_INV    0xb
#define LOGICOP_COPY      0xc
#define LOGICOP_OR_RVRSE  0xd
#define LOGICOP_OR        0xe
#define LOGICOP_SET       0xf

#define BLENDFACT_ZERO               0x01
#define BLENDFACT_ONE                0x02
#define BLENDFACT_SRC_COLR           0x03
#define BLENDFACT_INV_SRC_COLR       0x04
#define BLENDFACT_SRC_ALPHA          0x05
#define BLENDFACT_INV_SRC_ALPHA      0x06
#define BLENDFACT_DST_ALPHA          0x07
#define BLENDFACT_INV_DST_ALPHA      0x08
#define BLENDFACT_DST_COLR           0x09
#define BLENDFACT_INV_DST_COLR       0x0a
#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
#define BLENDFACT_CONST_COLOR        0x0c
#define BLENDFACT_INV_CONST_COLOR    0x0d
#define BLENDFACT_CONST_ALPHA        0x0e
#define BLENDFACT_INV_CONST_ALPHA    0x0f
#define BLENDFACT_MASK               0x0f

enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};

extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
extern int intel_translate_logic_op(GLenum opcode);

void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);

void
intel_downsample_for_dri2_flush(struct intel_context *intel,
                                __DRIdrawable *drawable);

void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
                                  uint32_t buffer_id);
void intel_init_texture_formats(struct gl_context *ctx);

/*======================================================================
 * Inline conversion functions.
 * These are better-typed than the macros used previously:
 */
static INLINE struct intel_context *
intel_context(struct gl_context * ctx)
{
   return (struct intel_context *) ctx;
}

static INLINE bool
is_power_of_two(uint32_t value)
{
   return (value & (value - 1)) == 0;
}

#ifdef __cplusplus
}
#endif

#endif