/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef INTELCONTEXT_INC
#define INTELCONTEXT_INC


#include <stdbool.h>
#include <string.h>
#include "main/mtypes.h"
#include "main/mm.h"

#ifdef __cplusplus
extern "C" {
/* Evil hack for using libdrm in a c++ compiler. */
#define virtual virt
#endif

#include "drm.h"
#include "intel_bufmgr.h"

#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "i915_drm.h"

#ifdef __cplusplus
#undef virtual
}
#endif

#include "tnl/t_vertex.h"

#define TAG(x) intel##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define DV_PF_555 (1<<8)
#define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8)
#define DV_PF_4444 (8<<8)
#define DV_PF_1555 (9<<8)

struct intel_region;
struct intel_context;

typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
                                intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
                                 intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);

/**
 * Bits for intel->Fallback field
 */
/*@{*/
#define INTEL_FALLBACK_DRAW_BUFFER 0x1
#define INTEL_FALLBACK_READ_BUFFER 0x2
#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
#define INTEL_FALLBACK_USER 0x10
#define INTEL_FALLBACK_RENDERMODE 0x20
#define INTEL_FALLBACK_TEXTURE 0x40
#define INTEL_FALLBACK_DRIVER 0x1000 /**< first for drivers */
/*@}*/

extern void intelFallback(struct intel_context *intel, GLbitfield bit,
                          bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
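/*
 * Usage sketch (illustrative, not taken from a specific driver file): a
 * fallback bit is raised while a feature cannot be handled in hardware and
 * cleared again once it can, e.g.
 *
 *    FALLBACK(intel, INTEL_FALLBACK_TEXTURE, true);
 *    ...
 *    FALLBACK(intel, INTEL_FALLBACK_TEXTURE, false);
 *
 * intelFallback() sets or clears the given bit in intel->Fallback so the
 * driver can drop to (and later return from) software rasterization.
 */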


#define INTEL_WRITE_PART 0x1
#define INTEL_WRITE_FULL 0x2
#define INTEL_READ 0x4

#define INTEL_MAX_FIXUP 64

#ifndef likely
#ifdef __GNUC__
#define likely(expr) (__builtin_expect(expr, 1))
#define unlikely(expr) (__builtin_expect(expr, 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#endif

struct intel_sync_object {
   struct gl_sync_object Base;

   /** Batch associated with this sync object */
   drm_intel_bo *bo;
};

struct brw_context;

/**
 * intel_context is derived from Mesa's context class: struct gl_context.
 */
struct intel_context
{
   struct gl_context ctx; /**< base class, must be first field */

   struct
   {
      void (*destroy) (struct intel_context * intel);
      void (*emit_state) (struct intel_context * intel);
      void (*finish_batch) (struct intel_context * intel);
      void (*new_batch) (struct intel_context * intel);
      void (*emit_invarient_state) (struct intel_context * intel);
      void (*update_texture_state) (struct intel_context * intel);

      void (*render_start) (struct intel_context * intel);
      void (*render_prevalidate) (struct intel_context * intel);
      void (*set_draw_region) (struct intel_context * intel,
                               struct intel_region * draw_regions[],
                               struct intel_region * depth_region,
                               GLuint num_regions);
      void (*update_draw_buffer)(struct intel_context *intel);

      void (*reduced_primitive_state) (struct intel_context * intel,
                                       GLenum rprim);

      bool (*check_vertex_size) (struct intel_context * intel,
                                 GLuint expected);
      void (*invalidate_state) (struct intel_context *intel,
                                GLuint new_state);

      void (*assert_not_dirty) (struct intel_context *intel);

      void (*debug_batch)(struct intel_context *intel);
      bool (*render_target_supported)(struct intel_context *intel,
                                      struct gl_renderbuffer *rb);

      /** Can HiZ be enabled on a depthbuffer of the given format? */
      bool (*is_hiz_depth_format)(struct intel_context *intel,
                                  gl_format format);

      /**
       * \name HiZ operations
       *
       * See the following sections of the Sandy Bridge PRM, Volume 1, Part2:
       *   - 7.5.3.1 Depth Buffer Clear
       *   - 7.5.3.2 Depth Buffer Resolve
       *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
       * \{
       */
      void (*resolve_hiz_slice)(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer);

      void (*resolve_depth_slice)(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer);
      /** \} */

      /**
       * Surface state operations (i965+ only)
       * \{
       */
      void (*update_texture_surface)(struct gl_context *ctx, unsigned unit);
      void (*update_renderbuffer_surface)(struct brw_context *brw,
                                          struct gl_renderbuffer *rb,
                                          unsigned unit);
      void (*update_null_renderbuffer_surface)(struct brw_context *brw,
                                               unsigned unit);
      void (*create_constant_surface)(struct brw_context *brw,
                                      drm_intel_bo *bo,
                                      int width,
                                      uint32_t *out_offset);
      /** \} */
   } vtbl;

   GLbitfield Fallback; /**< mask of INTEL_FALLBACK_x bits */
   GLuint NewGLState;

   dri_bufmgr *bufmgr;
   unsigned int maxBatchSize;

   /**
    * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
    */
   int gen;
   int gt;
   bool needs_ff_sync;
   bool is_haswell;
   bool is_g4x;
   bool is_945;
   bool has_separate_stencil;
   bool must_use_separate_stencil;
   bool has_hiz;
   bool has_llc;
   bool has_swizzling;

   int urb_size;

   struct intel_batchbuffer {
      /** Current batchbuffer being queued up. */
      drm_intel_bo *bo;
      /** Last BO submitted to the hardware. Used for glFinish(). */
      drm_intel_bo *last_bo;
      /** BO for post-sync nonzero writes for gen6 workaround. */
      drm_intel_bo *workaround_bo;
      bool need_workaround_flush;

      struct cached_batch_item *cached_items;

      uint16_t emit, total;
      uint16_t used, reserved_space;
      uint32_t map[8192];
#define BATCH_SZ (8192*sizeof(uint32_t))

      uint32_t state_batch_offset;
      bool is_blit;
      bool needs_sol_reset;

      struct {
         uint16_t used;
         int reloc_count;
      } saved;
   } batch;

   drm_intel_bo *first_post_swapbuffers_batch;
   bool need_throttle;
   bool no_batch_wrap;
   bool tnl_pipeline_running; /**< Set while i915's _tnl_run_pipeline. */

   struct
   {
      GLuint id;
      uint32_t start_ptr; /**< for i8xx */
      uint32_t primitive; /**< Current hardware primitive type */
      void (*flush) (struct intel_context *);
      drm_intel_bo *vb_bo;
      uint8_t *vb;
      unsigned int start_offset; /**< Byte offset of primitive sequence */
      unsigned int current_offset; /**< Byte offset of next vertex */
      unsigned int count; /**< Number of vertices in current primitive */
   } prim;

   struct {
      drm_intel_bo *bo;
      GLuint offset;
      uint32_t buffer_len;
      uint32_t buffer_offset;
      char buffer[4096];
   } upload;

   GLuint stats_wm;

   /* Offsets of fields within the current vertex:
    */
   GLuint coloroffset;
   GLuint specoffset;
   GLuint wpos_offset;

   struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
   GLuint vertex_attr_count;

   GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */

   bool hw_stencil;
   bool hw_stipple;
   bool no_rast;
   bool always_flush_batch;
   bool always_flush_cache;

   /* State for intelvb.c and inteltris.c.
    */
   GLuint RenderIndex;
   GLmatrix ViewportMatrix;
   GLenum render_primitive;
   GLenum reduced_primitive; /*< Only gen < 6 */
   GLuint vertex_size;
   GLubyte *verts; /* points to tnl->clipspace.vertex_buf */

   /* Fallback rasterization functions
    */
   intel_point_func draw_point;
   intel_line_func draw_line;
   intel_tri_func draw_tri;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   bool front_buffer_dirty;

   /**
    * Track whether front-buffer rendering is currently enabled
    *
    * A separate flag is used to track this in order to support MRT more
    * easily.
    */
   bool is_front_buffer_rendering;
   /**
    * Track whether front-buffer is the current read target.
    *
    * This is closely associated with is_front_buffer_rendering, but may
    * be set separately. The DRI2 fake front buffer must be referenced
    * either way.
    */
   bool is_front_buffer_reading;

   /**
    * Count of intel_regions that are mapped.
    *
    * This allows us to assert that no batch buffer is emitted if a
    * region is mapped.
    */
   int num_mapped_regions;

   bool use_texture_tiling;
   bool use_early_z;

   int driFd;

   __DRIcontext *driContext;
   struct intel_screen *intelScreen;
   void (*saved_viewport)(struct gl_context * ctx,
                          GLint x, GLint y, GLsizei width, GLsizei height);

   /**
    * Configuration cache
    */
   driOptionCache optionCache;
};

extern char *__progname;


#define SUBPIXEL_X 0.125
#define SUBPIXEL_Y 0.125

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))

/**
 * Align a value up to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded up.
 *
 * \param value Value to be rounded
 * \param alignment Alignment value to be used. This must be a power of two.
 *
 * \sa ROUND_DOWN_TO()
 */
#define ALIGN(value, alignment) (((value) + alignment - 1) & ~(alignment - 1))

/**
 * Align a value down to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value Value to be rounded
 * \param alignment Alignment value to be used. This must be a power of two.
 *
 * \sa ALIGN()
 */
#define ROUND_DOWN_TO(value, alignment) ((value) & ~(alignment - 1))

#define IS_POWER_OF_TWO(val) (((val) & (val - 1)) == 0)
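
/*
 * Worked examples (illustrative):
 *
 *    ALIGN(13, 8)         -> 16   rounded up to the next multiple of 8
 *    ALIGN(16, 8)         -> 16   already-aligned values are unchanged
 *    ROUND_DOWN_TO(13, 8) -> 8    rounded down to a multiple of 8
 *    IS_POWER_OF_TWO(64)  -> 1
 *    IS_POWER_OF_TWO(48)  -> 0
 *
 * ALIGN() and ROUND_DOWN_TO() are implemented with bit masks, so they are
 * only meaningful for power-of-two alignments.
 */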

static INLINE uint32_t
U_FIXED(float value, uint32_t frac_bits)
{
   value *= (1 << frac_bits);
   return value < 0 ? 0 : value;
}

static INLINE uint32_t
S_FIXED(float value, uint32_t frac_bits)
{
   return value * (1 << frac_bits);
}
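
/*
 * Worked example (illustrative): with 4 fractional bits the scale factor is
 * 1 << 4 = 16, so U_FIXED(1.5f, 4) yields 24, and negative inputs clamp to 0.
 * S_FIXED() applies the same scaling without the clamp. Both are handy when
 * packing floating-point values into hardware fixed-point state fields.
 */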

#define INTEL_FIREVERTICES(intel)               \
do {                                            \
   if ((intel)->prim.flush)                     \
      (intel)->prim.flush(intel);               \
} while (0)

/* ================================================================
 * From linux kernel i386 header files, copes with odd sizes better
 * than COPY_DWORDS would:
 * XXX Put this in src/mesa/main/imports.h ???
 */
#if defined(i386) || defined(__i386__)
static INLINE void * __memcpy(void * to, const void * from, size_t n)
{
   int d0, d1, d2;
   __asm__ __volatile__(
      "rep ; movsl\n\t"
      "testb $2,%b4\n\t"
      "je 1f\n\t"
      "movsw\n"
      "1:\ttestb $1,%b4\n\t"
      "je 2f\n\t"
      "movsb\n"
      "2:"
      : "=&c" (d0), "=&D" (d1), "=&S" (d2)
      : "0" (n/4), "q" (n), "1" ((long) to), "2" ((long) from)
      : "memory");
   return (to);
}
#else
#define __memcpy(a,b,c) memcpy(a,b,c)
#endif


/* ================================================================
 * Debugging:
 */
extern int INTEL_DEBUG;

#define DEBUG_TEXTURE 0x1
#define DEBUG_STATE 0x2
#define DEBUG_IOCTL 0x4
#define DEBUG_BLIT 0x8
#define DEBUG_MIPTREE 0x10
#define DEBUG_FALLBACKS 0x20
#define DEBUG_VERBOSE 0x40
#define DEBUG_BATCH 0x80
#define DEBUG_PIXEL 0x100
#define DEBUG_BUFMGR 0x200
#define DEBUG_REGION 0x400
#define DEBUG_FBO 0x800
#define DEBUG_GS 0x1000
#define DEBUG_SYNC 0x2000
#define DEBUG_PRIMS 0x4000
#define DEBUG_VERTS 0x8000
#define DEBUG_DRI 0x10000
#define DEBUG_SF 0x20000
#define DEBUG_SANITY 0x40000
#define DEBUG_SLEEP 0x80000
#define DEBUG_STATS 0x100000
#define DEBUG_TILE 0x200000
#define DEBUG_WM 0x400000
#define DEBUG_URB 0x800000
#define DEBUG_VS 0x1000000
#define DEBUG_CLIP 0x2000000
#define DEBUG_AUB 0x4000000

#define DBG(...) do {                                           \
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))                 \
      printf(__VA_ARGS__);                                      \
} while(0)
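
/*
 * DBG() intentionally references FILE_DEBUG_FLAG without defining it here;
 * the convention is that each .c file selects its debug channel before using
 * the macro, along the lines of (illustrative):
 *
 *    #define FILE_DEBUG_FLAG DEBUG_BATCH
 *    ...
 *    DBG("flush: %d bytes used\n", used);
 *
 * Messages appear only when the matching DEBUG_* bit is set in INTEL_DEBUG.
 */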

#define fallback_debug(...) do {                                \
   if (unlikely(INTEL_DEBUG & DEBUG_FALLBACKS))                 \
      printf(__VA_ARGS__);                                      \
} while(0)

#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_I855_GM 0x3582
#define PCI_CHIP_I865_G 0x2572
#define PCI_CHIP_I915_G 0x2582
#define PCI_CHIP_I915_GM 0x2592
#define PCI_CHIP_I945_G 0x2772
#define PCI_CHIP_I945_GM 0x27A2
#define PCI_CHIP_I945_GME 0x27AE
#define PCI_CHIP_G33_G 0x29C2
#define PCI_CHIP_Q35_G 0x29B2
#define PCI_CHIP_Q33_G 0x29D2


/* ================================================================
 * intel_context.c:
 */

extern bool intelInitContext(struct intel_context *intel,
                             int api,
                             const struct gl_config * mesaVis,
                             __DRIcontext * driContextPriv,
                             void *sharedContextPrivate,
                             struct dd_function_table *functions);

extern void intelFinish(struct gl_context * ctx);
extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
extern void _intel_flush(struct gl_context * ctx, const char *file, int line);

#define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)

extern void intelInitDriverFunctions(struct dd_function_table *functions);

void intel_init_syncobj_functions(struct dd_function_table *functions);


/* ================================================================
 * intel_state.c:
 */
extern void intelInitStateFuncs(struct dd_function_table *functions);

#define COMPAREFUNC_ALWAYS 0
#define COMPAREFUNC_NEVER 0x1
#define COMPAREFUNC_LESS 0x2
#define COMPAREFUNC_EQUAL 0x3
#define COMPAREFUNC_LEQUAL 0x4
#define COMPAREFUNC_GREATER 0x5
#define COMPAREFUNC_NOTEQUAL 0x6
#define COMPAREFUNC_GEQUAL 0x7

#define STENCILOP_KEEP 0
#define STENCILOP_ZERO 0x1
#define STENCILOP_REPLACE 0x2
#define STENCILOP_INCRSAT 0x3
#define STENCILOP_DECRSAT 0x4
#define STENCILOP_INCR 0x5
#define STENCILOP_DECR 0x6
#define STENCILOP_INVERT 0x7

#define LOGICOP_CLEAR 0
#define LOGICOP_NOR 0x1
#define LOGICOP_AND_INV 0x2
#define LOGICOP_COPY_INV 0x3
#define LOGICOP_AND_RVRSE 0x4
#define LOGICOP_INV 0x5
#define LOGICOP_XOR 0x6
#define LOGICOP_NAND 0x7
#define LOGICOP_AND 0x8
#define LOGICOP_EQUIV 0x9
#define LOGICOP_NOOP 0xa
#define LOGICOP_OR_INV 0xb
#define LOGICOP_COPY 0xc
#define LOGICOP_OR_RVRSE 0xd
#define LOGICOP_OR 0xe
#define LOGICOP_SET 0xf

#define BLENDFACT_ZERO 0x01
#define BLENDFACT_ONE 0x02
#define BLENDFACT_SRC_COLR 0x03
#define BLENDFACT_INV_SRC_COLR 0x04
#define BLENDFACT_SRC_ALPHA 0x05
#define BLENDFACT_INV_SRC_ALPHA 0x06
#define BLENDFACT_DST_ALPHA 0x07
#define BLENDFACT_INV_DST_ALPHA 0x08
#define BLENDFACT_DST_COLR 0x09
#define BLENDFACT_INV_DST_COLR 0x0a
#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
#define BLENDFACT_CONST_COLOR 0x0c
#define BLENDFACT_INV_CONST_COLOR 0x0d
#define BLENDFACT_CONST_ALPHA 0x0e
#define BLENDFACT_INV_CONST_ALPHA 0x0f
#define BLENDFACT_MASK 0x0f

enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};

extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
extern int intel_translate_logic_op(GLenum opcode);

void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);

void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
                                  uint32_t buffer_id);
void intel_init_texture_formats(struct gl_context *ctx);

/*======================================================================
 * Inline conversion functions.
 * These are better-typed than the macros used previously:
 */
static INLINE struct intel_context *
intel_context(struct gl_context * ctx)
{
   return (struct intel_context *) ctx;
}

static INLINE bool
is_power_of_two(uint32_t value)
{
   return (value & (value - 1)) == 0;
}
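
/* Note: like IS_POWER_OF_TWO() above, this also returns true for 0, since
 * (0 & (0 - 1)) == 0; callers that need to exclude zero must check for it
 * separately.
 */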

#endif