1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #ifndef INTELCONTEXT_INC
29 #define INTELCONTEXT_INC
34 #include "main/mtypes.h"
39 /* Evil hack for using libdrm in a c++ compiler. */
44 #include "intel_bufmgr.h"
46 #include "intel_screen.h"
47 #include "intel_tex_obj.h"
54 #include "tnl/t_vertex.h"
59 #define INTEL_WRITE_PART 0x1
60 #define INTEL_WRITE_FULL 0x2
61 #define INTEL_READ 0x4
65 #define likely(expr) (__builtin_expect(expr, 1))
66 #define unlikely(expr) (__builtin_expect(expr, 0))
68 #define likely(expr) (expr)
69 #define unlikely(expr) (expr)
/**
 * Driver wrapper around Mesa's gl_sync_object (GL fence/sync support).
 *
 * NOTE(review): this view of the struct is truncated — additional members
 * and the closing brace fall outside the visible span.
 */
struct intel_sync_object
{
   /** Mesa base object; placed first so pointers can be cast to the base. */
   struct gl_sync_object Base;

   /** Batch associated with this sync object */
/**
 * Bookkeeping for the hardware command batch buffer.
 *
 * NOTE(review): this view of the struct is truncated — several members
 * (including the current batch BO and map pointers) and the closing brace
 * fall outside the visible span.
 */
struct intel_batchbuffer
{
   /** Current batchbuffer being queued up. */

   /** Last BO submitted to the hardware. Used for glFinish(). */
   drm_intel_bo *last_bo;

   /** BO for post-sync nonzero writes for gen6 workaround. */
   drm_intel_bo *workaround_bo;
   bool need_workaround_flush;

   /* Cache of recently emitted items — presumably keyed for reuse across
    * batches; TODO confirm against the cached_batch_item definition. */
   struct cached_batch_item *cached_items;

   /* NOTE(review): assumed to be byte counts of batch space consumed and
    * space held in reserve for the batch epilogue — confirm at use sites. */
   uint16_t used, reserved_space;

/* Fixed batch buffer size: 8192 dwords (32 KiB). */
#define BATCH_SZ (8192*sizeof(uint32_t))

   /* Offset within the batch where indirect state starts — TODO confirm. */
   uint32_t state_batch_offset;
   bool needs_sol_reset;
110 * intel_context is derived from Mesa's context class: struct gl_context.
114 struct gl_context ctx
; /**< base class, must be first field */
118 void (*destroy
) (struct brw_context
* brw
);
119 void (*finish_batch
) (struct brw_context
* brw
);
120 void (*new_batch
) (struct brw_context
* brw
);
122 void (*update_texture_surface
)(struct gl_context
*ctx
,
124 uint32_t *binding_table
,
125 unsigned surf_index
);
126 void (*update_renderbuffer_surface
)(struct brw_context
*brw
,
127 struct gl_renderbuffer
*rb
,
130 void (*update_null_renderbuffer_surface
)(struct brw_context
*brw
,
132 void (*create_constant_surface
)(struct brw_context
*brw
,
136 uint32_t *out_offset
,
140 * Send the appropriate state packets to configure depth, stencil, and
141 * HiZ buffers (i965+ only)
143 void (*emit_depth_stencil_hiz
)(struct brw_context
*brw
,
144 struct intel_mipmap_tree
*depth_mt
,
145 uint32_t depth_offset
,
146 uint32_t depthbuffer_format
,
147 uint32_t depth_surface_type
,
148 struct intel_mipmap_tree
*stencil_mt
,
149 bool hiz
, bool separate_stencil
,
150 uint32_t width
, uint32_t height
,
151 uint32_t tile_x
, uint32_t tile_y
);
160 * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
167 bool has_separate_stencil
;
168 bool must_use_separate_stencil
;
173 drm_intel_context
*hw_ctx
;
175 struct intel_batchbuffer batch
;
177 drm_intel_bo
*first_post_swapbuffers_batch
;
182 * Set if we're either a debug context or the INTEL_DEBUG=perf environment
183 * variable is set, this is the flag indicating to do expensive work that
184 * might lead to a perf_debug() call.
192 uint32_t buffer_offset
;
196 uint32_t max_gtt_map_object_size
;
201 bool always_flush_batch
;
202 bool always_flush_cache
;
203 bool disable_throttling
;
205 GLenum reduced_primitive
;
208 * Set if rendering has occured to the drawable's front buffer.
210 * This is used in the DRI2 case to detect that glFlush should also copy
211 * the contents of the fake front buffer to the real front buffer.
213 bool front_buffer_dirty
;
216 * Track whether front-buffer rendering is currently enabled
218 * A separate flag is used to track this in order to support MRT more
221 bool is_front_buffer_rendering
;
223 * Track whether front-buffer is the current read target.
225 * This is closely associated with is_front_buffer_rendering, but may
226 * be set separately. The DRI2 fake front buffer must be referenced
229 bool is_front_buffer_reading
;
233 __DRIcontext
*driContext
;
234 struct intel_screen
*intelScreen
;
/**
 * Align a value down to an alignment value.
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value      Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 */
/* Fix: \c alignment must be fully parenthesized in the expansion; the
 * previous form broke for arguments containing operators with lower
 * precedence than '-', e.g. ROUND_DOWN_TO(x, 1 << 3).
 */
#define ROUND_DOWN_TO(value, alignment) ((value) & ~((alignment) - 1))
/**
 * Convert a float to unsigned fixed point with \c frac_bits fractional
 * bits, clamping negative inputs to zero.
 *
 * \param value      Floating-point value to convert.
 * \param frac_bits  Number of fractional bits in the result.
 * \return value * 2^frac_bits truncated toward zero, or 0 if negative.
 */
static inline uint32_t
U_FIXED(float value, uint32_t frac_bits)
{
   /* 1u avoids signed-shift overflow (UB) when frac_bits == 31. */
   value *= (float)(1u << frac_bits);
   /* Clamp: converting a negative float to uint32_t would be undefined. */
   return value < 0 ? 0 : value;
}
/**
 * Convert a float to two's-complement signed fixed point with
 * \c frac_bits fractional bits.
 *
 * \param value      Floating-point value to convert.
 * \param frac_bits  Number of fractional bits in the result.
 * \return value * 2^frac_bits truncated toward zero, returned as the
 *         two's-complement bit pattern in a uint32_t.
 */
static inline uint32_t
S_FIXED(float value, uint32_t frac_bits)
{
   /* Convert through int32_t: a negative float converted directly to an
    * unsigned type is undefined behavior in C; going through int32_t
    * yields the intended two's-complement encoding.
    */
   return (int32_t)(value * (float)(1u << frac_bits));
}
263 /* ================================================================
264 * From linux kernel i386 header files, copes with odd sizes better
265 * than COPY_DWORDS would:
266 * XXX Put this in src/mesa/main/imports.h ???
268 #if defined(i386) || defined(__i386__)
269 static INLINE
void * __memcpy(void * to
, const void * from
, size_t n
)
272 __asm__
__volatile__(
277 "1:\ttestb $1,%b4\n\t"
281 : "=&c" (d0
), "=&D" (d1
), "=&S" (d2
)
282 :"0" (n
/4), "q" (n
),"1" ((long) to
),"2" ((long) from
)
287 #define __memcpy(a,b,c) memcpy(a,b,c)
291 /* ================================================================
294 extern int INTEL_DEBUG
;
296 #define DEBUG_TEXTURE 0x1
297 #define DEBUG_STATE 0x2
298 #define DEBUG_IOCTL 0x4
299 #define DEBUG_BLIT 0x8
300 #define DEBUG_MIPTREE 0x10
301 #define DEBUG_PERF 0x20
302 #define DEBUG_BATCH 0x80
303 #define DEBUG_PIXEL 0x100
304 #define DEBUG_BUFMGR 0x200
305 #define DEBUG_REGION 0x400
306 #define DEBUG_FBO 0x800
307 #define DEBUG_GS 0x1000
308 #define DEBUG_SYNC 0x2000
309 #define DEBUG_PRIMS 0x4000
310 #define DEBUG_VERTS 0x8000
311 #define DEBUG_DRI 0x10000
312 #define DEBUG_SF 0x20000
313 #define DEBUG_STATS 0x100000
314 #define DEBUG_WM 0x400000
315 #define DEBUG_URB 0x800000
316 #define DEBUG_VS 0x1000000
317 #define DEBUG_CLIP 0x2000000
318 #define DEBUG_AUB 0x4000000
319 #define DEBUG_SHADER_TIME 0x8000000
320 #define DEBUG_BLORP 0x10000000
321 #define DEBUG_NO16 0x20000000
323 #ifdef HAVE_ANDROID_PLATFORM
324 #define LOG_TAG "INTEL-MESA"
325 #include <cutils/log.h>
329 #define dbg_printf(...) ALOGW(__VA_ARGS__)
331 #define dbg_printf(...) printf(__VA_ARGS__)
332 #endif /* HAVE_ANDROID_PLATFORM */
334 #define DBG(...) do { \
335 if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG)) \
336 dbg_printf(__VA_ARGS__); \
339 #define perf_debug(...) do { \
340 static GLuint msg_id = 0; \
341 if (unlikely(INTEL_DEBUG & DEBUG_PERF)) \
342 dbg_printf(__VA_ARGS__); \
343 if (intel->perf_debug) \
344 _mesa_gl_debug(&intel->ctx, &msg_id, \
345 MESA_DEBUG_TYPE_PERFORMANCE, \
346 MESA_DEBUG_SEVERITY_MEDIUM, \
350 #define WARN_ONCE(cond, fmt...) do { \
351 if (unlikely(cond)) { \
352 static bool _warned = false; \
353 static GLuint msg_id = 0; \
355 fprintf(stderr, "WARNING: "); \
356 fprintf(stderr, fmt); \
359 _mesa_gl_debug(ctx, &msg_id, \
360 MESA_DEBUG_TYPE_OTHER, \
361 MESA_DEBUG_SEVERITY_HIGH, fmt); \
366 /* ================================================================
370 extern bool intelInitContext(struct brw_context
*brw
,
372 unsigned major_version
,
373 unsigned minor_version
,
374 const struct gl_config
* mesaVis
,
375 __DRIcontext
* driContextPriv
,
376 void *sharedContextPrivate
,
377 struct dd_function_table
*functions
,
378 unsigned *dri_ctx_error
);
380 extern void intelFinish(struct gl_context
* ctx
);
381 extern void _intel_flush(struct gl_context
* ctx
, const char *file
, int line
);
383 #define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)
385 extern void intelInitDriverFunctions(struct dd_function_table
*functions
);
387 void intel_init_syncobj_functions(struct dd_function_table
*functions
);
390 DRI_CONF_BO_REUSE_DISABLED
,
391 DRI_CONF_BO_REUSE_ALL
394 extern int intel_translate_shadow_compare_func(GLenum func
);
395 extern int intel_translate_compare_func(GLenum func
);
396 extern int intel_translate_stencil_op(GLenum op
);
397 extern int intel_translate_logic_op(GLenum opcode
);
399 void intel_update_renderbuffers(__DRIcontext
*context
,
400 __DRIdrawable
*drawable
);
401 void intel_prepare_render(struct brw_context
*brw
);
404 intel_resolve_for_dri2_flush(struct brw_context
*brw
,
405 __DRIdrawable
*drawable
);
408 intelInitExtensions(struct gl_context
*ctx
);
410 intelInitClearFuncs(struct dd_function_table
*functions
);
412 /*======================================================================
413 * Inline conversion functions.
414 * These are better-typed than the macros used previously:
/**
 * Downcast a gl_context pointer to the driver's intel_context.
 *
 * Valid because the driver context embeds its struct gl_context as the
 * first field ("base class, must be first field"), so the two pointers
 * are interchangeable.
 *
 * \param ctx  GL context (may be NULL).
 * \return The same pointer, typed as struct intel_context.
 */
static inline struct intel_context *
intel_context(struct gl_context * ctx)
{
   return (struct intel_context *) ctx;
}
/**
 * Test whether \c value is a power of two.
 *
 * NOTE(review): returns true for 0 as well (0 has no bits set), which is
 * not a mathematical power of two; behavior preserved — confirm callers
 * (alignment checks) expect this before changing it.
 *
 * \param value  Value to test.
 * \return true if value has at most one bit set.
 */
static inline bool
is_power_of_two(uint32_t value)
{
   return (value & (value - 1)) == 0;
}