/* $Id: m_debug_util.h,v 1.3 2001/03/30 14:44:43 gareth Exp $ */

/*
 * Mesa 3-D graphics library
 * Version:  3.5
 *
 * Copyright (C) 1999-2001  Brian Paul   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#ifndef __M_DEBUG_UTIL_H__
#define __M_DEBUG_UTIL_H__

#ifdef DEBUG  /* This code is only used for debugging */


/* Comment this out to deactivate the cycle counter.
 * NOTE: this only works on CPUs that support the 'rdtsc' instruction
 * (586 or higher), so don't expect it to help when debugging Mesa on a 386.
 */
#if defined(__GNUC__) && defined(__i386__) && defined(USE_X86_ASM)
#define RUN_DEBUG_BENCHMARK
#endif

#define TEST_COUNT		128	/* size of the tested vector array */

#define REQUIRED_PRECISION	10	/* minimum number of matching significand bits */
#define MAX_PRECISION		24	/* max. precision possible */


#ifdef RUN_DEBUG_BENCHMARK
/* Overhead of profiling counter in cycles.  Automatically adjusted to
 * your machine at run time - counter initialization should give very
 * consistent results.
 */
extern long counter_overhead;

/* This is the value of the environment variable MESA_PROFILE, and is
 * used to determine if we should benchmark the functions as well as
 * verify their correctness.
 */
extern char *mesa_profile;
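
/* A sketch of how a test driver might obtain this value (the actual code
 * lives outside this header, presumably in one of the m_debug_*.c files;
 * treat the lines below as illustrative only):
 *
 *    mesa_profile = getenv( "MESA_PROFILE" );
 *    if ( mesa_profile ) {
 *       ... run the benchmarks as well as the correctness checks ...
 *    }
 */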

/* Modify the number of tests if you like.
 * We take the minimum of all results, because every error should be
 * positive (time used by other processes, task switches etc.).
 * It is assumed that all calculations are done in the cache.
 */

#if 1 /* PPro, PII, PIII version */

/* Profiling on the P6 architecture requires a little more work, due to
 * the internal out-of-order execution.  We must perform a serializing
 * 'cpuid' instruction before and after the 'rdtsc' instructions to make
 * sure no other uops are executed when we sample the timestamp counter.
 */
#define INIT_COUNTER()						\
   do {								\
      int cycle_i;						\
      counter_overhead = LONG_MAX;				\
      for ( cycle_i = 0 ; cycle_i < 8 ; cycle_i++ ) {		\
	 long cycle_tmp1 = 0, cycle_tmp2 = 0;			\
	 __asm__ __volatile__ ( "push %%ebx       \n"		\
				"xor %%eax, %%eax \n"		\
				"cpuid            \n"		\
				"rdtsc            \n"		\
				"mov %%eax, %0    \n"		\
				"xor %%eax, %%eax \n"		\
				"cpuid            \n"		\
				"pop %%ebx        \n"		\
				"push %%ebx       \n"		\
				"xor %%eax, %%eax \n"		\
				"cpuid            \n"		\
				"rdtsc            \n"		\
				"mov %%eax, %1    \n"		\
				"xor %%eax, %%eax \n"		\
				"cpuid            \n"		\
				"pop %%ebx        \n"		\
				: "=m" (cycle_tmp1), "=m" (cycle_tmp2) \
				: : "eax", "ecx", "edx" );	\
	 if ( counter_overhead > (cycle_tmp2 - cycle_tmp1) ) {	\
	    counter_overhead = cycle_tmp2 - cycle_tmp1;		\
	 }							\
      }								\
   } while (0)

#define BEGIN_RACE(x)						\
   x = LONG_MAX;						\
   for ( cycle_i = 0 ; cycle_i < 10 ; cycle_i++ ) {		\
      long cycle_tmp1 = 0, cycle_tmp2 = 0;			\
      __asm__ __volatile__ ( "push %%ebx       \n"		\
			     "xor %%eax, %%eax \n"		\
			     "cpuid            \n"		\
			     "rdtsc            \n"		\
			     "mov %%eax, %0    \n"		\
			     "xor %%eax, %%eax \n"		\
			     "cpuid            \n"		\
			     "pop %%ebx        \n"		\
			     : "=m" (cycle_tmp1)		\
			     : : "eax", "ecx", "edx" );

#define END_RACE(x)						\
      __asm__ __volatile__ ( "push %%ebx       \n"		\
			     "xor %%eax, %%eax \n"		\
			     "cpuid            \n"		\
			     "rdtsc            \n"		\
			     "mov %%eax, %0    \n"		\
			     "xor %%eax, %%eax \n"		\
			     "cpuid            \n"		\
			     "pop %%ebx        \n"		\
			     : "=m" (cycle_tmp2)		\
			     : : "eax", "ecx", "edx" );		\
      if ( x > (cycle_tmp2 - cycle_tmp1) ) {			\
	 x = cycle_tmp2 - cycle_tmp1;				\
      }								\
   }								\
   x -= counter_overhead;

#else /* PPlain, PMMX version */

/* To ensure accurate results, we stall the pipelines with the
 * non-pairable 'cdq' instruction.  This ensures all the code being
 * profiled is complete when the 'rdtsc' instruction executes.
 */
#define INIT_COUNTER()						\
   do {								\
      int cycle_i;						\
      counter_overhead = LONG_MAX;				\
      for ( cycle_i = 0 ; cycle_i < 32 ; cycle_i++ ) {		\
	 long cycle_tmp1, cycle_tmp2, dummy;			\
	 __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp1) );	\
	 __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp2) );	\
	 __asm__ ( "cdq" );					\
	 __asm__ ( "cdq" );					\
	 __asm__ ( "rdtsc" : "=a" (cycle_tmp1), "=d" (dummy) ); \
	 __asm__ ( "cdq" );					\
	 __asm__ ( "cdq" );					\
	 __asm__ ( "rdtsc" : "=a" (cycle_tmp2), "=d" (dummy) ); \
	 if ( counter_overhead > (cycle_tmp2 - cycle_tmp1) )	\
	    counter_overhead = cycle_tmp2 - cycle_tmp1;		\
      }								\
   } while (0)

#define BEGIN_RACE(x)						\
   x = LONG_MAX;						\
   for ( cycle_i = 0 ; cycle_i < 16 ; cycle_i++ ) {		\
      long cycle_tmp1, cycle_tmp2, dummy;			\
      __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp1) );		\
      __asm__ ( "mov %%eax, %0" : "=a" (cycle_tmp2) );		\
      __asm__ ( "cdq" );					\
      __asm__ ( "cdq" );					\
      __asm__ ( "rdtsc" : "=a" (cycle_tmp1), "=d" (dummy) );


#define END_RACE(x)						\
      __asm__ ( "cdq" );					\
      __asm__ ( "cdq" );					\
      __asm__ ( "rdtsc" : "=a" (cycle_tmp2), "=d" (dummy) );	\
      if ( x > (cycle_tmp2 - cycle_tmp1) )			\
	 x = cycle_tmp2 - cycle_tmp1;				\
   }								\
   x -= counter_overhead;

#endif

#else

#define BEGIN_RACE(x)
#define END_RACE(x)

#endif
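
/* Sketch of how the timing macros are typically wrapped around a call
 * under test (illustrative only -- the function and variable names below
 * are not defined in this header):
 *
 *    #ifdef RUN_DEBUG_BENCHMARK
 *    int cycle_i;        (loop counter required by BEGIN_RACE/END_RACE)
 *    long bench_cycles;
 *    #endif
 *
 *    #ifdef RUN_DEBUG_BENCHMARK
 *    if ( mesa_profile ) {
 *       INIT_COUNTER();
 *       BEGIN_RACE( bench_cycles );
 *       func_under_test( dest, m, source );   (hypothetical candidate)
 *       END_RACE( bench_cycles );
 *    }
 *    #endif
 *
 * BEGIN_RACE opens a loop that repeats the timed call and records the
 * minimum cycle count in its argument; END_RACE closes that loop and
 * subtracts counter_overhead from the result.
 */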


/* =============================================================
 * Helper functions
 */

/* Return a pseudo-random GLfloat in [-1, 1], quantized to a coarse grid
 * (multiples of 1/4096) so the values are exactly representable.
 */
static GLfloat rnd( void )
{
   GLfloat f = (GLfloat)rand() / (GLfloat)RAND_MAX;
   GLfloat gran = (GLfloat)(1 << 13);

   f = (GLfloat)(GLint)(f * gran) / gran;

   return f * 2.0 - 1.0;
}

/* Return roughly the number of leading significand bits on which a and b
 * agree: MAX_PRECISION for an exact match, 0 if either value is zero.
 */
static int significand_match( GLfloat a, GLfloat b )
{
   GLfloat d = a - b;
   int a_ex, b_ex, d_ex;

   if ( d == 0.0F ) {
      return MAX_PRECISION;   /* Exact match */
   }

   if ( a == 0.0F || b == 0.0F ) {
      /* It would probably be better to check if the
       * non-zero number is denormalized and return
       * the index of the highest set bit here.
       */
      return 0;
   }

   frexp( a, &a_ex );
   frexp( b, &b_ex );
   frexp( d, &d_ex );

   if ( a_ex < b_ex ) {
      return a_ex - d_ex;
   } else {
      return b_ex - d_ex;
   }
}
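
/* Illustrative use (a sketch, not code from this file): a computed vector
 * `test` would typically be checked element by element against a reference
 * vector `ref`, with REQUIRED_PRECISION as the acceptance threshold:
 *
 *    if ( significand_match( test[i], ref[i] ) < REQUIRED_PRECISION ) {
 *       printf( "mismatch in element %d: %f vs. %f\n", i, test[i], ref[i] );
 *    }
 */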

enum { NIL = 0, ONE = 1, NEG = -1, VAR = 2 };

static void init_matrix( GLfloat *m )
{
   m[0] = 63.0; m[4] = 43.0; m[ 8] = 29.0; m[12] = 43.0;
   m[1] = 55.0; m[5] = 17.0; m[ 9] = 31.0; m[13] =  7.0;
   m[2] = 44.0; m[6] =  9.0; m[10] =  7.0; m[14] =  3.0;
   m[3] = 11.0; m[7] = 23.0; m[11] = 91.0; m[15] =  9.0;
}


/* Ensure our arrays are correctly aligned.
 */
#if defined(__GNUC__)
#  define ALIGN16 __attribute__ ((aligned (16)))
#elif defined(_MSC_VER)
#  define ALIGN16 __declspec(align(16)) /* GH: Does this work? */
#else
#  warning "ALIGN16 will not 16-byte align!"
#  define ALIGN16
#endif
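
/* Illustrative declaration and initialization of an aligned test array
 * (a sketch, not code from this file; `data` and `i` are placeholder
 * names, and with MSVC the __declspec form may need to precede the
 * declarator):
 *
 *    static GLfloat ALIGN16 data[TEST_COUNT][4];
 *    int i;
 *
 *    for ( i = 0 ; i < TEST_COUNT ; i++ ) {
 *       data[i][0] = rnd();
 *       data[i][1] = rnd();
 *       data[i][2] = rnd();
 *       data[i][3] = rnd();
 *    }
 */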


#endif /* DEBUG */

#endif /* __M_DEBUG_UTIL_H__ */