/* mmx.h

   MultiMedia eXtensions GCC interface library for IA32.

   To use this library, simply include this header file
   and compile with GCC.  You MUST have inlining enabled
   in order for mmx_ok() to work; this can be done by
   simply using -O on the GCC command line.

   Compiling with -DMMX_TRACE will cause detailed trace
   output to be sent to stderr for each mmx operation.
   This adds lots of code, and obviously slows execution to
   a crawl, but can be very useful for debugging.

   THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
   LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
   AND FITNESS FOR ANY PARTICULAR PURPOSE.

   1997-98 by H. Dietz and R. Fisher

 History:
   97-98*  R.Fisher  Early versions
   980501  R.Fisher  Original release
   980611* H.Dietz   Rewrite, correctly implementing inlines, and
           R.Fisher  including direct register accesses.
   980616  R.Fisher  Release of 980611 as 980616.
   980714  R.Fisher  Minor corrections to Makefile, etc.
   980715  R.Fisher  mmx_ok() now prevents the optimizer from using
                     clobbered values.
                     mmx_ok() now checks whether the cpuid instruction
                     is available before trying to use it.
   980726* R.Fisher  mm_support() searches for AMD 3DNow, Cyrix
                     Extended MMX, and standard MMX.  It returns a
                     value which is positive if any of these are
                     supported, and can be masked with constants to
                     see which.  mmx_ok() is now a call to this.
   980726* R.Fisher  Added i2r support for the shift functions.
   980919  R.Fisher  Fixed AMD extended feature recognition bug.
   980921  R.Fisher  Added definition/check for _MMX_H.
                     Added "float s[2]" to mmx_t for use with
                     3DNow and EMMX, so the same mmx_t can be used.
   981013  R.Fisher  Fixed cpuid function 1 bug (looked at wrong reg).
                     Fixed psllq_i2r error in mmxtest.c.

   * Unreleased (internal or interim) versions

 Notes:
   It appears that the latest gas has the pand problem fixed, therefore
   I'll undefine BROKEN_PAND by default.
   String compares may be quicker than the multiple test/jumps in the
   vendor test sequence in mmx_ok(), but I'm not concerned with that
   right now.

 Acknowledgments:
   Jussi Laako for pointing out the errors ultimately found to be
   connected to the failure to notify the optimizer of clobbered values.
   Roger Hardiman for reminding us that CPUID isn't everywhere, and that
   someone may actually try to use this on a machine without CPUID.
   Also for suggesting code for checking this.
   Robert Dale for pointing out the AMD recognition bug.
   Jimmy Mayfield and Carl Witty for pointing out the Intel recognition
   bug.
   Carl Witty for pointing out the psllq_i2r test bug.
*/

#ifndef _MMX_H
#define _MMX_H

/* #define MMX_TRACE */

/* Warning: at this writing, the version of GAS packaged
   with most Linux distributions does not handle the
   parallel AND operation mnemonic correctly.  If the
   symbol BROKEN_PAND is defined, a slower alternative
   coding will be used.  If execution of mmxtest results
   in an illegal instruction fault, define this symbol.
*/
#undef BROKEN_PAND


/* The type of a value that fits in an MMX register
   (note that long long constant values MUST be suffixed
   by LL and unsigned long long values by ULL, lest
   they be truncated by the compiler)
*/
typedef union {
    long long           q;      /* Quadword (64-bit) value */
    unsigned long long  uq;     /* Unsigned Quadword */
    int                 d[2];   /* 2 Doubleword (32-bit) values */
    unsigned int        ud[2];  /* 2 Unsigned Doubleword */
    short               w[4];   /* 4 Word (16-bit) values */
    unsigned short      uw[4];  /* 4 Unsigned Word */
    char                b[8];   /* 8 Byte (8-bit) values */
    unsigned char       ub[8];  /* 8 Unsigned Byte */
    float               s[2];   /* 2 Single-precision (32-bit) values */
} mmx_t;
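
/* Usage sketch (added commentary, not in the original header): the LL/ULL
   rule above in practice; "fill" is a hypothetical variable.
        mmx_t fill;
        fill.q     = 0x00FF00FF00FF00FFLL;  // suffix keeps all 64 bits
        fill.uw[3] = 0x0100;                // or poke individual fields
*/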

/* Helper macros for the instruction macros that follow...
   (note that memory-to-register, m2r, instructions are nearly
   as efficient as register-to-register, r2r, instructions;
   however, memory-to-memory instructions are really simulated
   as a convenience, and are only 1/3 as efficient)
*/
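
/* Added commentary: each m2m macro below expands to a load/op/store triple
   through mm0, so it costs roughly three instructions per call.  For a
   chain of operations it is cheaper to stay in registers, e.g. (a sketch
   using the wrappers defined later in this file, with hypothetical
   variables a, b, c, dest):
        movq_m2r(a, mm1);       // load once
        paddw_m2r(b, mm1);      // operate register-resident
        paddw_m2r(c, mm1);
        movq_r2m(mm1, dest);    // store once
*/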
#ifdef MMX_TRACE

/* Include the stuff for printing a trace to stderr...
*/

#include <stdio.h>

#define mmx_i2r(op, imm, reg) \
    { \
        mmx_t mmx_trace; \
        mmx_trace = (imm); \
        fprintf(stderr, #op "_i2r(" #imm "=0x%016llx, ", mmx_trace.q); \
        __asm__ __volatile__ ("movq %%" #reg ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #reg "=0x%016llx) => ", mmx_trace.q); \
        __asm__ __volatile__ (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "X" (imm)); \
        __asm__ __volatile__ ("movq %%" #reg ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #reg "=0x%016llx\n", mmx_trace.q); \
    }

#define mmx_m2r(op, mem, reg) \
    { \
        mmx_t mmx_trace; \
        mmx_trace = (mem); \
        fprintf(stderr, #op "_m2r(" #mem "=0x%016llx, ", mmx_trace.q); \
        __asm__ __volatile__ ("movq %%" #reg ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #reg "=0x%016llx) => ", mmx_trace.q); \
        __asm__ __volatile__ (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "X" (mem)); \
        __asm__ __volatile__ ("movq %%" #reg ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #reg "=0x%016llx\n", mmx_trace.q); \
    }

#define mmx_r2m(op, reg, mem) \
    { \
        mmx_t mmx_trace; \
        __asm__ __volatile__ ("movq %%" #reg ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #op "_r2m(" #reg "=0x%016llx, ", mmx_trace.q); \
        mmx_trace = (mem); \
        fprintf(stderr, #mem "=0x%016llx) => ", mmx_trace.q); \
        __asm__ __volatile__ (#op " %%" #reg ", %0" \
                              : "=X" (mem) \
                              : /* nothing */ ); \
        mmx_trace = (mem); \
        fprintf(stderr, #mem "=0x%016llx\n", mmx_trace.q); \
    }

#define mmx_r2r(op, regs, regd) \
    { \
        mmx_t mmx_trace; \
        __asm__ __volatile__ ("movq %%" #regs ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #op "_r2r(" #regs "=0x%016llx, ", mmx_trace.q); \
        __asm__ __volatile__ ("movq %%" #regd ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #regd "=0x%016llx) => ", mmx_trace.q); \
        __asm__ __volatile__ (#op " %" #regs ", %" #regd); \
        __asm__ __volatile__ ("movq %%" #regd ", %0" \
                              : "=X" (mmx_trace) \
                              : /* nothing */ ); \
        fprintf(stderr, #regd "=0x%016llx\n", mmx_trace.q); \
    }

#define mmx_m2m(op, mems, memd) \
    { \
        mmx_t mmx_trace; \
        mmx_trace = (mems); \
        fprintf(stderr, #op "_m2m(" #mems "=0x%016llx, ", mmx_trace.q); \
        mmx_trace = (memd); \
        fprintf(stderr, #memd "=0x%016llx) => ", mmx_trace.q); \
        __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
                              #op " %1, %%mm0\n\t" \
                              "movq %%mm0, %0" \
                              : "=X" (memd) \
                              : "X" (mems)); \
        mmx_trace = (memd); \
        fprintf(stderr, #memd "=0x%016llx\n", mmx_trace.q); \
    }

#else

/* These macros are a lot simpler without the tracing...
*/

#define mmx_i2r(op, imm, reg) \
    __asm__ __volatile__ (#op " $" #imm ", %%" #reg \
                          : /* nothing */ \
                          : /* nothing */ )

#define mmx_m2r(op, mem, reg) \
    __asm__ __volatile__ (#op " %0, %%" #reg \
                          : /* nothing */ \
                          : "X" (mem))

#define mmx_r2m(op, reg, mem) \
    __asm__ __volatile__ (#op " %%" #reg ", %0" \
                          : "=X" (mem) \
                          : /* nothing */ )

#define mmx_r2r(op, regs, regd) \
    __asm__ __volatile__ (#op " %" #regs ", %" #regd)

#define mmx_m2m(op, mems, memd) \
    __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
                          #op " %1, %%mm0\n\t" \
                          "movq %%mm0, %0" \
                          : "=X" (memd) \
                          : "X" (mems))

#endif


/* 1x64 MOVe Quadword
   (this is both a load and a store...
   in fact, it is the only way to store)
*/
#define movq_m2r(var, reg)      mmx_m2r(movq, var, reg)
#define movq_r2m(reg, var)      mmx_r2m(movq, reg, var)
#define movq_r2r(regs, regd)    mmx_r2r(movq, regs, regd)
#define movq(vars, vard) \
    __asm__ __volatile__ ("movq %1, %%mm0\n\t" \
                          "movq %%mm0, %0" \
                          : "=X" (vard) \
                          : "X" (vars))
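
/* Usage sketch (added commentary): a typical load/compute/store round trip;
   "src" and "dst" are hypothetical mmx_t variables.
        movq_m2r(src, mm2);     // 64-bit load into %mm2
        paddb_r2r(mm2, mm3);    // combine with another register
        movq_r2m(mm3, dst);     // movq is also the only path back to memory
*/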


/* 1x32 MOVe Doubleword
   (like movq, this is both load and store...
   but is most useful for moving things between
   mmx registers and ordinary registers)
*/
#define movd_m2r(var, reg)      mmx_m2r(movd, var, reg)
#define movd_r2m(reg, var)      mmx_r2m(movd, reg, var)
#define movd_r2r(regs, regd)    mmx_r2r(movd, regs, regd)
#define movd(vars, vard) \
    __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
                          "movd %%mm0, %0" \
                          : "=X" (vard) \
                          : "X" (vars))
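
/* Usage sketch (added commentary): movd is the usual way to get a 32-bit
   scalar into an MMX register; the upper half of the destination register
   is zeroed.  "pixel" is a hypothetical int.
        int pixel = 0x00FF00FF;
        movd_m2r(pixel, mm0);   // mm0 = 0x0000000000FF00FF
*/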


/* 2x32, 4x16, and 8x8 Parallel ADDs
*/
#define paddd_m2r(var, reg)     mmx_m2r(paddd, var, reg)
#define paddd_r2r(regs, regd)   mmx_r2r(paddd, regs, regd)
#define paddd(vars, vard)       mmx_m2m(paddd, vars, vard)

#define paddw_m2r(var, reg)     mmx_m2r(paddw, var, reg)
#define paddw_r2r(regs, regd)   mmx_r2r(paddw, regs, regd)
#define paddw(vars, vard)       mmx_m2m(paddw, vars, vard)

#define paddb_m2r(var, reg)     mmx_m2r(paddb, var, reg)
#define paddb_r2r(regs, regd)   mmx_r2r(paddb, regs, regd)
#define paddb(vars, vard)       mmx_m2m(paddb, vars, vard)


/* 4x16 and 8x8 Parallel ADDs using Saturation arithmetic
*/
#define paddsw_m2r(var, reg)    mmx_m2r(paddsw, var, reg)
#define paddsw_r2r(regs, regd)  mmx_r2r(paddsw, regs, regd)
#define paddsw(vars, vard)      mmx_m2m(paddsw, vars, vard)

#define paddsb_m2r(var, reg)    mmx_m2r(paddsb, var, reg)
#define paddsb_r2r(regs, regd)  mmx_r2r(paddsb, regs, regd)
#define paddsb(vars, vard)      mmx_m2m(paddsb, vars, vard)


/* 4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
*/
#define paddusw_m2r(var, reg)   mmx_m2r(paddusw, var, reg)
#define paddusw_r2r(regs, regd) mmx_r2r(paddusw, regs, regd)
#define paddusw(vars, vard)     mmx_m2m(paddusw, vars, vard)

#define paddusb_m2r(var, reg)   mmx_m2r(paddusb, var, reg)
#define paddusb_r2r(regs, regd) mmx_r2r(paddusb, regs, regd)
#define paddusb(vars, vard)     mmx_m2m(paddusb, vars, vard)
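
/* Usage sketch (added commentary): wraparound vs. saturation.  With plain
   paddb, byte 0xF0 + 0x20 wraps to 0x10; paddusb clamps the same sum to
   0xFF, which is usually what you want when brightening 8-bit pixels.
   "pixels" and "bias" are hypothetical.
        mmx_t pixels, bias;
        pixels.uq = 0xF0F0F0F0F0F0F0F0ULL;
        bias.uq   = 0x2020202020202020ULL;
        paddusb(bias, pixels);  // every byte of pixels clamps to 0xFF
*/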


/* 2x32, 4x16, and 8x8 Parallel SUBs
*/
#define psubd_m2r(var, reg)     mmx_m2r(psubd, var, reg)
#define psubd_r2r(regs, regd)   mmx_r2r(psubd, regs, regd)
#define psubd(vars, vard)       mmx_m2m(psubd, vars, vard)

#define psubw_m2r(var, reg)     mmx_m2r(psubw, var, reg)
#define psubw_r2r(regs, regd)   mmx_r2r(psubw, regs, regd)
#define psubw(vars, vard)       mmx_m2m(psubw, vars, vard)

#define psubb_m2r(var, reg)     mmx_m2r(psubb, var, reg)
#define psubb_r2r(regs, regd)   mmx_r2r(psubb, regs, regd)
#define psubb(vars, vard)       mmx_m2m(psubb, vars, vard)


/* 4x16 and 8x8 Parallel SUBs using Saturation arithmetic
*/
#define psubsw_m2r(var, reg)    mmx_m2r(psubsw, var, reg)
#define psubsw_r2r(regs, regd)  mmx_r2r(psubsw, regs, regd)
#define psubsw(vars, vard)      mmx_m2m(psubsw, vars, vard)

#define psubsb_m2r(var, reg)    mmx_m2r(psubsb, var, reg)
#define psubsb_r2r(regs, regd)  mmx_r2r(psubsb, regs, regd)
#define psubsb(vars, vard)      mmx_m2m(psubsb, vars, vard)


/* 4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
*/
#define psubusw_m2r(var, reg)   mmx_m2r(psubusw, var, reg)
#define psubusw_r2r(regs, regd) mmx_r2r(psubusw, regs, regd)
#define psubusw(vars, vard)     mmx_m2m(psubusw, vars, vard)

#define psubusb_m2r(var, reg)   mmx_m2r(psubusb, var, reg)
#define psubusb_r2r(regs, regd) mmx_r2r(psubusb, regs, regd)
#define psubusb(vars, vard)     mmx_m2m(psubusb, vars, vard)


/* 4x16 Parallel MULs giving Low 4x16 portions of results
*/
#define pmullw_m2r(var, reg)    mmx_m2r(pmullw, var, reg)
#define pmullw_r2r(regs, regd)  mmx_r2r(pmullw, regs, regd)
#define pmullw(vars, vard)      mmx_m2m(pmullw, vars, vard)


/* 4x16 Parallel MULs giving High 4x16 portions of results
*/
#define pmulhw_m2r(var, reg)    mmx_m2r(pmulhw, var, reg)
#define pmulhw_r2r(regs, regd)  mmx_r2r(pmulhw, regs, regd)
#define pmulhw(vars, vard)      mmx_m2m(pmulhw, vars, vard)


/* 4x16->2x32 Parallel Mul-ADD
   (muls like pmullw, then adds adjacent 16-bit fields
   in the multiply result to make the final 2x32 result)
*/
#define pmaddwd_m2r(var, reg)   mmx_m2r(pmaddwd, var, reg)
#define pmaddwd_r2r(regs, regd) mmx_r2r(pmaddwd, regs, regd)
#define pmaddwd(vars, vard)     mmx_m2m(pmaddwd, vars, vard)
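
/* Added commentary: pmaddwd is a 4-element multiply-accumulate.  If mm4
   holds words {a0,a1,a2,a3} and the hypothetical mmx_t b holds
   {b0,b1,b2,b3}, then after
        pmaddwd_m2r(b, mm4);
   mm4 holds the two doublewords {a0*b0 + a1*b1, a2*b2 + a3*b3}; one more
   32-bit add collapses them into a 4-term dot product.
*/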


/* 1x64 bitwise AND
*/
#ifdef BROKEN_PAND
/* Emulate pand with two pandn's: pandn computes "~dest & src", so the
   first pandn against all-ones complements the destination, and the
   second then gives "~~dest & src" == "dest & src".
*/
#define pand_m2r(var, reg) \
    { \
        mmx_m2r(pandn, (mmx_t) -1LL, reg); \
        mmx_m2r(pandn, var, reg); \
    }
#define pand_r2r(regs, regd) \
    { \
        mmx_m2r(pandn, (mmx_t) -1LL, regd); \
        mmx_r2r(pandn, regs, regd); \
    }
#define pand(vars, vard) \
    { \
        movq_m2r(vard, mm0); \
        mmx_m2r(pandn, (mmx_t) -1LL, mm0); \
        mmx_m2r(pandn, vars, mm0); \
        movq_r2m(mm0, vard); \
    }
#else
#define pand_m2r(var, reg)      mmx_m2r(pand, var, reg)
#define pand_r2r(regs, regd)    mmx_r2r(pand, regs, regd)
#define pand(vars, vard)        mmx_m2m(pand, vars, vard)
#endif


/* 1x64 bitwise AND with NOT of the destination (dest = ~dest & src)
*/
#define pandn_m2r(var, reg)     mmx_m2r(pandn, var, reg)
#define pandn_r2r(regs, regd)   mmx_r2r(pandn, regs, regd)
#define pandn(vars, vard)       mmx_m2m(pandn, vars, vard)


/* 1x64 bitwise OR
*/
#define por_m2r(var, reg)       mmx_m2r(por, var, reg)
#define por_r2r(regs, regd)     mmx_r2r(por, regs, regd)
#define por(vars, vard)         mmx_m2m(por, vars, vard)


/* 1x64 bitwise eXclusive OR
*/
#define pxor_m2r(var, reg)      mmx_m2r(pxor, var, reg)
#define pxor_r2r(regs, regd)    mmx_r2r(pxor, regs, regd)
#define pxor(vars, vard)        mmx_m2m(pxor, vars, vard)


/* 2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
   (resulting fields are either 0 or -1)
*/
#define pcmpeqd_m2r(var, reg)   mmx_m2r(pcmpeqd, var, reg)
#define pcmpeqd_r2r(regs, regd) mmx_r2r(pcmpeqd, regs, regd)
#define pcmpeqd(vars, vard)     mmx_m2m(pcmpeqd, vars, vard)

#define pcmpeqw_m2r(var, reg)   mmx_m2r(pcmpeqw, var, reg)
#define pcmpeqw_r2r(regs, regd) mmx_r2r(pcmpeqw, regs, regd)
#define pcmpeqw(vars, vard)     mmx_m2m(pcmpeqw, vars, vard)

#define pcmpeqb_m2r(var, reg)   mmx_m2r(pcmpeqb, var, reg)
#define pcmpeqb_r2r(regs, regd) mmx_r2r(pcmpeqb, regs, regd)
#define pcmpeqb(vars, vard)     mmx_m2m(pcmpeqb, vars, vard)


/* 2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
   (resulting fields are either 0 or -1)
*/
#define pcmpgtd_m2r(var, reg)   mmx_m2r(pcmpgtd, var, reg)
#define pcmpgtd_r2r(regs, regd) mmx_r2r(pcmpgtd, regs, regd)
#define pcmpgtd(vars, vard)     mmx_m2m(pcmpgtd, vars, vard)

#define pcmpgtw_m2r(var, reg)   mmx_m2r(pcmpgtw, var, reg)
#define pcmpgtw_r2r(regs, regd) mmx_r2r(pcmpgtw, regs, regd)
#define pcmpgtw(vars, vard)     mmx_m2m(pcmpgtw, vars, vard)

#define pcmpgtb_m2r(var, reg)   mmx_m2r(pcmpgtb, var, reg)
#define pcmpgtb_r2r(regs, regd) mmx_r2r(pcmpgtb, regs, regd)
#define pcmpgtb(vars, vard)     mmx_m2m(pcmpgtb, vars, vard)
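
/* Usage sketch (added commentary): the 0/-1 masks from the compares allow
   branchless selection.  A per-word max(a, b) from the primitives above
   (a and b are hypothetical mmx_t variables):
        movq_m2r(a, mm1);       // mm1 = a
        movq_m2r(b, mm2);       // mm2 = b
        movq_r2r(mm1, mm3);
        pcmpgtw_r2r(mm2, mm3);  // mm3 = (a > b) ? 0xFFFF : 0, per word
        pand_r2r(mm3, mm1);     // keep a where a > b
        pandn_r2r(mm2, mm3);    // mm3 = ~mask & b: keep b elsewhere
        por_r2r(mm1, mm3);      // mm3 = per-word max(a, b)
*/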


/* 1x64, 2x32, and 4x16 Parallel Shift Left Logical
*/
#define psllq_i2r(imm, reg)     mmx_i2r(psllq, imm, reg)
#define psllq_m2r(var, reg)     mmx_m2r(psllq, var, reg)
#define psllq_r2r(regs, regd)   mmx_r2r(psllq, regs, regd)
#define psllq(vars, vard)       mmx_m2m(psllq, vars, vard)

#define pslld_i2r(imm, reg)     mmx_i2r(pslld, imm, reg)
#define pslld_m2r(var, reg)     mmx_m2r(pslld, var, reg)
#define pslld_r2r(regs, regd)   mmx_r2r(pslld, regs, regd)
#define pslld(vars, vard)       mmx_m2m(pslld, vars, vard)

#define psllw_i2r(imm, reg)     mmx_i2r(psllw, imm, reg)
#define psllw_m2r(var, reg)     mmx_m2r(psllw, var, reg)
#define psllw_r2r(regs, regd)   mmx_r2r(psllw, regs, regd)
#define psllw(vars, vard)       mmx_m2m(psllw, vars, vard)


/* 1x64, 2x32, and 4x16 Parallel Shift Right Logical
*/
#define psrlq_i2r(imm, reg)     mmx_i2r(psrlq, imm, reg)
#define psrlq_m2r(var, reg)     mmx_m2r(psrlq, var, reg)
#define psrlq_r2r(regs, regd)   mmx_r2r(psrlq, regs, regd)
#define psrlq(vars, vard)       mmx_m2m(psrlq, vars, vard)

#define psrld_i2r(imm, reg)     mmx_i2r(psrld, imm, reg)
#define psrld_m2r(var, reg)     mmx_m2r(psrld, var, reg)
#define psrld_r2r(regs, regd)   mmx_r2r(psrld, regs, regd)
#define psrld(vars, vard)       mmx_m2m(psrld, vars, vard)

#define psrlw_i2r(imm, reg)     mmx_i2r(psrlw, imm, reg)
#define psrlw_m2r(var, reg)     mmx_m2r(psrlw, var, reg)
#define psrlw_r2r(regs, regd)   mmx_r2r(psrlw, regs, regd)
#define psrlw(vars, vard)       mmx_m2m(psrlw, vars, vard)


/* 2x32 and 4x16 Parallel Shift Right Arithmetic
*/
#define psrad_i2r(imm, reg)     mmx_i2r(psrad, imm, reg)
#define psrad_m2r(var, reg)     mmx_m2r(psrad, var, reg)
#define psrad_r2r(regs, regd)   mmx_r2r(psrad, regs, regd)
#define psrad(vars, vard)       mmx_m2m(psrad, vars, vard)

#define psraw_i2r(imm, reg)     mmx_i2r(psraw, imm, reg)
#define psraw_m2r(var, reg)     mmx_m2r(psraw, var, reg)
#define psraw_r2r(regs, regd)   mmx_r2r(psraw, regs, regd)
#define psraw(vars, vard)       mmx_m2m(psraw, vars, vard)
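
/* Added commentary: the logical shifts (psrl*) shift in zero bits, while
   the arithmetic shifts (psra*) replicate each field's sign bit, so psraw
   is the one that divides signed 16-bit fields by a power of two:
        psraw_i2r(2, mm5);      // each signed word in mm5 /= 4 (rounds toward -inf)
        psrlw_i2r(2, mm6);      // each word in mm6 gets zeros shifted in
*/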


/* 2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
   (packs source and dest fields into dest in that order)
*/
#define packssdw_m2r(var, reg)    mmx_m2r(packssdw, var, reg)
#define packssdw_r2r(regs, regd)  mmx_r2r(packssdw, regs, regd)
#define packssdw(vars, vard)      mmx_m2m(packssdw, vars, vard)

#define packsswb_m2r(var, reg)    mmx_m2r(packsswb, var, reg)
#define packsswb_r2r(regs, regd)  mmx_r2r(packsswb, regs, regd)
#define packsswb(vars, vard)      mmx_m2m(packsswb, vars, vard)


/* 4x16->8x8 PACK and Unsigned Saturate
   (packs source and dest fields into dest in that order)
*/
#define packuswb_m2r(var, reg)    mmx_m2r(packuswb, var, reg)
#define packuswb_r2r(regs, regd)  mmx_r2r(packuswb, regs, regd)
#define packuswb(vars, vard)      mmx_m2m(packuswb, vars, vard)
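
/* Usage sketch (added commentary): a common pixel idiom is to compute at
   16-bit precision and then narrow back to bytes, letting packuswb clamp
   anything outside 0..255:
        packuswb_r2r(mm1, mm0); // mm0's 4 words -> low 4 result bytes,
                                // mm1's 4 words -> high 4, all saturated
*/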


/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
   (interleaves low half of dest with low half of source
   as padding in each result field)
*/
#define punpckldq_m2r(var, reg)    mmx_m2r(punpckldq, var, reg)
#define punpckldq_r2r(regs, regd)  mmx_r2r(punpckldq, regs, regd)
#define punpckldq(vars, vard)      mmx_m2m(punpckldq, vars, vard)

#define punpcklwd_m2r(var, reg)    mmx_m2r(punpcklwd, var, reg)
#define punpcklwd_r2r(regs, regd)  mmx_r2r(punpcklwd, regs, regd)
#define punpcklwd(vars, vard)      mmx_m2m(punpcklwd, vars, vard)

#define punpcklbw_m2r(var, reg)    mmx_m2r(punpcklbw, var, reg)
#define punpcklbw_r2r(regs, regd)  mmx_r2r(punpcklbw, regs, regd)
#define punpcklbw(vars, vard)      mmx_m2m(punpcklbw, vars, vard)


/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
   (interleaves high half of dest with high half of source
   as padding in each result field)
*/
#define punpckhdq_m2r(var, reg)    mmx_m2r(punpckhdq, var, reg)
#define punpckhdq_r2r(regs, regd)  mmx_r2r(punpckhdq, regs, regd)
#define punpckhdq(vars, vard)      mmx_m2m(punpckhdq, vars, vard)

#define punpckhwd_m2r(var, reg)    mmx_m2r(punpckhwd, var, reg)
#define punpckhwd_r2r(regs, regd)  mmx_r2r(punpckhwd, regs, regd)
#define punpckhwd(vars, vard)      mmx_m2m(punpckhwd, vars, vard)

#define punpckhbw_m2r(var, reg)    mmx_m2r(punpckhbw, var, reg)
#define punpckhbw_r2r(regs, regd)  mmx_r2r(punpckhbw, regs, regd)
#define punpckhbw(vars, vard)      mmx_m2m(punpckhbw, vars, vard)
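
/* Usage sketch (added commentary): interleaving with a zeroed register is
   the standard way to widen unsigned bytes to words before 16-bit
   arithmetic ("pixels" is a hypothetical mmx_t):
        pxor_r2r(mm7, mm7);             // mm7 = 0
        movq_m2r(pixels, mm0);          // 8 unsigned bytes
        movq_r2r(mm0, mm1);
        punpcklbw_r2r(mm7, mm0);        // low 4 bytes -> 4 zero-extended words
        punpckhbw_r2r(mm7, mm1);        // high 4 bytes -> 4 zero-extended words
*/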


/* Empty MMX State
   (used to clean-up when going from mmx to float use
   of the registers that are shared by both; note that
   there is no float-to-mmx operation needed, because
   only the float tag word info is corruptible)
*/
#ifdef MMX_TRACE

#define emms() \
    { \
        fprintf(stderr, "emms()\n"); \
        __asm__ __volatile__ ("emms"); \
    }

#else

#define emms()                  __asm__ __volatile__ ("emms")

#endif
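
/* Usage sketch (added commentary): because the MMX registers alias the x87
   stack, every stretch of MMX code should end with emms() before any
   floating-point arithmetic runs ("src", "bias", "dst" hypothetical):
        movq_m2r(src, mm0);
        paddusb_m2r(bias, mm0);
        movq_r2m(mm0, dst);
        emms();                 // restore the FPU tag word
*/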

#endif /* _MMX_H */