 * Written by José Fonseca <j_r_fonseca@yahoo.co.uk>
 */
9 #define MATH_ASM_PTR_SIZE 4
10 #include "math/m_vector_asm.h"
/* integer multiplication - alpha plus one
 *
 * makes the following approximation to the division (Sree)
 *
 *   rgb*a/255 ~= (rgb*(a+1)) >> 8
 *
 * which is the fastest method that satisfies the following OpenGL criteria
 *
 *   0*0 = 0 and 255*255 = 255
 *
 * note that MX1 is a register with 0xffffffffffffffff constant which can be easily obtained making
 *
 *   PCMPEQW ( MX1, MX1 )
 */
/* MA = (MP*(MA+1)) >> 8 for one (ONE) or two (TWO) unpacked pixels.
 * MX1 must hold all-ones, so PSUBW of it adds 1 to each word of MA. */
#define GMB_MULT_AP1( MP1, MA1, MP2, MA2, MX1 ) \
    PSUBW ( MX1, MA1 )           /* a1 + 1 | a1 + 1 | a1 + 1 | a1 + 1 */ ;\
    PMULLW ( MP1, MA1 )          /* t1 = p1*a1 */ ;\
    TWO(PSUBW ( MX1, MA2 ))      /* a2 + 1 | a2 + 1 | a2 + 1 | a2 + 1 */ ;\
    TWO(PMULLW ( MP2, MA2 ))     /* t2 = p2*a2 */ ;\
    PSRLW ( CONST(8), MA1 )      /* t1 >> 8 ~= t1/255 */ ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 ~= t2/255 */
/* integer multiplication - geometric series
 *
 * takes the geometric series approximation to the division
 *
 *   t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
 *
 * in this case just the first two terms to fit in 16bit arithmetic
 *
 *   t/255 ~= (t + (t >> 8)) >> 8
 *
 * note that just by itself it doesn't satisfy the OpenGL criteria, as 255*255 = 254,
 * so the special case a = 255 must be accounted or roundoff must be used
 */
/* MA = MP*MA/255 via t/255 ~= (t + (t >> 8)) >> 8, for one (ONE) or two
 * (TWO) unpacked pixels; clobbers MP with the intermediate product.
 * NOTE: the MOVQ ( MA1, MP1 ) save of t1 had been lost — without it the
 * later PADDW ( MP1, MA1 ) adds the stale multiplicand instead of t1
 * (the TWO-pixel path's symmetric TWO(MOVQ ( MA2, MP2 )) was intact). */
#define GMB_MULT_GS( MP1, MA1, MP2, MA2 ) \
    PMULLW ( MP1, MA1 )          /* t1 = p1*a1 */ ;\
    TWO(PMULLW ( MP2, MA2 ))     /* t2 = p2*a2 */ ;\
\
    MOVQ ( MA1, MP1 )            /* save t1 */ ;\
    PSRLW ( CONST(8), MA1 )      /* t1 >> 8 */ ;\
    TWO(MOVQ ( MA2, MP2 )) ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
\
    PADDW ( MP1, MA1 )           /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
    PSRLW ( CONST(8), MA1 )      /* sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(PADDW ( MP2, MA2 ))      /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* integer multiplication - geometric series plus rounding
 *
 * when using a geometric series division instead of truncating the result
 * use roundoff in the approximation (Jim Blinn)
 *
 *   t = rgb*a + 0x80
 *
 *   t/255 ~= (t + (t >> 8)) >> 8
 *
 * achieving the exact results
 *
 * note that M80 is register with the 0x0080008000800080 constant
 */
/* MA = MP*MA/255 exactly, via Blinn's rounded series
 * (t + 0x80 + ((t + 0x80) >> 8)) >> 8; M80 holds 0x0080 in each word.
 * Clobbers MP. NOTE: the MOVQ ( MA1, MP1 ) save of t1 had been lost —
 * restored here to mirror the TWO-pixel path's TWO(MOVQ ( MA2, MP2 )). */
#define GMB_MULT_GSR( MP1, MA1, MP2, MA2, M80 ) \
    PMULLW ( MP1, MA1 )          /* t1 = p1*a1 */ ;\
    PADDW ( M80, MA1 )           /* t1 += 0x80 */ ;\
\
    TWO(PMULLW ( MP2, MA2 ))     /* t2 = p2*a2 */ ;\
    TWO(PADDW ( M80, MA2 ))      /* t2 += 0x80 */ ;\
\
    MOVQ ( MA1, MP1 )            /* save t1 */ ;\
    PSRLW ( CONST(8), MA1 )      /* t1 >> 8 */ ;\
    TWO(MOVQ ( MA2, MP2 )) ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
\
    PADDW ( MP1, MA1 )           /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
    PSRLW ( CONST(8), MA1 )      /* sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(PADDW ( MP2, MA2 ))      /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series
 */
/* MA = (MP - MQ)*MA/255 + MQ (lerp of MQ toward MP by alpha MA), one (ONE)
 * or two (TWO) unpacked pixels; clobbers MP and MQ.
 * NOTE: the MOVQ ( MA1, MP1 ) save of t1 had been lost — without it the
 * later PADDW ( MP1, MA1 ) adds the stale difference instead of t1. */
#define GMB_LERP_GS( MP1, MQ1, MA1, MP2, MQ2, MA2) \
    PSUBW ( MQ1, MP1 )           /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
    PSLLW ( CONST(8), MQ1 )      /* q1 << 8 */ ;\
    PMULLW ( MP1, MA1 )          /* t1 = (p1 - q1)*pa1 */ ;\
\
    TWO(PSUBW ( MQ2, MP2 ))      /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
    TWO(PSLLW ( CONST(8), MQ2 )) /* q2 << 8 */ ;\
    TWO(PMULLW ( MP2, MA2 ))     /* t2 = (p2 - q2)*pa2 */ ;\
\
    MOVQ ( MA1, MP1 )            /* save t1 */ ;\
    PSRLW ( CONST(8), MA1 )      /* t1 >> 8 */ ;\
    TWO(MOVQ ( MA2, MP2 )) ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
\
    PADDW ( MP1, MA1 )           /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
    TWO(PADDW ( MP2, MA2 ))      /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PADDW ( MQ1, MA1 )           /* (t1/255 + q1) << 8 */ ;\
    TWO(PADDW ( MQ2, MA2 ))      /* (t2/255 + q2) << 8 */ ;\
\
    PSRLW ( CONST(8), MA1 )      /* sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series with roundoff
 *
 * this is a generalization of Blinn's formula to signed arithmetic
 *
 * note that M80 is a register with the 0x0080008000800080 constant
 */
/* MA = (MP - MQ)*MA/255 + MQ with Blinn-style rounding generalized to the
 * signed difference: the sign bit of (p - q) selects a -0x100 correction so
 * the +0x80 roundoff also works for negative t. M80 holds 0x0080 per word.
 * Clobbers MP and MQ. NOTE: the MOVQ ( MA1, MP1 ) save of t1 had been lost —
 * restored to mirror the TWO-pixel path's TWO(MOVQ ( MA2, MP2 )). */
#define GMB_LERP_GSR( MP1, MQ1, MA1, MP2, MQ2, MA2, M80) \
    PSUBW ( MQ1, MP1 )            /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
    PSLLW ( CONST(8), MQ1 )       /* q1 << 8 */ ;\
    PMULLW ( MP1, MA1 )           /* t1 = (p1 - q1)*pa1 */ ;\
\
    TWO(PSUBW ( MQ2, MP2 ))       /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
    TWO(PSLLW ( CONST(8), MQ2 ))  /* q2 << 8 */ ;\
    TWO(PMULLW ( MP2, MA2 ))      /* t2 = (p2 - q2)*pa2 */ ;\
\
    PSRLW ( CONST(15), MP1 )      /* q1 > p1 ? 1 : 0 */ ;\
    TWO(PSRLW ( CONST(15), MP2 )) /* q2 > p2 ? 1 : 0 */ ;\
\
    PSLLW ( CONST(8), MP1 )       /* q1 > p1 ? 0x100 : 0 */ ;\
    TWO(PSLLW ( CONST(8), MP2 ))  /* q2 > p2 ? 0x100 : 0 */ ;\
\
    PSUBW ( MP1, MA1 )            /* t1 -=? 0x100 */ ;\
    TWO(PSUBW ( MP2, MA2 ))       /* t2 -=? 0x100 */ ;\
\
    PADDW ( M80, MA1 )            /* t1 += 0x80 */ ;\
    TWO(PADDW ( M80, MA2 ))       /* t2 += 0x80 */ ;\
\
    MOVQ ( MA1, MP1 )             /* save t1 */ ;\
    PSRLW ( CONST(8), MA1 )       /* t1 >> 8 */ ;\
    TWO(MOVQ ( MA2, MP2 )) ;\
    TWO(PSRLW ( CONST(8), MA2 ))  /* t2 >> 8 */ ;\
\
    PADDW ( MP1, MA1 )            /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
    TWO(PADDW ( MP2, MA2 ))       /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PADDW ( MQ1, MA1 )            /* (t1/255 + q1) << 8 */ ;\
    TWO(PADDW ( MQ2, MA2 ))       /* (t2/255 + q2) << 8 */ ;\
\
    PSRLW ( CONST(8), MA1 )       /* sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(PSRLW ( CONST(8), MA2 ))  /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series with correction
 *
 * instead of the roundoff this adds a small correction to satisfy the OpenGL criteria
 *
 *   t/255 ~= (t + (t >> 8) + (t >> 15)) >> 8
 *
 * note that although it is faster than rounding off it doesn't always give the exact results
 */
/* MA = (MP - MQ)*MA/255 + MQ using the corrected series
 * t/255 ~= (t + (t >> 8) + (t >> 15)) >> 8; clobbers MP and MQ.
 * NOTE: the MOVQ ( MA1, MP1 ) save of t1 had been lost — without it
 * PADDW ( MA1, MP1 ) sums into the stale difference instead of t1
 * (the TWO-pixel path's TWO(MOVQ ( MA2, MP2 )) was intact). */
#define GMB_LERP_GSC( MP1, MQ1, MA1, MP2, MQ2, MA2) \
    PSUBW ( MQ1, MP1 )           /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
    PSLLW ( CONST(8), MQ1 )      /* q1 << 8 */ ;\
    PMULLW ( MP1, MA1 )          /* t1 = (p1 - q1)*pa1 */ ;\
\
    TWO(PSUBW ( MQ2, MP2 ))      /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
    TWO(PSLLW ( CONST(8), MQ2 )) /* q2 << 8 */ ;\
    TWO(PMULLW ( MP2, MA2 ))     /* t2 = (p2 - q2)*pa2 */ ;\
\
    MOVQ ( MA1, MP1 )            /* save t1 */ ;\
    PSRLW ( CONST(8), MA1 )      /* t1 >> 8 */ ;\
    TWO(MOVQ ( MA2, MP2 )) ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
\
    PADDW ( MA1, MP1 )           /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
    PSRLW ( CONST(7), MA1 )      /* t1 >> 15 */ ;\
    TWO(PADDW ( MA2, MP2 ))      /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
    TWO(PSRLW ( CONST(7), MA2 )) /* t2 >> 15 */ ;\
\
    PADDW ( MP1, MA1 )           /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */ ;\
    TWO(PADDW ( MP2, MA2 ))      /* t2 + (t2 >> 8) + (t2 >>15) ~= (t2/255) << 8 */ ;\
\
    PADDW ( MQ1, MA1 )           /* (t1/255 + q1) << 8 */ ;\
    TWO(PADDW ( MQ2, MA2 ))      /* (t2/255 + q2) << 8 */ ;\
\
    PSRLW ( CONST(8), MA1 )      /* sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* common blending setup code
 *
 * note that M00 is a register with 0x0000000000000000 constant which can be easily obtained making
 *
 *   PXOR ( M00, M00 )
 */
/* load the source (rgba) and destination (dest) pixels:
 * ONE path reads a single 32-bit RGBA pixel into the low dword,
 * TWO path reads two packed RGBA pixels into the full 64-bit register */
#define GMB_LOAD(rgba, dest, MPP, MQQ) \
    ONE(MOVD ( REGIND(rgba), MPP ))  /* | | | | qa1 | qb1 | qg1 | qr1 */ ;\
    ONE(MOVD ( REGIND(dest), MQQ ))  /* | | | | pa1 | pb1 | pg1 | pr1 */ ;\
    TWO(MOVQ ( REGIND(rgba), MPP ))  /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */ ;\
    TWO(MOVQ ( REGIND(dest), MQQ ))  /* pa2 | pb2 | pg2 | pr2 | pa1 | pb1 | pg1 | pr1 */
/* widen packed 8-bit channels to 16-bit words: the low pixel stays in
 * MP1/MQ1, the high pixel (TWO path only) goes to MP2/MQ2;
 * M00 must hold zero (see PXOR note above this macro group) */
#define GMB_UNPACK(MP1, MQ1, MP2, MQ2, M00) \
    TWO(MOVQ ( MP1, MP2 )) ;\
    TWO(MOVQ ( MQ1, MQ2 )) ;\
    PUNPCKLBW ( M00, MQ1 )       /* qa1 | qb1 | qg1 | qr1 */ ;\
    TWO(PUNPCKHBW ( M00, MQ2 ))  /* qa2 | qb2 | qg2 | qr2 */ ;\
    PUNPCKLBW ( M00, MP1 )       /* pa1 | pb1 | pg1 | pr1 */ ;\
    TWO(PUNPCKHBW ( M00, MP2 ))  /* pa2 | pb2 | pg2 | pr2 */
/* broadcast the alpha word of each unpacked pixel across all four channels.
 * NOTE: the initial MOVQ ( MP1, MA1 ) had been lost — MA1 was unpacked
 * without ever being loaded (the TWO path's TWO(MOVQ ( MP2, MA2 )) was
 * intact); restored here. */
#define GMB_ALPHA(MP1, MA1, MP2, MA2) \
    MOVQ ( MP1, MA1 ) ;\
    TWO(MOVQ ( MP2, MA2 )) ;\
\
    PUNPCKHWD ( MA1, MA1 )       /* pa1 | pa1 | | */ ;\
    TWO(PUNPCKHWD ( MA2, MA2 ))  /* pa2 | pa2 | | */ ;\
    PUNPCKHDQ ( MA1, MA1 )       /* pa1 | pa1 | pa1 | pa1 */ ;\
    TWO(PUNPCKHDQ ( MA2, MA2 ))  /* pa2 | pa2 | pa2 | pa2 */
/* pack the two 16-bit-per-channel results back to bytes with unsigned
 * saturation (values above 255 clamp) */
#define GMB_PACK( MS1, MS2 ) \
    PACKUSWB ( MS2, MS1 )  /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */ ;
/* write the blended pixel(s) back over the source span: one dword (ONE)
 * or the full qword (TWO) */
#define GMB_STORE(rgba, MSS ) \
    ONE(MOVD ( MSS, REGIND(rgba) ))  /* | | | | sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(MOVQ ( MSS, REGIND(rgba) ))  /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
/* Kevin F. Quinn <kevquinn@gentoo.org> 2 July 2006
 * Replace data segment constants with text-segment
 * constants (via pushl/movq); the old data-segment form was:
 *
 *   const_0080:
 *       D_LONG 0x00800080, 0x00800080
 *
 *   const_80:
 *       D_LONG 0x80808080, 0x80808080
 */
/* 64-bit constants split into 32-bit halves so each INIT below can
 * materialize them in an MMX register via two PUSH_L + one MOVQ from ESP */
#define const_0080_l 0x00800080
#define const_0080_h 0x00800080
#define const_80_l 0x80808080
#define const_80_h 0x80808080
/* Blend transparency function
 */
#define TAG(x) CONCAT(x,_transparency)
#define LLTAG(x) LLBL2(x,_transparency)

/* NOTE: the "#define INIT \" header had been lost, leaving the PXOR
 * orphaned at top level; restored. MM0 = 0 is the zero needed by
 * GMB_UNPACK in MAIN below. */
#define INIT \
    PXOR ( MM0, MM0 )  /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
/* blend one (ONE) or two (TWO) pixels:
 * dest-over-source lerp by source alpha using the corrected geometric
 * series (GMB_LERP_GSC); result lands in MM3/MM6 and is stored over rgba */
#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
    GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 ) ;\
    GMB_ALPHA( MM1, MM3, MM4, MM6 ) ;\
    GMB_LERP_GSC( MM1, MM2, MM3, MM4, MM5, MM6 ) ;\
    GMB_PACK( MM3, MM6 ) ;\
    GMB_STORE( rgba, MM3 )
282 #include "mmx_blendtmp.h"
/* Blend add function
 *
 * FIXME: Add some loop unrolling here...
 */
#define TAG(x) CONCAT(x,_add)
#define LLTAG(x) LLBL2(x,_add)

/* NOTE: the empty "#define INIT" had been lost; mmx_blendtmp.h expands
 * INIT unconditionally, so it must be defined (no setup is needed here). */
#define INIT

/* saturated add of src and dest, one dword (ONE) or one qword (TWO) */
#define MAIN( rgba, dest ) \
    ONE(MOVD ( REGIND(rgba), MM1 ))     /* | | | | qa1 | qb1 | qg1 | qr1 */ ;\
    ONE(MOVD ( REGIND(dest), MM2 ))     /* | | | | pa1 | pb1 | pg1 | pr1 */ ;\
    ONE(PADDUSB ( MM2, MM1 ))           /* unsigned saturated add */ ;\
    ONE(MOVD ( MM1, REGIND(rgba) ))     /* | | | | sa1 | sb1 | sg1 | sr1 */ ;\
\
    TWO(MOVQ ( REGIND(rgba), MM1 ))     /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */ ;\
    TWO(PADDUSB ( REGIND(dest), MM1 ))  /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */ ;\
    TWO(MOVQ ( MM1, REGIND(rgba) ))
305 #include "mmx_blendtmp.h"
/* Blend min function
 */
#define TAG(x) CONCAT(x,_min)
#define LLTAG(x) LLBL2(x,_min)

/* Kevin F. Quinn 2nd July 2006
 * Replace data segment constants with text-segment instructions;
 * the old data-segment form was:
 *
 *   #define INIT \
 *       MOVQ ( CONTENT(const_80), MM7 )
 *
 * NOTE: the comment terminator and "#define INIT \" header had been lost,
 * leaving the PUSH_L body orphaned; restored. */
#define INIT \
    PUSH_L ( CONST(const_80_h) )  /* 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80*/ ;\
    PUSH_L ( CONST(const_80_l) ) ;\
    MOVQ ( REGIND(ESP), MM7 ) ;\
    ADD_L ( CONST(8), ESP)
/* per-byte minimum of src and dest. MM7 = 0x80 per byte (from INIT) biases
 * the unsigned bytes into signed range so PCMPGTB compares correctly.
 * NOTE: MOVQ ( MM1, MM3 ) / MOVQ ( MM2, MM4 ) had been lost — MM3/MM4
 * were used without ever being loaded from the pixels; restored. */
#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
\
    MOVQ ( MM1, MM3 ) ;\
    MOVQ ( MM2, MM4 ) ;\
\
    PXOR ( MM7, MM3 )     /* unsigned -> signed */ ;\
    PXOR ( MM7, MM4 )     /* unsigned -> signed */ ;\
    PCMPGTB ( MM3, MM4 )  /* q > p ? 0xff : 0x00 */ ;\
    PAND ( MM4, MM1 )     /* q > p ? p : 0 */ ;\
    PANDN ( MM2, MM4 )    /* q > p ? 0 : q */ ;\
    POR ( MM1, MM4 )      /* q > p ? p : q */ ;\
\
    GMB_STORE( rgba, MM4 )
337 #include "mmx_blendtmp.h"
/* Blend max function
 */
#define TAG(x) CONCAT(x,_max)
#define LLTAG(x) LLBL2(x,_max)

/* Kevin F. Quinn 2nd July 2006
 * Replace data segment constants with text-segment instructions;
 * the old data-segment form was:
 *
 *   #define INIT \
 *       MOVQ ( CONTENT(const_80), MM7 )
 *
 * NOTE: the comment terminator and "#define INIT \" header had been lost,
 * leaving the PUSH_L body orphaned; restored. */
#define INIT \
    PUSH_L ( CONST(const_80_l) )  /* 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80*/ ;\
    PUSH_L ( CONST(const_80_h) ) ;\
    MOVQ ( REGIND(ESP), MM7 ) ;\
    ADD_L ( CONST(8), ESP)
/* per-byte maximum of src and dest. MM7 = 0x80 per byte (from INIT) biases
 * the unsigned bytes into signed range so PCMPGTB compares correctly.
 * NOTE: MOVQ ( MM1, MM3 ) / MOVQ ( MM2, MM4 ) had been lost — MM3/MM4
 * were used without ever being loaded; restored. Also fixed the final
 * comment: the selected value is the larger one, q > p ? q : p. */
#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
\
    MOVQ ( MM1, MM3 ) ;\
    MOVQ ( MM2, MM4 ) ;\
\
    PXOR ( MM7, MM3 )     /* unsigned -> signed */ ;\
    PXOR ( MM7, MM4 )     /* unsigned -> signed */ ;\
    PCMPGTB ( MM3, MM4 )  /* q > p ? 0xff : 0x00 */ ;\
    PAND ( MM4, MM2 )     /* q > p ? q : 0 */ ;\
    PANDN ( MM1, MM4 )    /* q > p ? 0 : p */ ;\
    POR ( MM2, MM4 )      /* q > p ? q : p */ ;\
\
    GMB_STORE( rgba, MM4 )
369 #include "mmx_blendtmp.h"
/* Blend modulate function
 */
#define TAG(x) CONCAT(x,_modulate)
#define LLTAG(x) LLBL2(x,_modulate)

/* Kevin F. Quinn 2nd July 2006
 * Replace data segment constants with text-segment instructions;
 * the old data-segment form was:
 *
 *   #define INIT \
 *       MOVQ ( CONTENT(const_0080), MM7 )
 *
 * NOTE: the comment terminator and "#define INIT \" header had been lost,
 * leaving the setup body orphaned; restored. MM0 = 0 for GMB_UNPACK,
 * MM7 = 0x0080 per word for GMB_MULT_GSR. */
#define INIT \
    PXOR ( MM0, MM0 )               /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */ ;\
    PUSH_L ( CONST(const_0080_l) )  /* 0x0080 | 0x0080 | 0x0080 | 0x0080 */ ;\
    PUSH_L ( CONST(const_0080_h) ) ;\
    MOVQ ( REGIND(ESP), MM7 ) ;\
    ADD_L ( CONST(8), ESP)
/* modulate blend: result = src*dest/255 with exact rounding
 * (GMB_MULT_GSR, MM7 = 0x0080 per word from INIT), for one (ONE) or
 * two (TWO) pixels; result lands in MM2/MM5 and is stored over rgba */
#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
    GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 ) ;\
    GMB_MULT_GSR( MM1, MM2, MM4, MM5, MM7 ) ;\
    GMB_PACK( MM2, MM5 ) ;\
    GMB_STORE( rgba, MM2 )
397 #include "mmx_blendtmp.h"
401 #if defined (__ELF__) && defined (__linux__)
402 .section .note.GNU-stack,"",%progbits