2 * Written by José Fonseca <j_r_fonseca@yahoo.co.uk>
/* integer multiplication - alpha plus one
 *
 * makes the following approximation to the division (Sree)
 *
 *   rgb*a/255 ~= (rgb*(a+1)) >> 8
 *
 * which is the fastest method that satisfies the following OpenGL criteria
 *
 *   0*0 = 0 and 255*255 = 255
 *
 * note that MX1 is a register with 0xffffffffffffffff constant which can be easily obtained making
 *
 *   PCMPEQW ( MX1, MX1 )
 *
 * result words end up in MA1 (and MA2 in the two-pixel case); MP1/MP2 are read only
 */
#define GMB_MULT_AP1( MP1, MA1, MP2, MA2, MX1 ) \
    PSUBW      ( MX1, MA1 )              /* a1 + 1 | a1 + 1 | a1 + 1 | a1 + 1 */ ;\
TWO(PSUBW      ( MX1, MA2 ))             /* a2 + 1 | a2 + 1 | a2 + 1 | a2 + 1 */ ;\
\
    PMULLW     ( MP1, MA1 )              /* t1 = p1*a1 */ ;\
TWO(PMULLW     ( MP2, MA2 ))             /* t2 = p2*a2 */ ;\
\
    PSRLW      ( CONST(8), MA1 )         /* t1 >> 8 ~= t1/255 */ ;\
TWO(PSRLW      ( CONST(8), MA2 ))        /* t2 >> 8 ~= t2/255 */
/* integer multiplication - geometric series
 *
 * takes the geometric series approximation to the division
 *
 *   t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
 *
 * in this case just the first two terms to fit in 16bit arithmetic
 *
 *   t/255 ~= (t + (t >> 8)) >> 8
 *
 * note that just by itself it doesn't satisfy the OpenGL criteria, as 255*255 = 254,
 * so the special case a = 255 must be accounted or roundoff must be used
 *
 * result words end up in MA1/MA2; MP1/MP2 are clobbered as scratch
 */
#define GMB_MULT_GS( MP1, MA1, MP2, MA2 ) \
    PMULLW     ( MP1, MA1 )              /* t1 = p1*a1 */ ;\
TWO(PMULLW     ( MP2, MA2 ))             /* t2 = p2*a2 */ ;\
\
    MOVQ       ( MA1, MP1 ) ;\
TWO(MOVQ       ( MA2, MP2 )) ;\
\
    PSRLW      ( CONST(8), MP1 )         /* t1 >> 8 */ ;\
TWO(PSRLW      ( CONST(8), MP2 ))        /* t2 >> 8 */ ;\
\
    PADDW      ( MP1, MA1 )              /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW      ( MP2, MA2 ))             /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PSRLW      ( CONST(8), MA1 )         /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW      ( CONST(8), MA2 ))        /* sa2 | sb2 | sg2 | sr2 */
/* integer multiplication - geometric series plus rounding
 *
 * when using a geometric series division instead of truncating the result
 * use roundoff in the approximation (Jim Blinn)
 *
 *   t = rgb*a + 0x80
 *
 *   t/255 ~= (t + (t >> 8)) >> 8
 *
 * achieving the exact results
 *
 * note that M80 is a register with the 0x0080008000800080 constant
 *
 * result words end up in MA1/MA2; MP1/MP2 are clobbered as scratch
 */
#define GMB_MULT_GSR( MP1, MA1, MP2, MA2, M80 ) \
    PMULLW     ( MP1, MA1 )              /* t1 = p1*a1 */ ;\
TWO(PMULLW     ( MP2, MA2 ))             /* t2 = p2*a2 */ ;\
\
    PADDW      ( M80, MA1 )              /* t1 += 0x80 */ ;\
TWO(PADDW      ( M80, MA2 ))             /* t2 += 0x80 */ ;\
\
    MOVQ       ( MA1, MP1 ) ;\
TWO(MOVQ       ( MA2, MP2 )) ;\
\
    PSRLW      ( CONST(8), MP1 )         /* t1 >> 8 */ ;\
TWO(PSRLW      ( CONST(8), MP2 ))        /* t2 >> 8 */ ;\
\
    PADDW      ( MP1, MA1 )              /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW      ( MP2, MA2 ))             /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PSRLW      ( CONST(8), MA1 )         /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW      ( CONST(8), MA2 ))        /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series
 *
 * computes q + (p - q)*pa/255, i.e. p*pa + q*(1 - pa), using the
 * two-term geometric series approximation of the division by 255
 *
 * result words end up in MA1/MA2; MP1/MP2 and MQ1/MQ2 are clobbered
 */
#define GMB_LERP_GS( MP1, MQ1, MA1, MP2, MQ2, MA2) \
    PSUBW      ( MQ1, MP1 )              /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
TWO(PSUBW      ( MQ2, MP2 ))             /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
\
    PSLLW      ( CONST(8), MQ1 )         /* q1 << 8 */ ;\
TWO(PSLLW      ( CONST(8), MQ2 ))        /* q2 << 8 */ ;\
\
    PMULLW     ( MP1, MA1 )              /* t1 = (p1 - q1)*pa1 */ ;\
TWO(PMULLW     ( MP2, MA2 ))             /* t2 = (p2 - q2)*pa2 */ ;\
\
    MOVQ       ( MA1, MP1 ) ;\
TWO(MOVQ       ( MA2, MP2 )) ;\
\
    PSRLW      ( CONST(8), MP1 )         /* t1 >> 8 */ ;\
TWO(PSRLW      ( CONST(8), MP2 ))        /* t2 >> 8 */ ;\
\
    PADDW      ( MP1, MA1 )              /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW      ( MP2, MA2 ))             /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PADDW      ( MQ1, MA1 )              /* (t1/255 + q1) << 8 */ ;\
TWO(PADDW      ( MQ2, MA2 ))             /* (t2/255 + q2) << 8 */ ;\
\
    PSRLW      ( CONST(8), MA1 )         /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW      ( CONST(8), MA2 ))        /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series with roundoff
 *
 * this is a generalization of Blinn's formula to signed arithmetic:
 * (p - q) may be negative, so the 0x80 bias is corrected by 0x100
 * whenever the sign bit of the product is set
 *
 * note that M80 is a register with the 0x0080008000800080 constant
 *
 * result words end up in MA1/MA2; MP1/MP2 and MQ1/MQ2 are clobbered
 */
#define GMB_LERP_GSR( MP1, MQ1, MA1, MP2, MQ2, MA2, M80) \
    PSUBW      ( MQ1, MP1 )              /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
TWO(PSUBW      ( MQ2, MP2 ))             /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
\
    PSLLW      ( CONST(8), MQ1 )         /* q1 << 8 */ ;\
TWO(PSLLW      ( CONST(8), MQ2 ))        /* q2 << 8 */ ;\
\
    PMULLW     ( MP1, MA1 )              /* t1 = (p1 - q1)*pa1 */ ;\
TWO(PMULLW     ( MP2, MA2 ))             /* t2 = (p2 - q2)*pa2 */ ;\
\
    PSRLW      ( CONST(15), MP1 )        /* q1 > p1 ? 1 : 0 */ ;\
TWO(PSRLW      ( CONST(15), MP2 ))       /* q2 > p2 ? 1 : 0 */ ;\
\
    PSLLW      ( CONST(8), MP1 )         /* q1 > p1 ? 0x100 : 0 */ ;\
TWO(PSLLW      ( CONST(8), MP2 ))        /* q2 > p2 ? 0x100 : 0 */ ;\
\
    PSUBW      ( MP1, MA1 )              /* t1 -=? 0x100 */ ;\
TWO(PSUBW      ( MP2, MA2 ))             /* t2 -=? 0x100 */ ;\
\
    PADDW      ( M80, MA1 )              /* t1 += 0x80 */ ;\
TWO(PADDW      ( M80, MA2 ))             /* t2 += 0x80 */ ;\
\
    MOVQ       ( MA1, MP1 ) ;\
TWO(MOVQ       ( MA2, MP2 )) ;\
\
    PSRLW      ( CONST(8), MP1 )         /* t1 >> 8 */ ;\
TWO(PSRLW      ( CONST(8), MP2 ))        /* t2 >> 8 */ ;\
\
    PADDW      ( MP1, MA1 )              /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW      ( MP2, MA2 ))             /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PADDW      ( MQ1, MA1 )              /* (t1/255 + q1) << 8 */ ;\
TWO(PADDW      ( MQ2, MA2 ))             /* (t2/255 + q2) << 8 */ ;\
\
    PSRLW      ( CONST(8), MA1 )         /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW      ( CONST(8), MA2 ))        /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series with correction
 *
 * instead of the roundoff this adds a small correction to satisfy the OpenGL criteria
 *
 *   t/255 ~= (t + (t >> 8) + (t >> 15)) >> 8
 *
 * note that although it is faster than rounding off it doesn't always give the exact results
 *
 * result words end up in MA1/MA2; MP1/MP2 and MQ1/MQ2 are clobbered
 */
#define GMB_LERP_GSC( MP1, MQ1, MA1, MP2, MQ2, MA2) \
    PSUBW      ( MQ1, MP1 )              /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
TWO(PSUBW      ( MQ2, MP2 ))             /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
\
    PSLLW      ( CONST(8), MQ1 )         /* q1 << 8 */ ;\
TWO(PSLLW      ( CONST(8), MQ2 ))        /* q2 << 8 */ ;\
\
    PMULLW     ( MP1, MA1 )              /* t1 = (p1 - q1)*pa1 */ ;\
TWO(PMULLW     ( MP2, MA2 ))             /* t2 = (p2 - q2)*pa2 */ ;\
\
    MOVQ       ( MA1, MP1 ) ;\
TWO(MOVQ       ( MA2, MP2 )) ;\
\
    PSRLW      ( CONST(8), MP1 )         /* t1 >> 8 */ ;\
TWO(PSRLW      ( CONST(8), MP2 ))        /* t2 >> 8 */ ;\
\
    PADDW      ( MP1, MA1 )              /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW      ( MP2, MA2 ))             /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
\
    PSRLW      ( CONST(7), MP1 )         /* t1 >> 15 (7 more on top of the >> 8 above) */ ;\
TWO(PSRLW      ( CONST(7), MP2 ))        /* t2 >> 15 */ ;\
\
    PADDW      ( MP1, MA1 )              /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */ ;\
TWO(PADDW      ( MP2, MA2 ))             /* t2 + (t2 >> 8) + (t2 >>15) ~= (t2/255) << 8 */ ;\
\
    PADDW      ( MQ1, MA1 )              /* (t1/255 + q1) << 8 */ ;\
TWO(PADDW      ( MQ2, MA2 ))             /* (t2/255 + q2) << 8 */ ;\
\
    PSRLW      ( CONST(8), MA1 )         /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW      ( CONST(8), MA2 ))        /* sa2 | sb2 | sg2 | sr2 */
/* common blending setup code
 *
 * loads the incoming fragment (rgba) into MPP and the framebuffer pixel
 * (dest) into MQQ; p/q labels match GMB_UNPACK and GMB_ALPHA, which treat
 * MPP as the source (p) and MQQ as the destination (q)
 *
 * note that M00 is a register with 0x0000000000000000 constant which can be easily obtained making
 *
 *   PXOR ( M00, M00 )
 */
#define GMB_LOAD(rgba, dest, MPP, MQQ) \
ONE(MOVD       ( REGIND(rgba), MPP ))    /*     |     |     |     | pa1 | pb1 | pg1 | pr1 */ ;\
ONE(MOVD       ( REGIND(dest), MQQ ))    /*     |     |     |     | qa1 | qb1 | qg1 | qr1 */ ;\
\
TWO(MOVQ       ( REGIND(rgba), MPP ))    /* pa2 | pb2 | pg2 | pr2 | pa1 | pb1 | pg1 | pr1 */ ;\
TWO(MOVQ       ( REGIND(dest), MQQ ))    /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */
/* unpack the packed 8-bit channels into 16-bit words (zero-extended
 * via M00); low pixel stays in MP1/MQ1, high pixel goes to MP2/MQ2
 */
#define GMB_UNPACK(MP1, MQ1, MP2, MQ2, M00) \
TWO(MOVQ       ( MP1, MP2 )) ;\
TWO(MOVQ       ( MQ1, MQ2 )) ;\
\
    PUNPCKLBW  ( M00, MQ1 )              /* qa1 | qb1 | qg1 | qr1 */ ;\
TWO(PUNPCKHBW  ( M00, MQ2 ))             /* qa2 | qb2 | qg2 | qr2 */ ;\
    PUNPCKLBW  ( M00, MP1 )              /* pa1 | pb1 | pg1 | pr1 */ ;\
TWO(PUNPCKHBW  ( M00, MP2 ))             /* pa2 | pb2 | pg2 | pr2 */
/* broadcast the alpha word of the unpacked source pixel(s) into all
 * four words of MA1 (and MA2); MP1/MP2 are read only
 */
#define GMB_ALPHA(MP1, MA1, MP2, MA2) \
    MOVQ       ( MP1, MA1 ) ;\
TWO(MOVQ       ( MP2, MA2 )) ;\
\
    PUNPCKHWD  ( MA1, MA1 )              /* pa1 | pa1 |     |     */ ;\
TWO(PUNPCKHWD  ( MA2, MA2 ))             /* pa2 | pa2 |     |     */ ;\
    PUNPCKHDQ  ( MA1, MA1 )              /* pa1 | pa1 | pa1 | pa1 */ ;\
TWO(PUNPCKHDQ  ( MA2, MA2 ))             /* pa2 | pa2 | pa2 | pa2 */
/* pack the two 16-bit result pixels back to unsigned saturated bytes */
#define GMB_PACK( MS1, MS2 ) \
    PACKUSWB   ( MS2, MS1 )              /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
/* store one (MOVD) or two (MOVQ) packed result pixels back to rgba */
#define GMB_STORE(rgba, MSS ) \
ONE(MOVD       ( MSS, REGIND(rgba) )) ;\
TWO(MOVQ       ( MSS, REGIND(rgba) ))
/* 0x80 rounding bias replicated into all four words, referenced as
 * CONTENT(const_80) by the modulate INIT below */
const_80:
	D_LONG 0x00800080, 0x00800080
/* common transparency blending mode
 */
#define TAG(x) x##_transparency

#define INIT \
    PXOR       ( MM0, MM0 )              /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */

#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
    GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 ) ;\
    GMB_ALPHA( MM1, MM3, MM4, MM6 ) ;\
    GMB_LERP_GSC( MM1, MM2, MM3, MM4, MM5, MM6 ) ;\
    GMB_PACK( MM3, MM6 ) ;\
    GMB_STORE( rgba, MM3 )

#include "mmx_blendtmp.h"
/* add blending mode
 */
#define TAG(x) x##_add

/* no per-loop setup needed for saturated add */
#define INIT

#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
    PADDUSB    ( MM1, MM2 ) ;\
    GMB_STORE( rgba, MM2 )

#include "mmx_blendtmp.h"
/* modulate blending mode
 */
#define TAG(x) x##_modulate

#define INIT \
    PXOR       ( MM0, MM0 )              /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */ ;\
    MOVQ       ( CONTENT(const_80), MM7 ) /* 0x0080 | 0x0080 | 0x0080 | 0x0080 */

#define MAIN( rgba, dest ) \
    GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
    GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 ) ;\
    GMB_MULT_GSR( MM1, MM2, MM4, MM5, MM7 ) ;\
    GMB_PACK( MM2, MM5 ) ;\
    GMB_STORE( rgba, MM2 )

#include "mmx_blendtmp.h"