2 * Written by José Fonseca <j_r_fonseca@yahoo.co.uk>
8 * make the following approximation to the division (Sree)
10 * rgb*a/255 ~= (rgb*(a+1)) >> 8
12 * which is the fastest method that satisfies the following OpenGL criteria
14 * 0*0 = 0 and 255*255 = 255
16 * note this one should be used alone
18 #define GMBT_ALPHA_PLUS_ONE 0
21 * take the geometric series approximation to the division
23 * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
25 * in this case just the first two terms to fit in 16bit arithmetic
27 * t/255 ~= (t + (t >> 8)) >> 8
29 * note that just by itself it doesn't satisfy the OpenGL criteria, as 255*255 = 254,
30 * so the special case a = 255 must be accounted or roundoff must be used
32 #define GMBT_GEOMETRIC_SERIES 1
35 * when using a geometric series division instead of truncating the result
36 * use roundoff in the approximation (Jim Blinn)
40 * achieving the exact results
42 #define GMBT_ROUNDOFF 0
44 /* instead of the roundoff this adds a small correction to satisfy the OpenGL criteria
46 * t/255 ~= (t + (t >> 8) + (t >> 15)) >> 8
48 * note that although it is faster than rounding off it doesn't always give the exact results
50 #define GMBT_GEOMETRIC_CORRECTION 1
61 * this eliminates a multiply at the expense of
62 * complicating the roundoff but is generally worth it
64 #define GMBT_SIGNED_ARITHMETIC 1
71 D_LONG 0x00800080, 0x00800080
77 GLOBL GLNAME(_mesa_mmx_blend_transparency)
80 * void blend_transparency( GLcontext *ctx,
82 * const GLubyte mask[],
84 * CONST GLchan dest[][4] )
86 * Common transparency blending mode.
88 GLNAME( _mesa_mmx_blend_transparency ):
96 MOV_L ( REGOFF(12, EBP), ECX ) /* n */
97 CMP_L ( CONST(0), ECX)
98 JE ( LLBL (GMBT_return) )
100 MOV_L ( REGOFF(16, EBP), EBX ) /* mask */
101 MOV_L ( REGOFF(20, EBP), EDI ) /* rgba */
102 MOV_L ( REGOFF(24, EBP), ESI ) /* dest */
104 TEST_L ( CONST(4), EDI ) /* align rgba on an 8-byte boundary */
105 JZ ( LLBL (GMBT_align_end) )
107 CMP_B ( CONST(0), REGIND(EBX) ) /* *mask == 0 */
108 JE ( LLBL (GMBT_align_continue) )
110 PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
112 MOVD ( REGIND(ESI), MM1 ) /* | | | | qa1 | qb1 | qg1 | qr1 */
113 MOVD ( REGIND(EDI), MM2 ) /* | | | | pa1 | pb1 | pg1 | pr1 */
115 PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
116 PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
120 PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
121 PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
123 #if GMBT_ALPHA_PLUS_ONE
124 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
126 PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
129 #if GMBT_SIGNED_ARITHMETIC
130 PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
132 PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
138 PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
141 PSRLW ( CONST(15), MM4 ) /* q1 > p1 ? 1 : 0 */
143 PSLLW ( CONST(8), MM4 ) /* q1 > p1 ? 0x100 : 0 */
145 PSUBW ( MM4, MM2 ) /* t1 -=? 0x100 */
149 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
150 PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
153 PMULLW ( MM3, MM2 ) /* p1*pa1 */
155 PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
157 PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
159 PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
163 MOVQ ( CONTENT(const_80), MM4 )
165 PADDW ( MM4, MM2 ) /* t1 += 0x80 */
168 #if GMBT_GEOMETRIC_SERIES
171 PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
173 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
175 #if GMBT_GEOMETRIC_CORRECTION
176 PSRLW ( CONST(7), MM3 ) /* t1 >> 15 */
178 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */
182 #if GMBT_SIGNED_ARITHMETIC
183 PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
186 PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
188 PACKUSWB ( MM0, MM2 ) /* | | | | sa1 | sb1 | sg1 | sr1 */
189 MOVD ( MM2, REGIND(EDI) )
191 LLBL (GMBT_align_continue):
193 DEC_L ( ECX ) /* n -= 1 */
194 INC_L ( EBX ) /* mask += 1 */
195 ADD_L ( CONST(4), EDI ) /* rgba += 1 */
196 ADD_L ( CONST(4), ESI ) /* dest += 1 */
198 LLBL (GMBT_align_end):
200 CMP_L ( CONST(2), ECX)
201 JB ( LLBL (GMBT_loop_end) )
204 LLBL (GMBT_loop_begin):
206 CMP_W ( CONST(0), REGIND(EBX) ) /* *mask == 0 && *(mask + 1) == 0 */
207 JE ( LLBL (GMBT_loop_continue) )
209 /* NOTE: the instruction pairing when multiple pipelines are available must be checked */
211 PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
213 MOVQ ( REGIND(ESI), MM7 ) /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */
214 MOVQ ( REGIND(EDI), MM6 ) /* pa2 | pb2 | pg2 | pr2 | pa1 | pb1 | pg1 | pr1 */
219 PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
220 PUNPCKHBW ( MM0, MM7 ) /* qa2 | qb2 | qg2 | qr2 */
221 PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
222 PUNPCKHBW ( MM0, MM6 ) /* pa2 | pb2 | pg2 | pr2 */
227 PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
228 PUNPCKHWD ( MM5, MM5 ) /* pa2 | pa2 | | */
229 PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
230 PUNPCKHDQ ( MM5, MM5 ) /* pa2 | pa2 | pa2 | pa2 */
232 #if GMBT_ALPHA_PLUS_ONE
233 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
235 PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
236 PSUBW ( MM4, MM5 ) /* pa2 + 1 | pa2 + 1 | pa2 + 1 | pa2 + 1 */
239 #if GMBT_SIGNED_ARITHMETIC
240 PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
241 PSUBW ( MM7, MM6 ) /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */
243 PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
244 PSLLW ( CONST(8), MM7 ) /* q2 << 8 */
251 PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
252 PMULLW ( MM5, MM6 ) /* t2 = (q2 - p2)*pa2 */
255 PSRLW ( CONST(15), MM0 ) /* q1 > p1 ? 1 : 0 */
256 PSRLW ( CONST(15), MM4 ) /* q2 > p2 ? 1 : 0 */
258 PSLLW ( CONST(8), MM0 ) /* q1 > p1 ? 0x100 : 0 */
259 PSLLW ( CONST(8), MM4 ) /* q2 > p2 ? 0x100 : 0 */
261 PSUBW ( MM0, MM2 ) /* t1 -=? 0x100 */
262 PSUBW ( MM4, MM7 ) /* t2 -=? 0x100 */
266 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
267 PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
270 PMULLW ( MM3, MM2 ) /* p1*pa1 */
271 PMULLW ( MM5, MM6 ) /* p2*pa2 */
273 PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
274 PSUBW ( MM5, MM4 ) /* 255 - pa2 | 255 - pa2 | 255 - pa2 | 255 - pa2 */
276 PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
277 PMULLW ( MM4, MM7 ) /* q2*(255 - pa2) */
279 PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
280 PADDW ( MM7, MM6 ) /* t2 = p2*pa2 + q2*(255 - pa2) */
284 MOVQ ( CONTENT(const_80), MM4 )
286 PADDW ( MM4, MM2 ) /* t1 += 0x80 */
287 PADDW ( MM4, MM6 ) /* t2 += 0x80 */
290 #if GMBT_GEOMETRIC_SERIES
294 PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
295 PSRLW ( CONST(8), MM5 ) /* t2 >> 8 */
297 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
298 PADDW ( MM5, MM6 ) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */
300 #if GMBT_GEOMETRIC_CORRECTION
301 PSRLW ( CONST(7), MM3 ) /* t1 >> 15 */
302 PSRLW ( CONST(7), MM5 ) /* t2 >> 15 */
304 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */
305 PADDW ( MM5, MM6 ) /* t2 + (t2 >> 8) + (t2 >>15) ~= (t2/255) << 8 */
309 #if GMBT_SIGNED_ARITHMETIC
310 PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
311 PADDW ( MM7, MM6 ) /* (t2/255 + q2) << 8 */
314 PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
315 PSRLW ( CONST(8), MM6 ) /* sa2 | sb2 | sg2 | sr2 */
317 PACKUSWB ( MM6, MM2 ) /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
318 MOVQ ( MM2, REGIND(EDI) )
320 LLBL (GMBT_loop_continue):
323 DEC_L ( ECX ) /* n -= 2 */
324 ADD_L ( CONST(2), EBX ) /* mask += 2 */
325 ADD_L ( CONST(8), EDI ) /* rgba += 2 */
326 ADD_L ( CONST(8), ESI ) /* dest += 2 */
327 CMP_L ( CONST(2), ECX )
328 JAE ( LLBL (GMBT_loop_begin) )
330 LLBL (GMBT_loop_end):
332 CMP_L ( CONST(1), ECX )
333 JB ( LLBL (GMBT_done) )
335 CMP_B ( CONST(0), REGIND(EBX) ) /* *mask == 0 */
336 JE ( LLBL (GMBT_done) )
338 PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
340 MOVD ( REGIND(ESI), MM1 ) /* | | | | qa1 | qb1 | qg1 | qr1 */
341 MOVD ( REGIND(EDI), MM2 ) /* | | | | pa1 | pb1 | pg1 | pr1 */
343 PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
344 PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
348 PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
349 PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
351 #if GMBT_ALPHA_PLUS_ONE
352 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
354 PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
357 #if GMBT_SIGNED_ARITHMETIC
358 PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
360 PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
366 PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
369 PSRLW ( CONST(15), MM4 ) /* q1 > p1 ? 1 : 0 */
371 PSLLW ( CONST(8), MM4 ) /* q1 > p1 ? 0x100 : 0 */
373 PSUBW ( MM4, MM2 ) /* t1 -=? 0x100 */
377 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
378 PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
381 PMULLW ( MM3, MM2 ) /* p1*pa1 */
383 PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
385 PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
387 PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
391 MOVQ ( CONTENT(const_80), MM4 )
393 PADDW ( MM4, MM2 ) /* t1 += 0x80 */
396 #if GMBT_GEOMETRIC_SERIES
399 PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
401 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
403 #if GMBT_GEOMETRIC_CORRECTION
404 PSRLW ( CONST(7), MM3 ) /* t1 >> 15 */
406 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */
410 #if GMBT_SIGNED_ARITHMETIC
411 PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
414 PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
416 PACKUSWB ( MM0, MM2 ) /* | | | | sa1 | sb1 | sg1 | sr1 */
417 MOVD ( MM2, REGIND(EDI) )