/*
 * [mesa.git] src/mesa/x86/mmx_blend.S
 * José's latest patch (GMBT_GEOMETRIC_CORRECTION enabled)
 */
1 /*
2 * Written by José Fonseca <j_r_fonseca@yahoo.co.uk>
3 */
4
5 #include "matypes.h"
6
7 /*
8 * make the following approximation to the division (Sree)
9 *
10 * rgb*a/255 ~= (rgb*(a+1)) >> 256
11 *
12 * which is the fastest method that satisfies the following OpenGL criteria
13 *
14 * 0*0 = 0 and 255*255 = 255
15 *
16 * note this one should be used alone
17 */
18 #define GMBT_ALPHA_PLUS_ONE 0
19
20 /*
21 * take the geometric series approximation to the division
22 *
23 * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
24 *
25 * in this case just the first two terms to fit in 16bit arithmetic
26 *
27 * t/255 ~= (t + (t >> 8)) >> 8
28 *
29 * note that just by itself it doesn't satisfies the OpenGL criteria, as 255*255 = 254,
30 * so the special case a = 255 must be accounted or roundoff must be used
31 */
32 #define GMBT_GEOMETRIC_SERIES 1
33
34 /*
35 * when using a geometric series division instead of truncating the result
36 * use roundoff in the approximation (Jim Blinn)
37 *
38 * t = rgb*a + 0x80
39 *
40 * achieving the exact results
41 */
42 #define GMBT_ROUNDOFF 0
43
44 /* instead of the roundoff this adds a small correction to satisfy the OpenGL criteria
45 *
46 * t/255 ~= (t + (t >> 8) + (t >> 15)) >> 8
47 *
48 * note that although is faster than rounding off it doesn't give always the exact results
49 */
50 #define GMBT_GEOMETRIC_CORRECTION 1
51
52 /*
53 * do
54 *
55 * s = (q - p)*a + q
56 *
57 * instead of
58 *
59 * s = p*a + q*(1-a)
60 *
61 * this eliminates a multiply at the expense of
62 * complicating the roundoff but is generally worth it
63 */
64 #define GMBT_SIGNED_ARITHMETIC 1
65
66 #if GMBT_ROUNDOFF
67 SEG_DATA
68
69 ALIGNDATA8
70 const_80:
71 D_LONG 0x00800080, 0x00800080
72 #endif
73
74 SEG_TEXT
75
76 ALIGNTEXT16
77 GLOBL GLNAME(_mesa_mmx_blend_transparency)
78
79 /*
80 * void blend_transparency( GLcontext *ctx,
81 * GLuint n,
82 * const GLubyte mask[],
83 * GLchan rgba[][4],
84 * CONST GLchan dest[][4] )
85 *
86 * Common transparency blending mode.
87 */
88 GLNAME( _mesa_mmx_blend_transparency ):
89
90 PUSH_L ( EBP )
91 MOV_L ( ESP, EBP )
92 PUSH_L ( ESI )
93 PUSH_L ( EDI )
94 PUSH_L ( EBX )
95
96 MOV_L ( REGOFF(12, EBP), ECX ) /* n */
97 CMP_L ( CONST(0), ECX)
98 JE ( LLBL (GMBT_return) )
99
100 MOV_L ( REGOFF(16, EBP), EBX ) /* mask */
101 MOV_L ( REGOFF(20, EBP), EDI ) /* rgba */
102 MOV_L ( REGOFF(24, EBP), ESI ) /* dest */
103
104 TEST_L ( CONST(4), EDI ) /* align rgba on an 8-byte boundary */
105 JZ ( LLBL (GMBT_align_end) )
106
107 CMP_B ( CONST(0), REGIND(EBX) ) /* *mask == 0 */
108 JE ( LLBL (GMBT_align_continue) )
109
110 PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
111
112 MOVD ( REGIND(ESI), MM1 ) /* | | | | qa1 | qb1 | qg1 | qr1 */
113 MOVD ( REGIND(EDI), MM2 ) /* | | | | pa1 | pb1 | pg1 | pr1 */
114
115 PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
116 PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
117
118 MOVQ ( MM2, MM3 )
119
120 PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
121 PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
122
123 #if GMBT_ALPHA_PLUS_ONE
124 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
125
126 PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
127 #endif
128
129 #if GMBT_SIGNED_ARITHMETIC
130 PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
131
132 PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
133
134 #if GMBT_ROUNDOFF
135 MOVQ ( MM2, MM4 )
136 #endif
137
138 PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
139
140 #if GMBT_ROUNDOFF
141 PSRLW ( CONST(15), MM4 ) /* q1 > p1 ? 1 : 0 */
142
143 PSLLW ( CONST(8), MM4 ) /* q1 > p1 ? 0x100 : 0 */
144
145 PSUBW ( MM4, MM2 ) /* t1 -=? 0x100 */
146 #endif
147
148 #else
149 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
150 PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
151 MOVQ ( MM4, MM0 )
152
153 PMULLW ( MM3, MM2 ) /* p1*pa1 */
154
155 PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
156
157 PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
158
159 PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
160 #endif
161
162 #if GMBT_ROUNDOFF
163 MOVQ ( CONTENT(const_80), MM4 )
164
165 PADDW ( MM4, MM2 ) /* t1 += 0x80 */
166 #endif
167
168 #if GMBT_GEOMETRIC_SERIES
169 MOVQ ( MM2, MM3 )
170
171 PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
172
173 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
174 #endif
175
176 #if GMBT_SIGNED_ARITHMETIC
177 PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
178 #endif
179
180 PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
181
182 PACKUSWB ( MM0, MM2 ) /* | | | | sa1 | sb1 | sg1 | sr1 */
183 MOVD ( MM2, REGIND(EDI) )
184
185 LLBL (GMBT_align_continue):
186
187 DEC_L ( ECX ) /* n -= 1 */
188 INC_L ( EBX ) /* mask += 1 */
189 ADD_L ( CONST(4), EDI ) /* rgba += 1 */
190 ADD_L ( CONST(4), ESI ) /* dest += 1 */
191
192 LLBL (GMBT_align_end):
193
194 CMP_L ( CONST(2), ECX)
195 JB ( LLBL (GMBT_loop_end) )
196
197 ALIGNTEXT16
198 LLBL (GMBT_loop_begin):
199
200 CMP_W ( CONST(0), REGIND(EBX) ) /* *mask == 0 && *(mask + 1) == 0 */
201 JE ( LLBL (GMBT_loop_continue) )
202
203 /* NOTE: the instruction pairing when multiple pipelines are available must be checked */
204
205 PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
206
207 MOVQ ( REGIND(ESI), MM7 ) /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */
208 MOVQ ( REGIND(EDI), MM6 ) /* pa2 | pb2 | pg2 | pr2 | pa1 | pb1 | pg1 | pr1 */
209
210 MOVQ ( MM7, MM1 )
211 MOVQ ( MM6, MM2 )
212
213 PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
214 PUNPCKHBW ( MM0, MM7 ) /* qa2 | qb2 | qg2 | qr2 */
215 PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
216 PUNPCKHBW ( MM0, MM6 ) /* pa2 | pb2 | pg2 | pr2 */
217
218 MOVQ ( MM2, MM3 )
219 MOVQ ( MM6, MM5 )
220
221 PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
222 PUNPCKHWD ( MM5, MM5 ) /* pa2 | pa2 | | */
223 PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
224 PUNPCKHDQ ( MM5, MM5 ) /* pa2 | pa2 | pa2 | pa2 */
225
226 #if GMBT_ALPHA_PLUS_ONE
227 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
228
229 PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
230 PSUBW ( MM4, MM5 ) /* pa2 + 1 | pa2 + 1 | pa2 + 1 | pa2 + 1 */
231 #endif
232
233 #if GMBT_SIGNED_ARITHMETIC
234 PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
235 PSUBW ( MM7, MM6 ) /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */
236
237 PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
238 PSLLW ( CONST(8), MM7 ) /* q2 << 8 */
239
240 #if GMBT_ROUNDOFF
241 MOVQ ( MM2, MM0 )
242 MOVQ ( MM6, MM4 )
243 #endif
244
245 PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
246 PMULLW ( MM5, MM6 ) /* t2 = (q2 - p2)*pa2 */
247
248 #if GMBT_ROUNDOFF
249 PSRLW ( CONST(15), MM0 ) /* q1 > p1 ? 1 : 0 */
250 PSRLW ( CONST(15), MM4 ) /* q2 > q2 ? 1 : 0 */
251
252 PSLLW ( CONST(8), MM0 ) /* q1 > p1 ? 0x100 : 0 */
253 PSLLW ( CONST(8), MM4 ) /* q2 > q2 ? 0x100 : 0 */
254
255 PSUBW ( MM0, MM2 ) /* t1 -=? 0x100 */
256 PSUBW ( MM4, MM7 ) /* t2 -=? 0x100 */
257 #endif
258
259 #else
260 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
261 PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
262 MOVQ ( MM4, MM0 )
263
264 PMULLW ( MM3, MM2 ) /* p1*pa1 */
265 PMULLW ( MM5, MM6 ) /* p2*pa2 */
266
267 PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
268 PSUBW ( MM5, MM4 ) /* 255 - pa2 | 255 - pa2 | 255 - pa2 | 255 - pa2 */
269
270 PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
271 PMULLW ( MM4, MM7 ) /* q2*(255 - pa2) */
272
273 PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
274 PADDW ( MM7, MM6 ) /* t2 = p2*pa2 + q2*(255 - pa2) */
275 #endif
276
277 #if GMBT_ROUNDOFF
278 MOVQ ( CONTENT(const_80), MM4 )
279
280 PADDW ( MM4, MM2 ) /* t1 += 0x80 */
281 PADDW ( MM4, MM6 ) /* t2 += 0x80 */
282 #endif
283
284 #if GMBT_GEOMETRIC_SERIES
285 MOVQ ( MM2, MM3 )
286 MOVQ ( MM6, MM5 )
287
288 PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
289 PSRLW ( CONST(8), MM5 ) /* t2 >> 8 */
290
291 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
292 PADDW ( MM5, MM6 ) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */
293
294 #if GMBT_GEOMETRIC_CORRECTION
295 PSRLW ( CONST(7), MM3 ) /* t1 >> 15 */
296 PSRLW ( CONST(7), MM5 ) /* t2 >> 15 */
297
298 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */
299 PADDW ( MM5, MM6 ) /* t2 + (t2 >> 8) + (t2 >>15) ~= (t2/255) << 8 */
300 #endif
301 #endif
302
303 #if GMBT_SIGNED_ARITHMETIC
304 PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
305 PADDW ( MM7, MM6 ) /* (t2/255 + q2) << 8 */
306 #endif
307
308 PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
309 PSRLW ( CONST(8), MM6 ) /* sa2 | sb2 | sg2 | sr2 */
310
311 PACKUSWB ( MM6, MM2 ) /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
312 MOVQ ( MM2, REGIND(EDI) )
313
314 LLBL (GMBT_loop_continue):
315
316 DEC_L ( ECX )
317 DEC_L ( ECX ) /* n -= 2 */
318 ADD_L ( CONST(2), EBX ) /* mask += 2 */
319 ADD_L ( CONST(8), EDI ) /* rgba += 2 */
320 ADD_L ( CONST(8), ESI ) /* dest += 2 */
321 CMP_L ( CONST(2), ECX )
322 JAE ( LLBL (GMBT_loop_begin) )
323
324 LLBL (GMBT_loop_end):
325
326 CMP_L ( CONST(1), ECX )
327 JB ( LLBL (GMBT_done) )
328
329 CMP_B ( CONST(0), REGIND(EBX) ) /* *mask == 0 */
330 JE ( LLBL (GMBT_done) )
331
332 PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
333
334 MOVD ( REGIND(ESI), MM1 ) /* | | | | qa1 | qb1 | qg1 | qr1 */
335 MOVD ( REGIND(EDI), MM2 ) /* | | | | pa1 | pb1 | pg1 | pr1 */
336
337 PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
338 PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
339
340 MOVQ ( MM2, MM3 )
341
342 PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
343 PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
344
345 #if GMBT_ALPHA_PLUS_ONE
346 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
347
348 PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
349 #endif
350
351 #if GMBT_SIGNED_ARITHMETIC
352 PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
353
354 PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
355
356 #if GMBT_ROUNDOFF
357 MOVQ ( MM2, MM4 )
358 #endif
359
360 PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
361
362 #if GMBT_ROUNDOFF
363 PSRLW ( CONST(15), MM4 ) /* q1 > p1 ? 1 : 0 */
364
365 PSLLW ( CONST(8), MM4 ) /* q1 > p1 ? 0x100 : 0 */
366
367 PSUBW ( MM4, MM2 ) /* t1 -=? 0x100 */
368 #endif
369
370 #else
371 PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
372 PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
373 MOVQ ( MM4, MM0 )
374
375 PMULLW ( MM3, MM2 ) /* p1*pa1 */
376
377 PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
378
379 PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
380
381 PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
382 #endif
383
384 #if GMBT_ROUNDOFF
385 MOVQ ( CONTENT(const_80), MM4 )
386
387 PADDW ( MM4, MM2 ) /* t1 += 0x80 */
388 #endif
389
390 #if GMBT_GEOMETRIC_SERIES
391 MOVQ ( MM2, MM3 )
392
393 PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
394
395 PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
396 #endif
397
398 #if GMBT_SIGNED_ARITHMETIC
399 PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
400 #endif
401
402 PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
403
404 PACKUSWB ( MM0, MM2 ) /* | | | | sa1 | sb1 | sg1 | sr1 */
405 MOVD ( MM2, REGIND(EDI) )
406
407 LLBL (GMBT_done):
408
409 EMMS
410
411 LLBL (GMBT_return):
412
413 POP_L ( EBX )
414 POP_L ( EDI )
415 POP_L ( ESI )
416 MOV_L ( EBP, ESP )
417 POP_L ( EBP )
418 RET