vega: remove unused vars
[mesa.git] / src/gallium/state_trackers/vega/vg_translate.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27 #include "vg_translate.h"
28
29 #include "pipe/p_format.h"
30 #include "util/u_pack_color.h"
31
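/*
 * Pack a span of 'n' floating-point RGBA pixels (components in [0, 1])
 * into 'dstAddr', using the pixel layout implied by 'dstFormat'.
 * Formats whose case below is an empty "break" fall through to the
 * "Not implemented" assertion at the end of the function.
 */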
32 void _vega_pack_rgba_span_float(struct vg_context *ctx,
33 VGuint n, VGfloat rgba[][4],
34 VGImageFormat dstFormat,
35 void *dstAddr)
36 {
37 VGint i;
38
39 switch (dstFormat) {
40 case VG_sRGBX_8888: {
41 VGint *dst = (VGint *)dstAddr;
42 for (i = 0; i < n; ++i) {
43 VGubyte r, g, b ,a;
44 r = float_to_ubyte(rgba[i][0]);
45 g = float_to_ubyte(rgba[i][1]);
46 b = float_to_ubyte(rgba[i][2]);
47 a = 255;
48 dst[i] = r << 24 | g << 16 | b << 8 | a;
49 }
50 return;
51 }
52 break;
53 case VG_sRGBA_8888: {
54 VGint *dst = (VGint *)dstAddr;
55 for (i = 0; i < n; ++i) {
56 VGubyte r, g, b ,a;
57 r = float_to_ubyte(rgba[i][0]);
58 g = float_to_ubyte(rgba[i][1]);
59 b = float_to_ubyte(rgba[i][2]);
60 a = float_to_ubyte(rgba[i][3]);
61 dst[i] = r << 24 | g << 16 | b << 8 | a;
62 }
63 return;
64 }
65 break;
66 case VG_sRGBA_8888_PRE: {
67 VGint *dst = (VGint *)dstAddr;
68 for (i = 0; i < n; ++i) {
69 VGubyte r, g, b ,a;
70 r = float_to_ubyte(rgba[i][0]);
71 g = float_to_ubyte(rgba[i][1]);
72 b = float_to_ubyte(rgba[i][2]);
73 a = float_to_ubyte(rgba[i][3]);
74 dst[i] = r << 24 | g << 16 | b << 8 | a;
75 }
76 return;
77 }
78 break;
79 case VG_sRGB_565: {
80 VGshort *dst = (VGshort *)dstAddr;
81 for (i = 0; i < n; ++i) {
82 VGubyte r, g, b;
83 r = float_to_ubyte(rgba[i][0]);
84 g = float_to_ubyte(rgba[i][1]);
85 b = float_to_ubyte(rgba[i][2]);
 86             r >>= 3;   /* keep the top 5 bits */
 87             g >>= 2;   /* keep the top 6 bits */
 88             b >>= 3;   /* keep the top 5 bits */
89
90 dst[i] = b | g << 5 | r << 11;
91 }
92 return;
93 }
94 break;
95 case VG_sRGBA_5551: {
96 VGshort *dst = (VGshort *)dstAddr;
97 for (i = 0; i < n; ++i) {
98 VGubyte r, g, b, a;
99 r = float_to_ubyte(rgba[i][0]);
100 g = float_to_ubyte(rgba[i][1]);
101 b = float_to_ubyte(rgba[i][2]);
102 a = float_to_ubyte(rgba[i][3]);
103             r >>= 3;   /* 5 bits */
104             g >>= 3;   /* 5 bits */
105             b >>= 3;   /* 5 bits */
106             a >>= 7;   /* 1 bit */
107
108 dst[i] = a | b << 1 | g << 6 | r << 11;
109 }
110 return;
111 }
112 break;
113 case VG_sRGBA_4444: {
114 VGshort *dst = (VGshort *)dstAddr;
115 for (i = 0; i < n; ++i) {
116 VGubyte r, g, b, a;
117 r = float_to_ubyte(rgba[i][0]);
118 g = float_to_ubyte(rgba[i][1]);
119 b = float_to_ubyte(rgba[i][2]);
120 a = float_to_ubyte(rgba[i][3]);
121             r >>= 4;   /* 4 bits per channel */
122             g >>= 4;
123             b >>= 4;
124             a >>= 4;
125
126 dst[i] = a | b << 4 | g << 8 | r << 12;
127 }
128 return;
129 }
130 break;
131 case VG_sL_8: {
132 VGubyte *dst = (VGubyte *)dstAddr;
133 for (i = 0; i < n; ++i) {
134             VGubyte l;
135             /* convert color to luminance (Rec. 709 weights) */
136             l = float_to_ubyte(0.2126f * rgba[i][0] + 0.7152f * rgba[i][1] + 0.0722f * rgba[i][2]);
137             dst[i] = l;
138 }
139 return;
140 }
141 break;
142 case VG_lRGBX_8888: {
143 VGint *dst = (VGint *)dstAddr;
144 for (i = 0; i < n; ++i) {
145 VGubyte r, g, b ,a;
146 r = float_to_ubyte(rgba[i][0]);
147 g = float_to_ubyte(rgba[i][1]);
148 b = float_to_ubyte(rgba[i][2]);
149 a = 255;
150 dst[i] = r << 24 | g << 16 | b << 8 | a;
151 }
152 return;
153 }
154 break;
155 case VG_lRGBA_8888: {
156 VGint *dst = (VGint *)dstAddr;
157 for (i = 0; i < n; ++i) {
158 VGubyte r, g, b ,a;
159 r = float_to_ubyte(rgba[i][0]);
160 g = float_to_ubyte(rgba[i][1]);
161 b = float_to_ubyte(rgba[i][2]);
162 a = float_to_ubyte(rgba[i][3]);
163 dst[i] = r << 24 | g << 16 | b << 8 | a;
164 }
165 return;
166 }
167 case VG_lRGBA_8888_PRE: {
168 VGint *dst = (VGint *)dstAddr;
169 for (i = 0; i < n; ++i) {
170 VGubyte r, g, b ,a;
171 r = float_to_ubyte(rgba[i][0]);
172 g = float_to_ubyte(rgba[i][1]);
173 b = float_to_ubyte(rgba[i][2]);
174 a = float_to_ubyte(rgba[i][3]);
175 dst[i] = r << 24 | g << 16 | b << 8 | a;
176 }
177 return;
178 }
179 break;
180 case VG_lL_8: {
181 VGubyte *dst = (VGubyte *)dstAddr;
182 for (i = 0; i < n; ++i) {
183             VGubyte l;
184             l = float_to_ubyte(0.2126f * rgba[i][0] + 0.7152f * rgba[i][1] + 0.0722f * rgba[i][2]);
185             dst[i] = l;
186 }
187 return;
188 }
189 break;
190 case VG_A_8: {
191 VGubyte *dst = (VGubyte *)dstAddr;
192 for (i = 0; i < n; ++i) {
193 VGubyte a;
194 a = float_to_ubyte(rgba[i][3]);
195
196 dst[i] = a;
197 }
198 return;
199 }
200 break;
201 case VG_BW_1: {
202 VGshort *dst = (VGshort *)dstAddr;
203 for (i = 0; i < n; ++i) {
204 VGubyte r, g, b, a;
205 VGubyte res;
206 r = float_to_ubyte(rgba[i][0]);
207 g = float_to_ubyte(rgba[i][1]);
208 b = float_to_ubyte(rgba[i][2]);
209 a = float_to_ubyte(rgba[i][3]);
210
211 res = (r + g + b + a)/4;
212 dst[i] = (res & (128));
213 }
214 return;
215 }
216 break;
217 #ifdef OPENVG_VERSION_1_1
218 case VG_A_1: {
219 VGshort *dst = (VGshort *)dstAddr;
220 for (i = 0; i < n; ++i) {
221 VGubyte a;
222 a = float_to_ubyte(rgba[i][3]);
223
224 dst[i] = (a & (128));
225 }
226 return;
227 }
228 break;
229 case VG_A_4: {
230 VGshort *dst = (VGshort *)dstAddr;
231 for (i = 0; i < n; ++i) {
232 VGubyte a;
233 VGubyte res;
234 a = float_to_ubyte(rgba[i][3]);
235
236 res = a/4;
237 dst[i] = (res & (128));
238 }
239 return;
240 }
241 break;
242 #endif
243 case VG_sXRGB_8888:
244 break;
245 case VG_sARGB_8888: {
246 VGint *dst = (VGint *)dstAddr;
247 for (i = 0; i < n; ++i) {
248 VGubyte r, g, b ,a;
249 r = float_to_ubyte(rgba[i][0]);
250 g = float_to_ubyte(rgba[i][1]);
251 b = float_to_ubyte(rgba[i][2]);
252 a = float_to_ubyte(rgba[i][3]);
253 dst[i] = a << 24 | r << 16 | g << 8 | b;
254 }
255 return;
256 }
257 break;
258 case VG_sARGB_8888_PRE: {
259 VGint *dst = (VGint *)dstAddr;
260 for (i = 0; i < n; ++i) {
261 VGubyte r, g, b ,a;
262 r = float_to_ubyte(rgba[i][0]);
263 g = float_to_ubyte(rgba[i][1]);
264 b = float_to_ubyte(rgba[i][2]);
265 a = float_to_ubyte(rgba[i][3]);
266 dst[i] = a << 24 | r << 16 | g << 8 | b;
267 }
268 return;
269 }
270 break;
271 case VG_sARGB_1555:
272 break;
273 case VG_sARGB_4444:
274 break;
275 case VG_lXRGB_8888:
276 break;
277 case VG_lARGB_8888: {
278 VGint *dst = (VGint *)dstAddr;
279 for (i = 0; i < n; ++i) {
280 VGubyte r, g, b ,a;
281 r = float_to_ubyte(rgba[i][0]);
282 g = float_to_ubyte(rgba[i][1]);
283 b = float_to_ubyte(rgba[i][2]);
284 a = float_to_ubyte(rgba[i][3]);
285 dst[i] = a << 24 | r << 16 | g << 8 | b;
286 }
287 return;
288 }
289 break;
290 case VG_lARGB_8888_PRE: {
291 VGint *dst = (VGint *)dstAddr;
292 for (i = 0; i < n; ++i) {
293 VGubyte r, g, b ,a;
294 r = float_to_ubyte(rgba[i][0]);
295 g = float_to_ubyte(rgba[i][1]);
296 b = float_to_ubyte(rgba[i][2]);
297 a = float_to_ubyte(rgba[i][3]);
298 dst[i] = a << 24 | r << 16 | g << 8 | b;
299 }
300 return;
301 }
302 break;
303 case VG_sBGRX_8888: {
304 VGint *dst = (VGint *)dstAddr;
305 for (i = 0; i < n; ++i) {
306 VGubyte r, g, b ,a;
307 r = float_to_ubyte(rgba[i][0]);
308 g = float_to_ubyte(rgba[i][1]);
309 b = float_to_ubyte(rgba[i][2]);
310 a = 0xff;
311 dst[i] = b << 24 | g << 16 | r << 8 | a;
312 }
313 return;
314 }
315 break;
316 case VG_sBGRA_8888: {
317 VGint *dst = (VGint *)dstAddr;
318 for (i = 0; i < n; ++i) {
319 VGubyte r, g, b ,a;
320 r = float_to_ubyte(rgba[i][0]);
321 g = float_to_ubyte(rgba[i][1]);
322 b = float_to_ubyte(rgba[i][2]);
323 a = float_to_ubyte(rgba[i][3]);
324 dst[i] = b << 24 | g << 16 | r << 8 | a;
325 }
326 return;
327 }
328 break;
329 case VG_sBGRA_8888_PRE: {
330 VGint *dst = (VGint *)dstAddr;
331 for (i = 0; i < n; ++i) {
332 VGubyte r, g, b ,a;
333 r = float_to_ubyte(rgba[i][0]);
334 g = float_to_ubyte(rgba[i][1]);
335 b = float_to_ubyte(rgba[i][2]);
336 a = float_to_ubyte(rgba[i][3]);
337 dst[i] = b << 24 | g << 16 | r << 8 | a;
338 }
339 return;
340 }
341 break;
342 case VG_sBGR_565:
343 break;
344 case VG_sBGRA_5551:
345 break;
346 case VG_sBGRA_4444:
347 break;
348 case VG_lBGRX_8888: {
349 VGint *dst = (VGint *)dstAddr;
350 for (i = 0; i < n; ++i) {
351 VGubyte r, g, b ,a;
352 r = float_to_ubyte(rgba[i][0]);
353 g = float_to_ubyte(rgba[i][1]);
354 b = float_to_ubyte(rgba[i][2]);
355 a = 0xff;
356 dst[i] = b << 24 | g << 16 | r << 8 | a;
357 }
358 return;
359 }
360 break;
361 case VG_lBGRA_8888: {
362 VGint *dst = (VGint *)dstAddr;
363 for (i = 0; i < n; ++i) {
364 VGubyte r, g, b ,a;
365 r = float_to_ubyte(rgba[i][0]);
366 g = float_to_ubyte(rgba[i][1]);
367 b = float_to_ubyte(rgba[i][2]);
368 a = float_to_ubyte(rgba[i][3]);
369 dst[i] = b << 24 | g << 16 | r << 8 | a;
370 }
371 return;
372 }
373 break;
374 case VG_lBGRA_8888_PRE: {
375 VGint *dst = (VGint *)dstAddr;
376 for (i = 0; i < n; ++i) {
377 VGubyte r, g, b ,a;
378 r = float_to_ubyte(rgba[i][0]);
379 g = float_to_ubyte(rgba[i][1]);
380 b = float_to_ubyte(rgba[i][2]);
381 a = float_to_ubyte(rgba[i][3]);
382 dst[i] = b << 24 | g << 16 | r << 8 | a;
383 }
384 return;
385 }
386 break;
387 case VG_sXBGR_8888:
388 break;
389 case VG_sABGR_8888: {
390 VGint *dst = (VGint *)dstAddr;
391 for (i = 0; i < n; ++i) {
392 VGubyte r, g, b ,a;
393 r = float_to_ubyte(rgba[i][0]);
394 g = float_to_ubyte(rgba[i][1]);
395 b = float_to_ubyte(rgba[i][2]);
396 a = float_to_ubyte(rgba[i][3]);
397 dst[i] = a << 24 | b << 16 | g << 8 | r;
398 }
399 return;
400 }
401 break;
402 case VG_sABGR_8888_PRE: {
403 VGint *dst = (VGint *)dstAddr;
404 for (i = 0; i < n; ++i) {
405 VGubyte r, g, b ,a;
406 r = float_to_ubyte(rgba[i][0]);
407 g = float_to_ubyte(rgba[i][1]);
408 b = float_to_ubyte(rgba[i][2]);
409 a = float_to_ubyte(rgba[i][3]);
410 dst[i] = a << 24 | b << 16 | g << 8 | r;
411 }
412 return;
413 }
414 break;
415 case VG_sABGR_1555:
416 break;
417 case VG_sABGR_4444:
418 break;
419 case VG_lXBGR_8888:
420 break;
421 case VG_lABGR_8888: {
422 VGint *dst = (VGint *)dstAddr;
423 for (i = 0; i < n; ++i) {
424 VGubyte r, g, b ,a;
425 r = float_to_ubyte(rgba[i][0]);
426 g = float_to_ubyte(rgba[i][1]);
427 b = float_to_ubyte(rgba[i][2]);
428 a = float_to_ubyte(rgba[i][3]);
429 dst[i] = a << 24 | b << 16 | g << 8 | r;
430 }
431 return;
432 }
433 break;
434 case VG_lABGR_8888_PRE: {
435 VGint *dst = (VGint *)dstAddr;
436 for (i = 0; i < n; ++i) {
437 VGubyte r, g, b ,a;
438 r = float_to_ubyte(rgba[i][0]);
439 g = float_to_ubyte(rgba[i][1]);
440 b = float_to_ubyte(rgba[i][2]);
441 a = float_to_ubyte(rgba[i][3]);
442 dst[i] = a << 24 | b << 16 | g << 8 | r;
443 }
444 return;
445 }
446 break;
447 default:
448 assert(!"Unknown ReadPixels format");
449 break;
450 }
451 assert(!"Not implemented ReadPixels format");
452 }
453
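/*
 * Unpack a span of 'n' pixels stored in 'dataFormat', starting at pixel
 * 'offset' within 'data', into floating-point RGBA values in [0, 1].
 * Sub-byte formats (VG_BW_1, VG_A_1, VG_A_4) are expanded one bit or
 * nibble at a time, least significant bits first.
 */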
454 void _vega_unpack_float_span_rgba(struct vg_context *ctx,
455 VGuint n,
456 VGuint offset,
457 const void * data,
458 VGImageFormat dataFormat,
459 VGfloat rgba[][4])
460 {
461 VGint i;
462 union util_color uc;
463
464 switch (dataFormat) {
465 case VG_sRGBX_8888: {
466 VGuint *src = (VGuint *)data;
467 src += offset;
468 for (i = 0; i < n; ++i) {
469 VGubyte r, g, b ,a;
470 r = (*src >> 24) & 0xff;
471 g = (*src >> 16) & 0xff;
472 b = (*src >> 8) & 0xff;
473 a = 0xff;
474
475 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
476 rgba[i][0] = uc.f[0];
477 rgba[i][1] = uc.f[1];
478 rgba[i][2] = uc.f[2];
479 rgba[i][3] = uc.f[3];
480 ++src;
481 }
482 }
483 return;
484 case VG_sRGBA_8888: {
485 VGuint *src = (VGuint *)data;
486 src += offset;
487 for (i = 0; i < n; ++i) {
488 VGubyte r, g, b ,a;
489 r = (*src >> 24) & 0xff;
490 g = (*src >> 16) & 0xff;
491 b = (*src >> 8) & 0xff;
492 a = (*src >> 0) & 0xff;
493
494 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
495 rgba[i][0] = uc.f[0];
496 rgba[i][1] = uc.f[1];
497 rgba[i][2] = uc.f[2];
498 rgba[i][3] = uc.f[3];
499 ++src;
500 }
501 return;
502 }
503 break;
504 case VG_sRGBA_8888_PRE: {
505 VGint *src = (VGint *)data;
506 src += offset;
507 for (i = 0; i < n; ++i) {
508 VGubyte r, g, b ,a;
509 r = (*src >> 24) & 0xff;
510 g = (*src >> 16) & 0xff;
511 b = (*src >> 8) & 0xff;
512 a = (*src >> 0) & 0xff;
513
514 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
515 rgba[i][0] = uc.f[0];
516 rgba[i][1] = uc.f[1];
517 rgba[i][2] = uc.f[2];
518 rgba[i][3] = uc.f[3];
519 ++src;
520 }
521 return;
522 }
523 break;
524 case VG_sRGB_565: {
525 VGshort *src = (VGshort *)data;
526 src += offset;
527 for (i = 0; i < n; ++i) {
528 VGfloat clr[4];
529                clr[0] = ((*src >> 11) & 31)/31.;
530                clr[1] = ((*src >> 5) & 63)/63.;
531 clr[2] = ((*src >> 0) & 31)/31.;
532 clr[3] = 1.f;
533
534 util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
535 rgba[i][0] = uc.f[0];
536 rgba[i][1] = uc.f[1];
537 rgba[i][2] = uc.f[2];
538 rgba[i][3] = uc.f[3];
539 ++src;
540 }
541 }
542 return;
543 case VG_sRGBA_5551: {
544 VGshort *src = (VGshort *)data;
545 src += offset;
546 for (i = 0; i < n; ++i) {
547 VGfloat clr[4];
548                clr[0] = ((*src >> 11) & 31)/31.;
549                clr[1] = ((*src >> 6) & 31)/31.;
550 clr[2] = ((*src >> 1) & 31)/31.;
551 clr[3] = ((*src >> 0) & 1)/1.;
552
553 util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
554 rgba[i][0] = uc.f[0];
555 rgba[i][1] = uc.f[1];
556 rgba[i][2] = uc.f[2];
557 rgba[i][3] = uc.f[3];
558 ++src;
559 }
560 }
561 return;
562 case VG_sRGBA_4444: {
563 VGshort *src = (VGshort *)data;
564 src += offset;
565 for (i = 0; i < n; ++i) {
566 VGfloat clr[4];
567 clr[0] = ((*src >> 12) & 15)/15.;
568 clr[1] = ((*src >> 8) & 15)/15.;
569 clr[2] = ((*src >> 4) & 15)/15.;
570 clr[3] = ((*src >> 0) & 15)/15.;
571
572 util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
573 rgba[i][0] = uc.f[0];
574 rgba[i][1] = uc.f[1];
575 rgba[i][2] = uc.f[2];
576 rgba[i][3] = uc.f[3];
577 ++src;
578 }
579 }
580 return;
581 case VG_sL_8: {
582 VGubyte *src = (VGubyte *)data;
583 src += offset;
584 for (i = 0; i < n; ++i) {
585             util_pack_color_ub(*src, *src, *src, 0xff, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
586 rgba[i][0] = uc.f[0];
587 rgba[i][1] = uc.f[1];
588 rgba[i][2] = uc.f[2];
589 rgba[i][3] = uc.f[3];
590 ++src;
591 }
592 }
593 return;
594 case VG_lRGBX_8888: {
595 VGuint *src = (VGuint *)data;
596 src += offset;
597 for (i = 0; i < n; ++i) {
598 VGubyte r, g, b ,a;
599 r = (*src >> 24) & 0xff;
600 g = (*src >> 16) & 0xff;
601 b = (*src >> 8) & 0xff;
602 a = 0xff;
603
604 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
605 rgba[i][0] = uc.f[0];
606 rgba[i][1] = uc.f[1];
607 rgba[i][2] = uc.f[2];
608 rgba[i][3] = uc.f[3];
609 ++src;
610 }
611 }
612 return;
613 case VG_lRGBA_8888: {
614 VGint *src = (VGint *)data;
615 src += offset;
616 for (i = 0; i < n; ++i) {
617 VGubyte r, g, b ,a;
618 r = (*src >> 24) & 0xff;
619 g = (*src >> 16) & 0xff;
620 b = (*src >> 8) & 0xff;
621 a = (*src >> 0) & 0xff;
622
623 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
624 rgba[i][0] = uc.f[0];
625 rgba[i][1] = uc.f[1];
626 rgba[i][2] = uc.f[2];
627 rgba[i][3] = uc.f[3];
628 ++src;
629 }
630 return;
631 }
632 break;
633 case VG_lRGBA_8888_PRE: {
634 VGint *src = (VGint *)data;
635 src += offset;
636 for (i = 0; i < n; ++i) {
637 VGubyte r, g, b ,a;
638 r = (*src >> 24) & 0xff;
639 g = (*src >> 16) & 0xff;
640 b = (*src >> 8) & 0xff;
641 a = (*src >> 0) & 0xff;
642
643 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
644 rgba[i][0] = uc.f[0];
645 rgba[i][1] = uc.f[1];
646 rgba[i][2] = uc.f[2];
647 rgba[i][3] = uc.f[3];
648 ++src;
649 }
650 return;
651 }
652 break;
653 case VG_lL_8: {
654 VGubyte *src = (VGubyte *)data;
655 src += offset;
656 for (i = 0; i < n; ++i) {
657             util_pack_color_ub(*src, *src, *src, 0xff, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
658 rgba[i][0] = uc.f[0];
659 rgba[i][1] = uc.f[1];
660 rgba[i][2] = uc.f[2];
661 rgba[i][3] = uc.f[3];
662 ++src;
663 }
664 }
665 return;
666 case VG_A_8: {
667 VGubyte *src = (VGubyte *)data;
668 src += offset;
669 for (i = 0; i < n; ++i) {
670 util_pack_color_ub(0xff, 0xff, 0xff, *src, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
671 rgba[i][0] = uc.f[0];
672 rgba[i][1] = uc.f[1];
673 rgba[i][2] = uc.f[2];
674 rgba[i][3] = uc.f[3];
675 ++src;
676 }
677 }
678 return;
679 case VG_BW_1: {
680 VGubyte *src = (VGubyte *)data;
681 src += offset;
682 for (i = 0; i < n; i += 8) {
683 VGfloat clr[4];
684 VGint j;
685             for (j = 0; j < 8 && i + j < n; ++j) {
686 VGint shift = j;
687 clr[0] = (((*src) & (1<<shift)) >> shift);
688 clr[1] = clr[0];
689 clr[2] = clr[0];
690 clr[3] = 1.f;
691
692 util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
693 rgba[i+j][0] = uc.f[0];
694 rgba[i+j][1] = uc.f[1];
695 rgba[i+j][2] = uc.f[2];
696 rgba[i+j][3] = uc.f[3];
697 }
698 ++src;
699 }
700 }
701 return;
702 #ifdef OPENVG_VERSION_1_1
703 case VG_A_1: {
704 VGubyte *src = (VGubyte *)data;
705 src += offset;
706 for (i = 0; i < n; i += 8) {
707 VGfloat clr[4];
708 VGint j;
709             for (j = 0; j < 8 && i + j < n; ++j) {
710 VGint shift = j;
711 clr[0] = 0.f;
712 clr[1] = 0.f;
713 clr[2] = 0.f;
714 clr[3] = (((*src) & (1<<shift)) >> shift);
715
716 util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
717 rgba[i+j][0] = uc.f[0];
718 rgba[i+j][1] = uc.f[1];
719 rgba[i+j][2] = uc.f[2];
720 rgba[i+j][3] = uc.f[3];
721 }
722 ++src;
723 }
724 }
725 return;
726 case VG_A_4: {
727 VGubyte *src = (VGubyte *)data;
728 src += offset/2;
729 for (i = 0; i < n; i += 2) {
730 VGfloat clr[4];
731 VGint j;
732             for (j = 0; j < 2 && i + j < n; ++j) {
733 VGint bitter, shift;
734 if (j == 0) {
735 bitter = 0x0f;
736 shift = 0;
737 } else {
738 bitter = 0xf0;
739 shift = 4;
740 }
741 clr[0] = 0.f;
742 clr[1] = 0.f;
743 clr[2] = 0.f;
744                   clr[3] = (((*src) & (bitter)) >> shift)/15.;
745
746 util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
747 rgba[i+j][0] = uc.f[0];
748 rgba[i+j][1] = uc.f[1];
749 rgba[i+j][2] = uc.f[2];
750 rgba[i+j][3] = uc.f[3];
751 }
752 ++src;
753 }
754 }
755 return;
756 #endif
757 case VG_sXRGB_8888:
758 break;
759 case VG_sARGB_8888: {
760 VGuint *src = (VGuint *)data;
761 src += offset;
762 for (i = 0; i < n; ++i) {
763 VGubyte r, g, b ,a;
764 a = (*src >> 24) & 0xff;
765 r = (*src >> 16) & 0xff;
766 g = (*src >> 8) & 0xff;
767 b = (*src >> 0) & 0xff;
768
769 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
770 rgba[i][0] = uc.f[0];
771 rgba[i][1] = uc.f[1];
772 rgba[i][2] = uc.f[2];
773 rgba[i][3] = uc.f[3];
774 ++src;
775 }
776 return;
777 }
778 break;
779 case VG_sARGB_8888_PRE: {
780 VGuint *src = (VGuint *)data;
781 src += offset;
782 for (i = 0; i < n; ++i) {
783 VGubyte r, g, b ,a;
784 a = (*src >> 24) & 0xff;
785 r = (*src >> 16) & 0xff;
786 g = (*src >> 8) & 0xff;
787 b = (*src >> 0) & 0xff;
788
789 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
790 rgba[i][0] = uc.f[0];
791 rgba[i][1] = uc.f[1];
792 rgba[i][2] = uc.f[2];
793 rgba[i][3] = uc.f[3];
794 ++src;
795 }
796 return;
797 }
798 break;
799 case VG_sARGB_1555:
800 break;
801 case VG_sARGB_4444:
802 break;
803 case VG_lXRGB_8888:
804 break;
805 case VG_lARGB_8888: {
806 VGint *src = (VGint *)data;
807 src += offset;
808 for (i = 0; i < n; ++i) {
809 VGubyte r, g, b ,a;
810 a = (*src >> 24) & 0xff;
811 r = (*src >> 16) & 0xff;
812 g = (*src >> 8) & 0xff;
813 b = (*src >> 0) & 0xff;
814
815 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
816 rgba[i][0] = uc.f[0];
817 rgba[i][1] = uc.f[1];
818 rgba[i][2] = uc.f[2];
819 rgba[i][3] = uc.f[3];
820 ++src;
821 }
822 return;
823 }
824 break;
825 case VG_lARGB_8888_PRE: {
826 VGint *src = (VGint *)data;
827 src += offset;
828 for (i = 0; i < n; ++i) {
829 VGubyte r, g, b ,a;
830 a = (*src >> 24) & 0xff;
831 r = (*src >> 16) & 0xff;
832 g = (*src >> 8) & 0xff;
833 b = (*src >> 0) & 0xff;
834
835 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
836 rgba[i][0] = uc.f[0];
837 rgba[i][1] = uc.f[1];
838 rgba[i][2] = uc.f[2];
839 rgba[i][3] = uc.f[3];
840 ++src;
841 }
842 return;
843 }
844 break;
845 case VG_sBGRX_8888:
846 break;
847 case VG_sBGRA_8888: {
848 VGuint *src = (VGuint *)data;
849 src += offset;
850 for (i = 0; i < n; ++i) {
851 VGubyte r, g, b ,a;
852 b = (*src >> 24) & 0xff;
853 g = (*src >> 16) & 0xff;
854 r = (*src >> 8) & 0xff;
855 a = (*src >> 0) & 0xff;
856
857 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
858 rgba[i][0] = uc.f[0];
859 rgba[i][1] = uc.f[1];
860 rgba[i][2] = uc.f[2];
861 rgba[i][3] = uc.f[3];
862 ++src;
863 }
864 return;
865 }
866 break;
867 case VG_sBGRA_8888_PRE: {
868 VGuint *src = (VGuint *)data;
869 src += offset;
870 for (i = 0; i < n; ++i) {
871 VGubyte r, g, b ,a;
872 b = (*src >> 24) & 0xff;
873 g = (*src >> 16) & 0xff;
874 r = (*src >> 8) & 0xff;
875 a = (*src >> 0) & 0xff;
876
877 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
878 rgba[i][0] = uc.f[0];
879 rgba[i][1] = uc.f[1];
880 rgba[i][2] = uc.f[2];
881 rgba[i][3] = uc.f[3];
882 ++src;
883 }
884 return;
885 }
886 break;
887 case VG_sBGR_565:
888 break;
889 case VG_sBGRA_5551:
890 break;
891 case VG_sBGRA_4444:
892 break;
893 case VG_lBGRX_8888:
894 break;
895 case VG_lBGRA_8888: {
896 VGuint *src = (VGuint *)data;
897 src += offset;
898 for (i = 0; i < n; ++i) {
899 VGubyte r, g, b ,a;
900 b = (*src >> 24) & 0xff;
901 g = (*src >> 16) & 0xff;
902 r = (*src >> 8) & 0xff;
903 a = (*src >> 0) & 0xff;
904
905 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
906 rgba[i][0] = uc.f[0];
907 rgba[i][1] = uc.f[1];
908 rgba[i][2] = uc.f[2];
909 rgba[i][3] = uc.f[3];
910 ++src;
911 }
912 return;
913 }
914 break;
915 case VG_lBGRA_8888_PRE: {
916 VGuint *src = (VGuint *)data;
917 src += offset;
918 for (i = 0; i < n; ++i) {
919 VGubyte r, g, b ,a;
920 b = (*src >> 24) & 0xff;
921 g = (*src >> 16) & 0xff;
922 r = (*src >> 8) & 0xff;
923 a = (*src >> 0) & 0xff;
924
925 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
926 rgba[i][0] = uc.f[0];
927 rgba[i][1] = uc.f[1];
928 rgba[i][2] = uc.f[2];
929 rgba[i][3] = uc.f[3];
930 ++src;
931 }
932 return;
933 }
934 break;
935 case VG_sXBGR_8888:
936 break;
937 case VG_sABGR_8888: {
938 VGuint *src = (VGuint *)data;
939 src += offset;
940 for (i = 0; i < n; ++i) {
941 VGubyte r, g, b ,a;
942 a = (*src >> 24) & 0xff;
943 b = (*src >> 16) & 0xff;
944 g = (*src >> 8) & 0xff;
945 r = (*src >> 0) & 0xff;
946
947 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
948 rgba[i][0] = uc.f[0];
949 rgba[i][1] = uc.f[1];
950 rgba[i][2] = uc.f[2];
951 rgba[i][3] = uc.f[3];
952 ++src;
953 }
954 return;
955 }
956 break;
957 case VG_sABGR_8888_PRE: {
958 VGuint *src = (VGuint *)data;
959 src += offset;
960 for (i = 0; i < n; ++i) {
961 VGubyte r, g, b ,a;
962 a = (*src >> 24) & 0xff;
963 b = (*src >> 16) & 0xff;
964 g = (*src >> 8) & 0xff;
965 r = (*src >> 0) & 0xff;
966
967 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
968 rgba[i][0] = uc.f[0];
969 rgba[i][1] = uc.f[1];
970 rgba[i][2] = uc.f[2];
971 rgba[i][3] = uc.f[3];
972 ++src;
973 }
974 return;
975 }
976 break;
977 case VG_sABGR_1555:
978 break;
979 case VG_sABGR_4444:
980 break;
981 case VG_lXBGR_8888:
982 break;
983 case VG_lABGR_8888: {
984 VGuint *src = (VGuint *)data;
985 src += offset;
986 for (i = 0; i < n; ++i) {
987 VGubyte r, g, b ,a;
988 a = (*src >> 24) & 0xff;
989 b = (*src >> 16) & 0xff;
990 g = (*src >> 8) & 0xff;
991 r = (*src >> 0) & 0xff;
992
993 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
994 rgba[i][0] = uc.f[0];
995 rgba[i][1] = uc.f[1];
996 rgba[i][2] = uc.f[2];
997 rgba[i][3] = uc.f[3];
998 ++src;
999 }
1000 return;
1001 }
1002 break;
1003 case VG_lABGR_8888_PRE: {
1004 VGuint *src = (VGuint *)data;
1005 src += offset;
1006 for (i = 0; i < n; ++i) {
1007 VGubyte r, g, b ,a;
1008 a = (*src >> 24) & 0xff;
1009 b = (*src >> 16) & 0xff;
1010 g = (*src >> 8) & 0xff;
1011 r = (*src >> 0) & 0xff;
1012
1013 util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT, &uc);
1014 rgba[i][0] = uc.f[0];
1015 rgba[i][1] = uc.f[1];
1016 rgba[i][2] = uc.f[2];
1017 rgba[i][3] = uc.f[3];
1018 ++src;
1019 }
1020 return;
1021 }
1022 break;
1023 default:
1024 assert(!"Unknown ReadPixels format");
1025 break;
1026 }
1027 assert(!"Not implemented ReadPixels format");
1028 }
1029
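/*
 * Return the size in bytes of a single pixel of 'dataFormat'.
 * VG_BW_1 is reported as 1 byte; VG_A_1 and VG_A_4 are not handled
 * and hit the assertion below.
 */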
1030 VGint _vega_size_for_format(VGImageFormat dataFormat)
1031 {
1032 switch (dataFormat) {
1033 case VG_sRGBX_8888:
1034 case VG_sRGBA_8888:
1035 case VG_sRGBA_8888_PRE:
1036 return 4;
1037 case VG_sRGB_565:
1038 case VG_sRGBA_5551:
1039 case VG_sRGBA_4444:
1040 return 2;
1041 case VG_sL_8:
1042 return 1;
1043 case VG_lRGBX_8888:
1044 case VG_lRGBA_8888:
1045 case VG_lRGBA_8888_PRE:
1046 return 4;
1047 case VG_lL_8:
1048 return 1;
1049 case VG_A_8:
1050 return 1;
1051 case VG_BW_1:
1052 return 1;
1053 #ifdef OPENVG_VERSION_1_1
1054 case VG_A_1:
1055 break;
1056 case VG_A_4:
1057 break;
1058 #endif
1059 case VG_sXRGB_8888:
1060 case VG_sARGB_8888:
1061 case VG_sARGB_8888_PRE:
1062 return 4;
1063 case VG_sARGB_1555:
1064 case VG_sARGB_4444:
1065 return 2;
1066 case VG_lXRGB_8888:
1067 case VG_lARGB_8888:
1068 case VG_lARGB_8888_PRE:
1069 case VG_sBGRX_8888:
1070 case VG_sBGRA_8888:
1071 case VG_sBGRA_8888_PRE:
1072 return 4;
1073 case VG_sBGR_565:
1074 case VG_sBGRA_5551:
1075 case VG_sBGRA_4444:
1076 return 2;
1077 case VG_lBGRX_8888:
1078 case VG_lBGRA_8888:
1079 case VG_lBGRA_8888_PRE:
1080 case VG_sXBGR_8888:
1081 case VG_sABGR_8888:
1082 case VG_sABGR_8888_PRE:
1083 return 4;
1084 case VG_sABGR_1555:
1085 case VG_sABGR_4444:
1086 return 2;
1087 case VG_lXBGR_8888:
1088 case VG_lABGR_8888:
1089 case VG_lABGR_8888_PRE:
1090 return 4;
1091 default:
1092 assert(!"Unknown ReadPixels format");
1093 break;
1094 }
1095 assert(!"Not implemented ReadPixels format");
1096 return 0;
1097 }
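/*
 * Usage sketch (hypothetical caller, for illustration only): round-trip
 * one 64-pixel scanline of VG_sRGBA_8888 data through the helpers above.
 * 'ctx', 'src_pixels' and 'dst_pixels' are assumed to be provided by the
 * caller; they are not defined in this file.
 *
 *    VGfloat rgba[64][4];
 *    _vega_unpack_float_span_rgba(ctx, 64, 0, src_pixels,
 *                                 VG_sRGBA_8888, rgba);
 *    _vega_pack_rgba_span_float(ctx, 64, rgba,
 *                               VG_sRGBA_8888, dst_pixels);
 */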