480ced16c515ccd4736dbc78cc8c4a9ab5341f23
[mesa.git] / src / gallium / state_trackers / vega / api_filters.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "VG/openvg.h"

#include "vg_context.h"
#include "image.h"
#include "api.h"
#include "handle.h"
#include "renderer.h"
#include "shaders_cache.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "pipe/p_screen.h"

#include "util/u_format.h"
#include "util/u_sampler.h"
#include "util/u_string.h"

#include "asm_filters.h"

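/**
 * Everything execute_filter() needs for one image-filter pass: source and
 * destination images, a callback that builds the fragment shader, the
 * constant buffer fed to that shader, the tiling mode for out-of-range
 * samples, and an optional second texture (e.g. a lookup table).
 */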
struct filter_info {
   struct vg_image *dst;
   struct vg_image *src;
   struct vg_shader * (*setup_shader)(struct vg_context *, void *);
   void *user_data;
   const void *const_buffer;
   VGint const_buffer_len;
   VGTilingMode tiling_mode;
   struct pipe_sampler_view *extra_texture_view;
};
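/* Create a 1D BGRA8888 texture of color_data_len texels and upload the
 * packed colors into it. */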
static INLINE struct pipe_resource *create_texture_1d(struct vg_context *ctx,
                                                      const VGuint *color_data,
                                                      const VGint color_data_len)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_resource *tex = 0;
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_TEXTURE_1D;
   templ.format = PIPE_FORMAT_B8G8R8A8_UNORM;
   templ.last_level = 0;
   templ.width0 = color_data_len;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;
   templ.bind = PIPE_BIND_SAMPLER_VIEW;

   tex = screen->resource_create(screen, &templ);
   if (!tex)
      return NULL;

   { /* upload color_data */
      struct pipe_transfer *transfer;
      void *map =
         pipe_transfer_map(pipe, tex,
                           0, 0,
                           PIPE_TRANSFER_READ_WRITE,
                           0, 0, tex->width0, tex->height0,
                           &transfer);
      memcpy(map, color_data, sizeof(VGuint) * color_data_len);
      pipe->transfer_unmap(pipe, transfer);
   }

   return tex;
}
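/* Wrap a freshly created 1D color texture in a sampler view.  The extra
 * resource reference is dropped so the texture goes away with the view. */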
static INLINE struct pipe_sampler_view *create_texture_1d_view(struct vg_context *ctx,
                                                               const VGuint *color_data,
                                                               const VGint color_data_len)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_resource *texture;
   struct pipe_sampler_view view_templ;
   struct pipe_sampler_view *view;

   texture = create_texture_1d(ctx, color_data, color_data_len);

   if (!texture)
      return NULL;

   u_sampler_view_default_template(&view_templ, texture, texture->format);
   view = pipe->create_sampler_view(pipe, texture, &view_templ);
   /* want the texture to go away if the view is freed */
   pipe_resource_reference(&texture, NULL);

   return view;
}
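/* Fragment shader for vgColorMatrix: the 4x5 matrix arrives through the
 * constant buffer, so the assembly template is compiled unmodified. */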
static struct vg_shader * setup_color_matrix(struct vg_context *ctx, void *user_data)
{
   struct vg_shader *shader =
      shader_create_from_text(ctx->pipe, color_matrix_asm, 200,
                              PIPE_SHADER_FRAGMENT);
   return shader;
}
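/* Fragment shader for convolution: the number of kernel constants is
 * printed into the assembly template before it is compiled. */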
static struct vg_shader * setup_convolution(struct vg_context *ctx, void *user_data)
{
   char buffer[1024];
   VGint num_consts = (VGint)(long)(user_data);
   struct vg_shader *shader;

   util_snprintf(buffer, 1023, convolution_asm, num_consts, num_consts / 2 + 1);

   shader = shader_create_from_text(ctx->pipe, buffer, 200,
                                    PIPE_SHADER_FRAGMENT);

   return shader;
}
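/* Fragment shader for vgLookup: remaps the source color through the 1D
 * lookup texture bound as the second sampler. */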
static struct vg_shader * setup_lookup(struct vg_context *ctx, void *user_data)
{
   struct vg_shader *shader =
      shader_create_from_text(ctx->pipe, lookup_asm,
                              200, PIPE_SHADER_FRAGMENT);

   return shader;
}

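/* Fragment shader for vgLookupSingle: the swizzle selecting the source
 * channel is patched into the assembly template. */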
static struct vg_shader * setup_lookup_single(struct vg_context *ctx, void *user_data)
{
   char buffer[1024];
   VGImageChannel channel = (VGImageChannel)(long)(user_data);
   struct vg_shader *shader;

   switch(channel) {
   case VG_RED:
      util_snprintf(buffer, 1023, lookup_single_asm, "xxxx");
      break;
   case VG_GREEN:
      util_snprintf(buffer, 1023, lookup_single_asm, "yyyy");
      break;
   case VG_BLUE:
      util_snprintf(buffer, 1023, lookup_single_asm, "zzzz");
      break;
   case VG_ALPHA:
      util_snprintf(buffer, 1023, lookup_single_asm, "wwww");
      break;
   default:
      debug_assert(!"Unknown color channel");
   }

   shader = shader_create_from_text(ctx->pipe, buffer, 200,
                                    PIPE_SHADER_FRAGMENT);

   return shader;
}
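/* Run one filter pass: translate the tiling mode into sampler wrap modes,
 * bind the source image (plus the optional extra texture), build the
 * filter's fragment shader and let the renderer draw dst from src. */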
static void execute_filter(struct vg_context *ctx,
                           struct filter_info *info)
{
   struct vg_shader *shader;
   const struct pipe_sampler_state *samplers[2];
   struct pipe_sampler_view *views[2];
   struct pipe_sampler_state sampler;
   uint tex_wrap;

   memset(&sampler, 0, sizeof(sampler));
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.normalized_coords = 1;

   switch (info->tiling_mode) {
   case VG_TILE_FILL:
      tex_wrap = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
      /* copy border color */
      memcpy(sampler.border_color.f, ctx->state.vg.tile_fill_color,
             sizeof(sampler.border_color));
      break;
   case VG_TILE_PAD:
      tex_wrap = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      break;
   case VG_TILE_REPEAT:
      tex_wrap = PIPE_TEX_WRAP_REPEAT;
      break;
   case VG_TILE_REFLECT:
      tex_wrap = PIPE_TEX_WRAP_MIRROR_REPEAT;
      break;
   default:
      debug_assert(!"Unknown tiling mode");
      tex_wrap = 0;
      break;
   }

   sampler.wrap_s = tex_wrap;
   sampler.wrap_t = tex_wrap;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;

   samplers[0] = samplers[1] = &sampler;
   views[0] = info->src->sampler_view;
   views[1] = info->extra_texture_view;

   shader = info->setup_shader(ctx, info->user_data);

   if (renderer_filter_begin(ctx->renderer,
                             info->dst->sampler_view->texture, VG_TRUE,
                             ctx->state.vg.filter_channel_mask,
                             samplers, views, (info->extra_texture_view) ? 2 : 1,
                             shader->driver, info->const_buffer, info->const_buffer_len)) {
      renderer_filter(ctx->renderer,
                      info->dst->x, info->dst->y, info->dst->width, info->dst->height,
                      info->src->x, info->src->y, info->src->width, info->src->height);
      renderer_filter_end(ctx->renderer);
   }

   vg_shader_destroy(ctx, shader);
}
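/* vgColorMatrix: multiply every source pixel by the caller-supplied 4x5
 * color matrix (20 floats, passed straight through as the constant
 * buffer). */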
void vegaColorMatrix(VGImage dst, VGImage src,
                     const VGfloat * matrix)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   struct filter_info info;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (!matrix || !is_aligned(matrix)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_color_matrix;
   info.user_data = NULL;
   info.const_buffer = matrix;
   info.const_buffer_len = 20 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture_view = NULL;
   execute_filter(ctx, &info);
}
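/* Convert a kernel tap index (relative to the kernel shift) into a
 * normalized texture-coordinate offset within the source image. */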
static VGfloat texture_offset(VGfloat width, VGint kernelSize, VGint current, VGint shift)
{
   VGfloat diff = (VGfloat) (current - shift);

   return diff / width;
}
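/* vgConvolve: general MxN convolution.  The constant buffer holds an
 * 8-float header (the constants 0 and 1, two unused slots, then kernel
 * size, scale and bias), followed by kernel_size vec4 texcoord offsets
 * and kernel_size vec4 kernel weights, the weights stored transposed. */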
void vegaConvolve(VGImage dst, VGImage src,
                  VGint kernelWidth, VGint kernelHeight,
                  VGint shiftX, VGint shiftY,
                  const VGshort * kernel,
                  VGfloat scale,
                  VGfloat bias,
                  VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   VGfloat *buffer;
   VGint buffer_len;
   VGint i, j;
   VGint idx = 0;
   struct vg_image *d, *s;
   VGint kernel_size = kernelWidth * kernelHeight;
   struct filter_info info;
   const VGint max_kernel_size = vegaGeti(VG_MAX_KERNEL_SIZE);

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (kernelWidth <= 0 || kernelHeight <= 0 ||
       kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (!kernel || !is_aligned_to(kernel, 2)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   buffer_len = 8 + 2 * 4 * kernel_size;
   buffer = malloc(buffer_len * sizeof(VGfloat));

   buffer[0] = 0.f;
   buffer[1] = 1.f;
   buffer[2] = 2.f; /*unused*/
   buffer[3] = 4.f; /*unused*/

   buffer[4] = (VGfloat) (kernelWidth * kernelHeight);
   buffer[5] = scale;
   buffer[6] = bias;
   buffer[7] = 0.f;

   idx = 8;
   for (j = 0; j < kernelHeight; ++j) {
      for (i = 0; i < kernelWidth; ++i) {
         VGint index = j * kernelWidth + i;
         VGfloat x, y;

         x = (VGfloat) texture_offset(s->width, kernelWidth, i, shiftX);
         y = (VGfloat) texture_offset(s->height, kernelHeight, j, shiftY);

         buffer[idx + index*4 + 0] = x;
         buffer[idx + index*4 + 1] = y;
         buffer[idx + index*4 + 2] = 0.f;
         buffer[idx + index*4 + 3] = 0.f;
      }
   }
   idx += kernel_size * 4;

   for (j = 0; j < kernelHeight; ++j) {
      for (i = 0; i < kernelWidth; ++i) {
         /* transpose the kernel */
         VGint index = j * kernelWidth + i;
         VGint kindex = (kernelWidth - i - 1) * kernelHeight + (kernelHeight - j - 1);
         buffer[idx + index*4 + 0] = kernel[kindex];
         buffer[idx + index*4 + 1] = kernel[kindex];
         buffer[idx + index*4 + 2] = kernel[kindex];
         buffer[idx + index*4 + 3] = kernel[kindex];
      }
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_convolution;
   info.user_data = (void*)(long)(buffer_len/4);
   info.const_buffer = buffer;
   info.const_buffer_len = buffer_len * sizeof(VGfloat);
   info.tiling_mode = tilingMode;
   info.extra_texture_view = NULL;
   execute_filter(ctx, &info);

   free(buffer);
}
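/* vgSeparableConvolve: expand the two 1D kernels into their outer product
 * and hand the resulting 2D kernel to vegaConvolve. */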
void vegaSeparableConvolve(VGImage dst, VGImage src,
                           VGint kernelWidth,
                           VGint kernelHeight,
                           VGint shiftX, VGint shiftY,
                           const VGshort * kernelX,
                           const VGshort * kernelY,
                           VGfloat scale,
                           VGfloat bias,
                           VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   VGshort *kernel;
   VGint i, j, idx = 0;
   const VGint max_kernel_size = vegaGeti(VG_MAX_SEPARABLE_KERNEL_SIZE);

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (kernelWidth <= 0 || kernelHeight <= 0 ||
       kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (!kernelX || !kernelY ||
       !is_aligned_to(kernelX, 2) || !is_aligned_to(kernelY, 2)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   kernel = malloc(sizeof(VGshort)*kernelWidth*kernelHeight);
   for (i = 0; i < kernelWidth; ++i) {
      for (j = 0; j < kernelHeight; ++j) {
         kernel[idx] = kernelX[i] * kernelY[j];
         ++idx;
      }
   }
   vegaConvolve(dst, src, kernelWidth, kernelHeight, shiftX, shiftY,
                kernel, scale, bias, tilingMode);
   free(kernel);
}
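/* Value of a 2D Gaussian with standard deviations stdDeviationX/Y:
 * 1/(2*pi*sx*sy) * exp(-(x^2/(2*sx^2) + y^2/(2*sy^2))). */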
static INLINE VGfloat compute_gaussian_component(VGfloat x, VGfloat y,
                                                 VGfloat stdDeviationX,
                                                 VGfloat stdDeviationY)
{
   VGfloat mult = 1 / ( 2 * M_PI * stdDeviationX * stdDeviationY);
   VGfloat e = exp( - ( pow(x, 2)/(2*pow(stdDeviationX, 2)) +
                        pow(y, 2)/(2*pow(stdDeviationY, 2)) ) );
   return mult * e;
}
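/* Number of kernel taps for a given standard deviation: roughly 2.146
 * deviations, capped at 11 taps. */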
static INLINE VGint compute_kernel_size(VGfloat deviation)
{
   VGint size = ceil(2.146 * deviation);
   if (size > 11)
      return 11;
   return size;
}
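/* Fill kernel (width x height taps) with samples of the 2D Gaussian and
 * normalize so the weights sum to one. */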
static void compute_gaussian_kernel(VGfloat *kernel,
                                    VGint width, VGint height,
                                    VGfloat stdDeviationX,
                                    VGfloat stdDeviationY)
{
   VGint i, j;
   VGfloat scale = 0.0f;

   for (j = 0; j < height; ++j) {
      for (i = 0; i < width; ++i) {
         VGint idx = (height - j -1) * width + (width - i -1);
         kernel[idx] = compute_gaussian_component(i-(ceil(width/2))-1,
                                                  j-ceil(height/2)-1,
                                                  stdDeviationX, stdDeviationY);
         scale += kernel[idx];
      }
   }

   for (j = 0; j < height; ++j) {
      for (i = 0; i < width; ++i) {
         VGint idx = j * width + i;
         kernel[idx] /= scale;
      }
   }
}
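/* vgGaussianBlur: derive a kernel from the standard deviations with
 * compute_gaussian_kernel and run it through the same convolution shader
 * and constant-buffer layout as vegaConvolve. */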
void vegaGaussianBlur(VGImage dst, VGImage src,
                      VGfloat stdDeviationX,
                      VGfloat stdDeviationY,
                      VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   VGfloat *buffer, *kernel;
   VGint kernel_width, kernel_height, kernel_size;
   VGint buffer_len;
   VGint idx, i, j;
   struct filter_info info;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (stdDeviationX <= 0 || stdDeviationY <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   kernel_width = compute_kernel_size(stdDeviationX);
   kernel_height = compute_kernel_size(stdDeviationY);
   kernel_size = kernel_width * kernel_height;
   kernel = malloc(sizeof(VGfloat)*kernel_size);
   compute_gaussian_kernel(kernel, kernel_width, kernel_height,
                           stdDeviationX, stdDeviationY);

   buffer_len = 8 + 2 * 4 * kernel_size;
   buffer = malloc(buffer_len * sizeof(VGfloat));

   buffer[0] = 0.f;
   buffer[1] = 1.f;
   buffer[2] = 2.f; /*unused*/
   buffer[3] = 4.f; /*unused*/

   buffer[4] = kernel_width * kernel_height;
   buffer[5] = 1.f;/*scale*/
   buffer[6] = 0.f;/*bias*/
   buffer[7] = 0.f;

   idx = 8;
   for (j = 0; j < kernel_height; ++j) {
      for (i = 0; i < kernel_width; ++i) {
         VGint index = j * kernel_width + i;
         VGfloat x, y;

         x = texture_offset(s->width, kernel_width, i, kernel_width/2);
         y = texture_offset(s->height, kernel_height, j, kernel_height/2);

         buffer[idx + index*4 + 0] = x;
         buffer[idx + index*4 + 1] = y;
         buffer[idx + index*4 + 2] = 0.f;
         buffer[idx + index*4 + 3] = 0.f;
      }
   }
   idx += kernel_size * 4;

   for (j = 0; j < kernel_height; ++j) {
      for (i = 0; i < kernel_width; ++i) {
         /* transpose the kernel */
         VGint index = j * kernel_width + i;
         VGint kindex = (kernel_width - i - 1) * kernel_height + (kernel_height - j - 1);
         buffer[idx + index*4 + 0] = kernel[kindex];
         buffer[idx + index*4 + 1] = kernel[kindex];
         buffer[idx + index*4 + 2] = kernel[kindex];
         buffer[idx + index*4 + 3] = kernel[kindex];
      }
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_convolution;
   info.user_data = (void*)(long)(buffer_len/4);
   info.const_buffer = buffer;
   info.const_buffer_len = buffer_len * sizeof(VGfloat);
   info.tiling_mode = tilingMode;
   info.extra_texture_view = NULL;
   execute_filter(ctx, &info);

   free(buffer);
   free(kernel);
}
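/* vgLookup: pack the four per-channel 256-entry lookup tables into a
 * single 1D texture and remap every source pixel through it with the
 * lookup shader (the LUT is bound as the second sampler). */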
void vegaLookup(VGImage dst, VGImage src,
                const VGubyte * redLUT,
                const VGubyte * greenLUT,
                const VGubyte * blueLUT,
                const VGubyte * alphaLUT,
                VGboolean outputLinear,
                VGboolean outputPremultiplied)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   VGuint color_data[256];
   VGint i;
   struct pipe_sampler_view *lut_texture_view;
   VGfloat buffer[4];
   struct filter_info info;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (!redLUT || !greenLUT || !blueLUT || !alphaLUT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   for (i = 0; i < 256; ++i) {
      color_data[i] = blueLUT[i] << 24 | greenLUT[i] << 16 |
                      redLUT[i] << 8 | alphaLUT[i];
   }
   lut_texture_view = create_texture_1d_view(ctx, color_data, 256);

   buffer[0] = 0.f;
   buffer[1] = 0.f;
   buffer[2] = 1.f;
   buffer[3] = 1.f;

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_lookup;
   info.user_data = NULL;
   info.const_buffer = buffer;
   info.const_buffer_len = 4 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture_view = lut_texture_view;

   execute_filter(ctx, &info);

   pipe_sampler_view_reference(&lut_texture_view, NULL);
}
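/* vgLookupSingle: one 256-entry LUT indexed by a single source channel;
 * each packed RGBA entry is repacked into the same channel order used by
 * vegaLookup before upload. */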
void vegaLookupSingle(VGImage dst, VGImage src,
                      const VGuint * lookupTable,
                      VGImageChannel sourceChannel,
                      VGboolean outputLinear,
                      VGboolean outputPremultiplied)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   struct pipe_sampler_view *lut_texture_view;
   VGfloat buffer[4];
   struct filter_info info;
   VGuint color_data[256];
   VGint i;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (!lookupTable || !is_aligned(lookupTable)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (sourceChannel != VG_RED && sourceChannel != VG_GREEN &&
       sourceChannel != VG_BLUE && sourceChannel != VG_ALPHA) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   for (i = 0; i < 256; ++i) {
      VGuint rgba = lookupTable[i];
      VGubyte blue, green, red, alpha;
      red   = (rgba & 0xff000000)>>24;
      green = (rgba & 0x00ff0000)>>16;
      blue  = (rgba & 0x0000ff00)>> 8;
      alpha = (rgba & 0x000000ff)>> 0;
      color_data[i] = blue << 24 | green << 16 |
                      red << 8 | alpha;
   }
   lut_texture_view = create_texture_1d_view(ctx, color_data, 256);

   buffer[0] = 0.f;
   buffer[1] = 0.f;
   buffer[2] = 1.f;
   buffer[3] = 1.f;

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_lookup_single;
   info.user_data = (void*)(long)sourceChannel;
   info.const_buffer = buffer;
   info.const_buffer_len = 4 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture_view = lut_texture_view;

   execute_filter(ctx, &info);

   pipe_sampler_view_reference(&lut_texture_view, NULL);
}