Merge branch 'mesa_7_5_branch' into mesa_7_6_branch
[mesa.git] / src / gallium / state_trackers / vega / api_filters.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "VG/openvg.h"

#include "vg_context.h"
#include "image.h"
#include "renderer.h"
#include "shaders_cache.h"
#include "st_inlines.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "pipe/p_inlines.h"
#include "pipe/p_screen.h"
#include "pipe/p_shader_tokens.h"

#include "util/u_memory.h"


#include "asm_filters.h"

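/*
 * Common description of one image-filter pass: destination and source
 * images, a callback that binds the appropriate fragment shader, the
 * constant buffer fed to that shader, the OpenVG tiling mode used when
 * sampling outside the source, and an optional second texture (e.g. a
 * lookup table) bound to the next sampler unit.
 */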
struct filter_info {
   struct vg_image *dst;
   struct vg_image *src;
   struct vg_shader * (*setup_shader)(struct vg_context *, void *);
   void *user_data;
   const void *const_buffer;
   VGint const_buffer_len;
   VGTilingMode tiling_mode;
   struct pipe_texture *extra_texture;
};

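/*
 * Creates a 1D RGBA texture of width color_data_len and uploads
 * color_data into it.  Used below to hold the 256-entry lookup tables
 * for vgLookup/vgLookupSingle.
 */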
static INLINE struct pipe_texture *create_texture_1d(struct vg_context *ctx,
                                                     const VGuint *color_data,
                                                     const VGint color_data_len)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_texture *tex = 0;
   struct pipe_texture templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_TEXTURE_1D;
   templ.format = PIPE_FORMAT_A8R8G8B8_UNORM;
   templ.last_level = 0;
   templ.width[0] = color_data_len;
   templ.height[0] = 1;
   templ.depth[0] = 1;
   pf_get_block(PIPE_FORMAT_A8R8G8B8_UNORM, &templ.block);
   templ.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;

   tex = screen->texture_create(screen, &templ);

   { /* upload color_data */
      struct pipe_transfer *transfer =
         screen->get_tex_transfer(screen, tex,
                                  0, 0, 0,
                                  PIPE_TRANSFER_READ_WRITE,
                                  0, 0, tex->width[0], tex->height[0]);
      void *map = screen->transfer_map(screen, transfer);
      memcpy(map, color_data, sizeof(VGint) * color_data_len);
      screen->transfer_unmap(screen, transfer);
      screen->tex_transfer_destroy(transfer);
   }

   return tex;
}

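/*
 * Binds the destination image's texture as the single color buffer of
 * the framebuffer state and returns the surface so the caller can
 * release it once the filter pass has been drawn.
 */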
static INLINE struct pipe_surface * setup_framebuffer(struct vg_image *dst)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_framebuffer_state fb;
   struct pipe_surface *dst_surf = pipe->screen->get_tex_surface(
      pipe->screen, dst->texture, 0, 0, 0,
      PIPE_BUFFER_USAGE_GPU_WRITE);

   /* drawing dest */
   memset(&fb, 0, sizeof(fb));
   fb.width = dst->x + dst_surf->width;
   fb.height = dst->y + dst_surf->height;
   fb.nr_cbufs = 1;
   fb.cbufs[0] = dst_surf;
   {
      VGint i;
      for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
         fb.cbufs[i] = 0;
   }
   cso_set_framebuffer(ctx->cso_context, &fb);

   return dst_surf;
}

static void setup_viewport(struct vg_image *dst)
{
   struct vg_context *ctx = vg_current_context();
   vg_set_viewport(ctx, VEGA_Y0_TOP);
}

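/*
 * Sets up pass-through blending (ONE/ZERO factors) and restricts the
 * color writemask to the channels selected by VG_FILTER_CHANNEL_MASK,
 * matching the OpenVG filter semantics.
 */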
static void setup_blend()
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_blend_state blend;
   memset(&blend, 0, sizeof(blend));
   blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
   blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
   if (ctx->state.vg.filter_channel_mask & VG_RED)
      blend.colormask |= PIPE_MASK_R;
   if (ctx->state.vg.filter_channel_mask & VG_GREEN)
      blend.colormask |= PIPE_MASK_G;
   if (ctx->state.vg.filter_channel_mask & VG_BLUE)
      blend.colormask |= PIPE_MASK_B;
   if (ctx->state.vg.filter_channel_mask & VG_ALPHA)
      blend.colormask |= PIPE_MASK_A;
   blend.blend_enable = 1;
   cso_set_blend(ctx->cso_context, &blend);
}

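/*
 * Uploads the filter parameters into a freshly allocated constant
 * buffer and binds it to the fragment shader stage.  A new buffer is
 * created on every call to keep drivers simple and avoid synchronizing
 * with rendering that may still reference the previous one.
 */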
static void setup_constant_buffer(struct vg_context *ctx, const void *buffer,
                                  VGint param_bytes)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_constant_buffer *cbuf = &ctx->filter.buffer;

   /* We always need to get a new buffer, to keep the drivers simple and
    * avoid gratuitous rendering synchronization. */
   pipe_buffer_reference(&cbuf->buffer, NULL);

   cbuf->buffer = pipe_buffer_create(pipe->screen, 16,
                                     PIPE_BUFFER_USAGE_CONSTANT,
                                     param_bytes);

   if (cbuf->buffer) {
      st_no_flush_pipe_buffer_write(ctx, cbuf->buffer,
                                    0, param_bytes, buffer);
   }

   ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
}

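/*
 * Binds the source image (and, if present, the extra lookup texture) to
 * the fragment samplers.  The OpenVG tiling mode is translated to the
 * corresponding Gallium wrap mode; VG_TILE_FILL additionally uses the
 * current tile fill color as the border color.
 */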
static void setup_samplers(struct vg_context *ctx, struct filter_info *info)
{
   struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
   struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
   struct pipe_sampler_state sampler[3];
   int num_samplers = 0;
   int num_textures = 0;

   samplers[0] = NULL;
   samplers[1] = NULL;
   samplers[2] = NULL;
   samplers[3] = NULL;
   textures[0] = NULL;
   textures[1] = NULL;
   textures[2] = NULL;
   textures[3] = NULL;

   memset(&sampler[0], 0, sizeof(struct pipe_sampler_state));
   sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler[0].wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler[0].min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler[0].mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler[0].normalized_coords = 1;

   switch (info->tiling_mode) {
   case VG_TILE_FILL:
      sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
      sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
      memcpy(sampler[0].border_color,
             ctx->state.vg.tile_fill_color,
             sizeof(VGfloat) * 4);
      break;
   case VG_TILE_PAD:
      sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      break;
   case VG_TILE_REPEAT:
      sampler[0].wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler[0].wrap_t = PIPE_TEX_WRAP_REPEAT;
      break;
   case VG_TILE_REFLECT:
      sampler[0].wrap_s = PIPE_TEX_WRAP_MIRROR_REPEAT;
      sampler[0].wrap_t = PIPE_TEX_WRAP_MIRROR_REPEAT;
      break;
   default:
      debug_assert(!"Unknown tiling mode");
   }

   samplers[0] = &sampler[0];
   textures[0] = info->src->texture;
   ++num_samplers;
   ++num_textures;

   if (info->extra_texture) {
      memcpy(&sampler[1], &sampler[0], sizeof(struct pipe_sampler_state));
      samplers[1] = &sampler[1];
      textures[1] = info->extra_texture;
      ++num_samplers;
      ++num_textures;
   }


   cso_set_samplers(ctx->cso_context, num_samplers,
                    (const struct pipe_sampler_state **)samplers);
   cso_set_sampler_textures(ctx->cso_context, num_textures, textures);
}

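/*
 * The setup_* callbacks below compile the matching fragment shader from
 * the shader templates in asm_filters.h and bind it via the cso context.
 * The caller (execute_filter) destroys the returned shader after the
 * filter pass has been drawn.
 */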
static struct vg_shader * setup_color_matrix(struct vg_context *ctx, void *user_data)
{
   struct vg_shader *shader =
      shader_create_from_text(ctx->pipe, color_matrix_asm, 200,
                              PIPE_SHADER_FRAGMENT);
   cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
   return shader;
}

static struct vg_shader * setup_convolution(struct vg_context *ctx, void *user_data)
{
   char buffer[1024];
   VGint num_consts = (VGint)(long)(user_data);
   struct vg_shader *shader;

   snprintf(buffer, 1023, convolution_asm, num_consts, num_consts / 2 + 1);

   shader = shader_create_from_text(ctx->pipe, buffer, 200,
                                    PIPE_SHADER_FRAGMENT);

   cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
   return shader;
}

static struct vg_shader * setup_lookup(struct vg_context *ctx, void *user_data)
{
   struct vg_shader *shader =
      shader_create_from_text(ctx->pipe, lookup_asm,
                              200, PIPE_SHADER_FRAGMENT);

   cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
   return shader;
}


static struct vg_shader * setup_lookup_single(struct vg_context *ctx, void *user_data)
{
   char buffer[1024];
   VGImageChannel channel = (VGImageChannel)(long)(user_data);
   struct vg_shader *shader;

   switch (channel) {
   case VG_RED:
      snprintf(buffer, 1023, lookup_single_asm, "xxxx");
      break;
   case VG_GREEN:
      snprintf(buffer, 1023, lookup_single_asm, "yyyy");
      break;
   case VG_BLUE:
      snprintf(buffer, 1023, lookup_single_asm, "zzzz");
      break;
   case VG_ALPHA:
      snprintf(buffer, 1023, lookup_single_asm, "wwww");
      break;
   default:
      debug_assert(!"Unknown color channel");
   }

   shader = shader_create_from_text(ctx->pipe, buffer, 200,
                                    PIPE_SHADER_FRAGMENT);

   cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
   return shader;
}

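/*
 * Common execution path for all image filters: save the pieces of CSO
 * state that are about to be clobbered, bind framebuffer, viewport,
 * blend, constants, shader and samplers for this pass, draw the source
 * texture over the destination rectangle, then restore the saved state
 * and release the temporary shader and surface.
 */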
static void execute_filter(struct vg_context *ctx,
                           struct filter_info *info)
{
   struct pipe_surface *dst_surf;
   struct vg_shader *shader;

   cso_save_framebuffer(ctx->cso_context);
   cso_save_fragment_shader(ctx->cso_context);
   cso_save_viewport(ctx->cso_context);
   cso_save_blend(ctx->cso_context);
   cso_save_samplers(ctx->cso_context);
   cso_save_sampler_textures(ctx->cso_context);

   dst_surf = setup_framebuffer(info->dst);
   setup_viewport(info->dst);
   setup_blend();
   setup_constant_buffer(ctx, info->const_buffer, info->const_buffer_len);
   shader = info->setup_shader(ctx, info->user_data);
   setup_samplers(ctx, info);

   renderer_draw_texture(ctx->renderer,
                         info->src->texture,
                         info->dst->x, info->dst->y,
                         info->dst->x + info->dst->width,
                         info->dst->y + info->dst->height,
                         info->dst->x, info->dst->y,
                         info->dst->x + info->dst->width,
                         info->dst->y + info->dst->height);

   cso_restore_framebuffer(ctx->cso_context);
   cso_restore_fragment_shader(ctx->cso_context);
   cso_restore_viewport(ctx->cso_context);
   cso_restore_blend(ctx->cso_context);
   cso_restore_samplers(ctx->cso_context);
   cso_restore_sampler_textures(ctx->cso_context);

   vg_shader_destroy(ctx, shader);

   pipe_surface_reference(&dst_surf, NULL);
}

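/*
 * vgColorMatrix: the 5x4 color transform matrix (20 floats) is passed
 * straight through as the fragment shader's constant buffer.
 */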
void vgColorMatrix(VGImage dst, VGImage src,
                   const VGfloat * matrix)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   struct filter_info info;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (!matrix || !is_aligned(matrix)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = (struct vg_image*)dst;
   s = (struct vg_image*)src;

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_color_matrix;
   info.user_data = NULL;
   info.const_buffer = matrix;
   info.const_buffer_len = 20 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture = 0;
   execute_filter(ctx, &info);
}

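/*
 * Converts a kernel tap position (relative to the shift) into a
 * normalized texture-coordinate offset for a source image of the given
 * width (or height).
 */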
static VGfloat texture_offset(VGfloat width, VGint kernelSize, VGint current, VGint shift)
{
   VGfloat diff = current - shift;

   return diff / width;
}

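/*
 * vgConvolve: the constant buffer handed to the convolution shader is
 * laid out as 8 header floats (0, 1, two unused values, the kernel
 * size, scale, bias and padding) followed by kernel_size vec4
 * texture-coordinate offsets and then kernel_size vec4 kernel weights.
 */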
void vgConvolve(VGImage dst, VGImage src,
                VGint kernelWidth, VGint kernelHeight,
                VGint shiftX, VGint shiftY,
                const VGshort * kernel,
                VGfloat scale,
                VGfloat bias,
                VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   VGfloat *buffer;
   VGint buffer_len;
   VGint i, j;
   VGint idx = 0;
   struct vg_image *d, *s;
   VGint kernel_size = kernelWidth * kernelHeight;
   struct filter_info info;
   const VGint max_kernel_size = vgGeti(VG_MAX_KERNEL_SIZE);

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (kernelWidth <= 0 || kernelHeight <= 0 ||
       kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (!kernel || !is_aligned_to(kernel, 2)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = (struct vg_image*)dst;
   s = (struct vg_image*)src;

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   buffer_len = 8 + 2 * 4 * kernel_size;
   buffer = (VGfloat*)malloc(buffer_len * sizeof(VGfloat));

   buffer[0] = 0.f;
   buffer[1] = 1.f;
   buffer[2] = 2.f; /*unused*/
   buffer[3] = 4.f; /*unused*/

   buffer[4] = kernelWidth * kernelHeight;
   buffer[5] = scale;
   buffer[6] = bias;
   buffer[7] = 0.f;

   idx = 8;
   for (j = 0; j < kernelHeight; ++j) {
      for (i = 0; i < kernelWidth; ++i) {
         VGint index = j * kernelWidth + i;
         VGfloat x, y;

         x = texture_offset(s->width, kernelWidth, i, shiftX);
         y = texture_offset(s->height, kernelHeight, j, shiftY);

         buffer[idx + index*4 + 0] = x;
         buffer[idx + index*4 + 1] = y;
         buffer[idx + index*4 + 2] = 0.f;
         buffer[idx + index*4 + 3] = 0.f;
      }
   }
   idx += kernel_size * 4;

   for (j = 0; j < kernelHeight; ++j) {
      for (i = 0; i < kernelWidth; ++i) {
         /* transpose the kernel */
         VGint index = j * kernelWidth + i;
         VGint kindex = (kernelWidth - i - 1) * kernelHeight + (kernelHeight - j - 1);
         buffer[idx + index*4 + 0] = kernel[kindex];
         buffer[idx + index*4 + 1] = kernel[kindex];
         buffer[idx + index*4 + 2] = kernel[kindex];
         buffer[idx + index*4 + 3] = kernel[kindex];
      }
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_convolution;
   info.user_data = (void*)(long)(buffer_len/4);
   info.const_buffer = buffer;
   info.const_buffer_len = buffer_len * sizeof(VGfloat);
   info.tiling_mode = tilingMode;
   info.extra_texture = 0;
   execute_filter(ctx, &info);

   free(buffer);
}

void vgSeparableConvolve(VGImage dst, VGImage src,
                         VGint kernelWidth,
                         VGint kernelHeight,
                         VGint shiftX, VGint shiftY,
                         const VGshort * kernelX,
                         const VGshort * kernelY,
                         VGfloat scale,
                         VGfloat bias,
                         VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   VGshort *kernel;
   VGint i, j, idx = 0;
   const VGint max_kernel_size = vgGeti(VG_MAX_SEPARABLE_KERNEL_SIZE);

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (kernelWidth <= 0 || kernelHeight <= 0 ||
       kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (!kernelX || !kernelY ||
       !is_aligned_to(kernelX, 2) || !is_aligned_to(kernelY, 2)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   kernel = malloc(sizeof(VGshort)*kernelWidth*kernelHeight);
   for (i = 0; i < kernelWidth; ++i) {
      for (j = 0; j < kernelHeight; ++j) {
         kernel[idx] = kernelX[i] * kernelY[j];
         ++idx;
      }
   }
   vgConvolve(dst, src, kernelWidth, kernelHeight, shiftX, shiftY,
              kernel, scale, bias, tilingMode);
   free(kernel);
}

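/*
 * Evaluates the 2D Gaussian
 *   G(x, y) = 1 / (2 * pi * sx * sy) *
 *             exp(-(x^2 / (2 * sx^2) + y^2 / (2 * sy^2)))
 * used to fill the blur kernel below; the kernel is then normalized so
 * that its weights sum to one.
 */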
static INLINE VGfloat compute_gaussian_component(VGfloat x, VGfloat y,
                                                 VGfloat stdDeviationX,
                                                 VGfloat stdDeviationY)
{
   VGfloat mult = 1 / (2 * M_PI * stdDeviationX * stdDeviationY);
   VGfloat e = exp(-(pow(x, 2) / (2 * pow(stdDeviationX, 2)) +
                     pow(y, 2) / (2 * pow(stdDeviationY, 2))));
   return mult * e;
}

static INLINE VGint compute_kernel_size(VGfloat deviation)
{
   VGint size = ceil(2.146 * deviation);
   if (size > 11)
      return 11;
   return size;
}

static void compute_gaussian_kernel(VGfloat *kernel,
                                    VGint width, VGint height,
                                    VGfloat stdDeviationX,
                                    VGfloat stdDeviationY)
{
   VGint i, j;
   VGfloat scale = 0.0f;

   for (j = 0; j < height; ++j) {
      for (i = 0; i < width; ++i) {
         VGint idx = (height - j - 1) * width + (width - i - 1);
         kernel[idx] = compute_gaussian_component(i - (ceil(width/2)) - 1,
                                                  j - ceil(height/2) - 1,
                                                  stdDeviationX, stdDeviationY);
         scale += kernel[idx];
      }
   }

   for (j = 0; j < height; ++j) {
      for (i = 0; i < width; ++i) {
         VGint idx = j * width + i;
         kernel[idx] /= scale;
      }
   }
}

void vgGaussianBlur(VGImage dst, VGImage src,
                    VGfloat stdDeviationX,
                    VGfloat stdDeviationY,
                    VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   VGfloat *buffer, *kernel;
   VGint kernel_width, kernel_height, kernel_size;
   VGint buffer_len;
   VGint idx, i, j;
   struct filter_info info;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (stdDeviationX <= 0 || stdDeviationY <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = (struct vg_image*)dst;
   s = (struct vg_image*)src;

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   kernel_width = compute_kernel_size(stdDeviationX);
   kernel_height = compute_kernel_size(stdDeviationY);
   kernel_size = kernel_width * kernel_height;
   kernel = malloc(sizeof(VGfloat)*kernel_size);
   compute_gaussian_kernel(kernel, kernel_width, kernel_height,
                           stdDeviationX, stdDeviationY);

   buffer_len = 8 + 2 * 4 * kernel_size;
   buffer = (VGfloat*)malloc(buffer_len * sizeof(VGfloat));

   buffer[0] = 0.f;
   buffer[1] = 1.f;
   buffer[2] = 2.f; /*unused*/
   buffer[3] = 4.f; /*unused*/

   buffer[4] = kernel_width * kernel_height;
   buffer[5] = 1.f; /*scale*/
   buffer[6] = 0.f; /*bias*/
   buffer[7] = 0.f;

   idx = 8;
   for (j = 0; j < kernel_height; ++j) {
      for (i = 0; i < kernel_width; ++i) {
         VGint index = j * kernel_width + i;
         VGfloat x, y;

         x = texture_offset(s->width, kernel_width, i, kernel_width/2);
         y = texture_offset(s->height, kernel_height, j, kernel_height/2);

         buffer[idx + index*4 + 0] = x;
         buffer[idx + index*4 + 1] = y;
         buffer[idx + index*4 + 2] = 0.f;
         buffer[idx + index*4 + 3] = 0.f;
      }
   }
   idx += kernel_size * 4;

   for (j = 0; j < kernel_height; ++j) {
      for (i = 0; i < kernel_width; ++i) {
         /* transpose the kernel */
         VGint index = j * kernel_width + i;
         VGint kindex = (kernel_width - i - 1) * kernel_height + (kernel_height - j - 1);
         buffer[idx + index*4 + 0] = kernel[kindex];
         buffer[idx + index*4 + 1] = kernel[kindex];
         buffer[idx + index*4 + 2] = kernel[kindex];
         buffer[idx + index*4 + 3] = kernel[kindex];
      }
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_convolution;
   info.user_data = (void*)(long)(buffer_len/4);
   info.const_buffer = buffer;
   info.const_buffer_len = buffer_len * sizeof(VGfloat);
   info.tiling_mode = tilingMode;
   info.extra_texture = 0;
   execute_filter(ctx, &info);

   free(buffer);
   free(kernel);
}

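/*
 * vgLookup: the four per-channel lookup tables are packed into a
 * 256-entry 1D texture (one byte per channel, as built by the loop
 * below) that the lookup shader samples as a second texture.  The
 * outputLinear/outputPremultiplied flags are currently not honored.
 */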
void vgLookup(VGImage dst, VGImage src,
              const VGubyte * redLUT,
              const VGubyte * greenLUT,
              const VGubyte * blueLUT,
              const VGubyte * alphaLUT,
              VGboolean outputLinear,
              VGboolean outputPremultiplied)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   VGuint color_data[256];
   VGint i;
   struct pipe_texture *lut_texture;
   VGfloat buffer[4];
   struct filter_info info;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (!redLUT || !greenLUT || !blueLUT || !alphaLUT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = (struct vg_image*)dst;
   s = (struct vg_image*)src;

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   for (i = 0; i < 256; ++i) {
      color_data[i] = blueLUT[i] << 24 | greenLUT[i] << 16 |
                      redLUT[i] << 8 | alphaLUT[i];
   }
   lut_texture = create_texture_1d(ctx, color_data, 256);

   buffer[0] = 0.f;
   buffer[1] = 0.f;
   buffer[2] = 1.f;
   buffer[3] = 1.f;

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_lookup;
   info.user_data = NULL;
   info.const_buffer = buffer;
   info.const_buffer_len = 4 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture = lut_texture;

   execute_filter(ctx, &info);

   pipe_texture_reference(&lut_texture, NULL);
}

void vgLookupSingle(VGImage dst, VGImage src,
                    const VGuint * lookupTable,
                    VGImageChannel sourceChannel,
                    VGboolean outputLinear,
                    VGboolean outputPremultiplied)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   struct pipe_texture *lut_texture;
   VGfloat buffer[4];
   struct filter_info info;
   VGuint color_data[256];
   VGint i;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (!lookupTable || !is_aligned(lookupTable)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   if (sourceChannel != VG_RED && sourceChannel != VG_GREEN &&
       sourceChannel != VG_BLUE && sourceChannel != VG_ALPHA) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = (struct vg_image*)dst;
   s = (struct vg_image*)src;

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   for (i = 0; i < 256; ++i) {
      VGuint rgba = lookupTable[i];
      VGubyte blue, green, red, alpha;
      red   = (rgba & 0xff000000) >> 24;
      green = (rgba & 0x00ff0000) >> 16;
      blue  = (rgba & 0x0000ff00) >>  8;
      alpha = (rgba & 0x000000ff) >>  0;
      color_data[i] = blue << 24 | green << 16 |
                      red << 8 | alpha;
   }
   lut_texture = create_texture_1d(ctx, color_data, 256);

   buffer[0] = 0.f;
   buffer[1] = 0.f;
   buffer[2] = 1.f;
   buffer[3] = 1.f;

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_lookup_single;
   info.user_data = (void*)(long)sourceChannel;
   info.const_buffer = buffer;
   info.const_buffer_len = 4 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture = lut_texture;

   execute_filter(ctx, &info);

   pipe_texture_reference(&lut_texture, NULL);
}