Move pf_get_block() to u_format auxiliary module.
[mesa.git] / src / gallium / state_trackers / vega / api_filters.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27 #include "VG/openvg.h"
28
29 #include "vg_context.h"
30 #include "image.h"
31 #include "renderer.h"
32 #include "shaders_cache.h"
33 #include "st_inlines.h"
34
35 #include "pipe/p_context.h"
36 #include "pipe/p_state.h"
37 #include "pipe/p_inlines.h"
38 #include "pipe/p_screen.h"
39 #include "pipe/p_shader_tokens.h"
40
41 #include "util/u_format.h"
42 #include "util/u_memory.h"
43
44
45 #include "asm_filters.h"
46
47
/* Aggregates everything execute_filter() needs to run one image filter:
 * the source/destination images, the fragment-shader factory and its
 * argument, the shader constants, and optional sampling state. */
struct filter_info {
   struct vg_image *dst;                /* filter render target */
   struct vg_image *src;                /* filter input image */
   /* creates + binds the fragment shader; caller destroys the result */
   struct vg_shader * (*setup_shader)(struct vg_context *, void *);
   void *user_data;                     /* opaque argument for setup_shader */
   const void *const_buffer;            /* fragment-shader constants */
   VGint const_buffer_len;              /* size of const_buffer in bytes */
   VGTilingMode tiling_mode;            /* edge handling for sampler 0 */
   struct pipe_texture *extra_texture;  /* optional LUT texture (sampler 1) */
};
58
59 static INLINE struct pipe_texture *create_texture_1d(struct vg_context *ctx,
60 const VGuint *color_data,
61 const VGint color_data_len)
62 {
63 struct pipe_context *pipe = ctx->pipe;
64 struct pipe_screen *screen = pipe->screen;
65 struct pipe_texture *tex = 0;
66 struct pipe_texture templ;
67
68 memset(&templ, 0, sizeof(templ));
69 templ.target = PIPE_TEXTURE_1D;
70 templ.format = PIPE_FORMAT_A8R8G8B8_UNORM;
71 templ.last_level = 0;
72 templ.width0 = color_data_len;
73 templ.height0 = 1;
74 templ.depth0 = 1;
75 util_format_get_block(PIPE_FORMAT_A8R8G8B8_UNORM, &templ.block);
76 templ.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
77
78 tex = screen->texture_create(screen, &templ);
79
80 { /* upload color_data */
81 struct pipe_transfer *transfer =
82 screen->get_tex_transfer(screen, tex,
83 0, 0, 0,
84 PIPE_TRANSFER_READ_WRITE ,
85 0, 0, tex->width0, tex->height0);
86 void *map = screen->transfer_map(screen, transfer);
87 memcpy(map, color_data, sizeof(VGint)*color_data_len);
88 screen->transfer_unmap(screen, transfer);
89 screen->tex_transfer_destroy(transfer);
90 }
91
92 return tex;
93 }
94
95 static INLINE struct pipe_surface * setup_framebuffer(struct vg_image *dst)
96 {
97 struct vg_context *ctx = vg_current_context();
98 struct pipe_context *pipe = ctx->pipe;
99 struct pipe_framebuffer_state fb;
100 struct pipe_surface *dst_surf = pipe->screen->get_tex_surface(
101 pipe->screen, dst->texture, 0, 0, 0,
102 PIPE_BUFFER_USAGE_GPU_WRITE);
103
104 /* drawing dest */
105 memset(&fb, 0, sizeof(fb));
106 fb.width = dst->x + dst_surf->width;
107 fb.height = dst->y + dst_surf->height;
108 fb.nr_cbufs = 1;
109 fb.cbufs[0] = dst_surf;
110 {
111 VGint i;
112 for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
113 fb.cbufs[i] = 0;
114 }
115 cso_set_framebuffer(ctx->cso_context, &fb);
116
117 return dst_surf;
118 }
119
120 static void setup_viewport(struct vg_image *dst)
121 {
122 struct vg_context *ctx = vg_current_context();
123 vg_set_viewport(ctx, VEGA_Y0_TOP);
124 }
125
126 static void setup_blend()
127 {
128 struct vg_context *ctx = vg_current_context();
129 struct pipe_blend_state blend;
130 memset(&blend, 0, sizeof(blend));
131 blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
132 blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
133 blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
134 blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
135 if (ctx->state.vg.filter_channel_mask & VG_RED)
136 blend.colormask |= PIPE_MASK_R;
137 if (ctx->state.vg.filter_channel_mask & VG_GREEN)
138 blend.colormask |= PIPE_MASK_G;
139 if (ctx->state.vg.filter_channel_mask & VG_BLUE)
140 blend.colormask |= PIPE_MASK_B;
141 if (ctx->state.vg.filter_channel_mask & VG_ALPHA)
142 blend.colormask |= PIPE_MASK_A;
143 blend.blend_enable = 1;
144 cso_set_blend(ctx->cso_context, &blend);
145 }
146
/**
 * Upload param_bytes of 'buffer' into a freshly allocated constant buffer
 * and bind it to the fragment-shader stage.
 */
static void setup_constant_buffer(struct vg_context *ctx, const void *buffer,
                                  VGint param_bytes)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_constant_buffer *cbuf = &ctx->filter.buffer;

   /* We always need to get a new buffer, to keep the drivers simple and
    * avoid gratuitous rendering synchronization. */
   pipe_buffer_reference(&cbuf->buffer, NULL);

   cbuf->buffer = pipe_buffer_create(pipe->screen, 16,
                                     PIPE_BUFFER_USAGE_CONSTANT,
                                     param_bytes);

   /* NOTE(review): if allocation fails, an empty constant buffer is still
    * bound and the draw reads undefined constants — confirm acceptable. */
   if (cbuf->buffer) {
      st_no_flush_pipe_buffer_write(ctx, cbuf->buffer,
                                    0, param_bytes, buffer);
   }

   ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
}
168
169 static void setup_samplers(struct vg_context *ctx, struct filter_info *info)
170 {
171 struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
172 struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
173 struct pipe_sampler_state sampler[3];
174 int num_samplers = 0;
175 int num_textures = 0;
176
177 samplers[0] = NULL;
178 samplers[1] = NULL;
179 samplers[2] = NULL;
180 samplers[3] = NULL;
181 textures[0] = NULL;
182 textures[1] = NULL;
183 textures[2] = NULL;
184 textures[3] = NULL;
185
186 memset(&sampler[0], 0, sizeof(struct pipe_sampler_state));
187 sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
188 sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
189 sampler[0].wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
190 sampler[0].min_img_filter = PIPE_TEX_MIPFILTER_LINEAR;
191 sampler[0].mag_img_filter = PIPE_TEX_MIPFILTER_LINEAR;
192 sampler[0].normalized_coords = 1;
193
194 switch(info->tiling_mode) {
195 case VG_TILE_FILL:
196 sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
197 sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
198 memcpy(sampler[0].border_color,
199 ctx->state.vg.tile_fill_color,
200 sizeof(VGfloat) * 4);
201 break;
202 case VG_TILE_PAD:
203 sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
204 sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
205 break;
206 case VG_TILE_REPEAT:
207 sampler[0].wrap_s = PIPE_TEX_WRAP_REPEAT;
208 sampler[0].wrap_t = PIPE_TEX_WRAP_REPEAT;
209 break;
210 case VG_TILE_REFLECT:
211 sampler[0].wrap_s = PIPE_TEX_WRAP_MIRROR_REPEAT;
212 sampler[0].wrap_t = PIPE_TEX_WRAP_MIRROR_REPEAT;
213 break;
214 default:
215 debug_assert(!"Unknown tiling mode");
216 }
217
218 samplers[0] = &sampler[0];
219 textures[0] = info->src->texture;
220 ++num_samplers;
221 ++num_textures;
222
223 if (info->extra_texture) {
224 memcpy(&sampler[1], &sampler[0], sizeof(struct pipe_sampler_state));
225 samplers[1] = &sampler[1];
226 textures[1] = info->extra_texture;
227 ++num_samplers;
228 ++num_textures;
229 }
230
231
232 cso_set_samplers(ctx->cso_context, num_samplers, (const struct pipe_sampler_state **)samplers);
233 cso_set_sampler_textures(ctx->cso_context, num_textures, textures);
234 }
235
236 static struct vg_shader * setup_color_matrix(struct vg_context *ctx, void *user_data)
237 {
238 struct vg_shader *shader =
239 shader_create_from_text(ctx->pipe, color_matrix_asm, 200,
240 PIPE_SHADER_FRAGMENT);
241 cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
242 return shader;
243 }
244
245 static struct vg_shader * setup_convolution(struct vg_context *ctx, void *user_data)
246 {
247 char buffer[1024];
248 VGint num_consts = (VGint)(long)(user_data);
249 struct vg_shader *shader;
250
251 snprintf(buffer, 1023, convolution_asm, num_consts, num_consts / 2 + 1);
252
253 shader = shader_create_from_text(ctx->pipe, buffer, 200,
254 PIPE_SHADER_FRAGMENT);
255
256 cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
257 return shader;
258 }
259
260 static struct vg_shader * setup_lookup(struct vg_context *ctx, void *user_data)
261 {
262 struct vg_shader *shader =
263 shader_create_from_text(ctx->pipe, lookup_asm,
264 200, PIPE_SHADER_FRAGMENT);
265
266 cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
267 return shader;
268 }
269
270
271 static struct vg_shader * setup_lookup_single(struct vg_context *ctx, void *user_data)
272 {
273 char buffer[1024];
274 VGImageChannel channel = (VGImageChannel)(user_data);
275 struct vg_shader *shader;
276
277 switch(channel) {
278 case VG_RED:
279 snprintf(buffer, 1023, lookup_single_asm, "xxxx");
280 break;
281 case VG_GREEN:
282 snprintf(buffer, 1023, lookup_single_asm, "yyyy");
283 break;
284 case VG_BLUE:
285 snprintf(buffer, 1023, lookup_single_asm, "zzzz");
286 break;
287 case VG_ALPHA:
288 snprintf(buffer, 1023, lookup_single_asm, "wwww");
289 break;
290 default:
291 debug_assert(!"Unknown color channel");
292 }
293
294 shader = shader_create_from_text(ctx->pipe, buffer, 200,
295 PIPE_SHADER_FRAGMENT);
296
297 cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
298 return shader;
299 }
300
/**
 * Run one image filter described by 'info'.
 *
 * Saves the CSO state the pass will clobber, binds the framebuffer,
 * viewport, blend state, shader constants, fragment shader and samplers,
 * draws the source texture over the destination rectangle, then restores
 * everything.  The shader created by info->setup_shader is destroyed
 * before returning.
 */
static void execute_filter(struct vg_context *ctx,
                           struct filter_info *info)
{
   struct pipe_surface *dst_surf;
   struct vg_shader *shader;

   /* save every piece of state the setup_* helpers rebind */
   cso_save_framebuffer(ctx->cso_context);
   cso_save_fragment_shader(ctx->cso_context);
   cso_save_viewport(ctx->cso_context);
   cso_save_blend(ctx->cso_context);
   cso_save_samplers(ctx->cso_context);
   cso_save_sampler_textures(ctx->cso_context);

   dst_surf = setup_framebuffer(info->dst);
   setup_viewport(info->dst);
   setup_blend();
   setup_constant_buffer(ctx, info->const_buffer, info->const_buffer_len);
   shader = info->setup_shader(ctx, info->user_data);
   setup_samplers(ctx, info);

   /* identical src and dst rectangles: a 1:1 texel mapping */
   renderer_draw_texture(ctx->renderer,
                         info->src->texture,
                         info->dst->x, info->dst->y,
                         info->dst->x + info->dst->width,
                         info->dst->y + info->dst->height,
                         info->dst->x, info->dst->y,
                         info->dst->x + info->dst->width,
                         info->dst->y + info->dst->height);

   cso_restore_framebuffer(ctx->cso_context);
   cso_restore_fragment_shader(ctx->cso_context);
   cso_restore_viewport(ctx->cso_context);
   cso_restore_blend(ctx->cso_context);
   cso_restore_samplers(ctx->cso_context);
   cso_restore_sampler_textures(ctx->cso_context);

   vg_shader_destroy(ctx, shader);

   /* release the surface reference taken by setup_framebuffer() */
   pipe_surface_reference(&dst_surf, NULL);
}
341
342 void vgColorMatrix(VGImage dst, VGImage src,
343 const VGfloat * matrix)
344 {
345 struct vg_context *ctx = vg_current_context();
346 struct vg_image *d, *s;
347 struct filter_info info;
348
349 if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
350 vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
351 return;
352 }
353 if (!matrix || !is_aligned(matrix)) {
354 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
355 return;
356 }
357
358 d = (struct vg_image*)dst;
359 s = (struct vg_image*)src;
360
361 if (vg_image_overlaps(d, s)) {
362 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
363 return;
364 }
365
366 info.dst = d;
367 info.src = s;
368 info.setup_shader = &setup_color_matrix;
369 info.user_data = NULL;
370 info.const_buffer = matrix;
371 info.const_buffer_len = 20 * sizeof(VGfloat);
372 info.tiling_mode = VG_TILE_PAD;
373 info.extra_texture = 0;
374 execute_filter(ctx, &info);
375 }
376
377 static VGfloat texture_offset(VGfloat width, VGint kernelSize, VGint current, VGint shift)
378 {
379 VGfloat diff = current - shift;
380
381 return diff / width;
382 }
383
384 void vgConvolve(VGImage dst, VGImage src,
385 VGint kernelWidth, VGint kernelHeight,
386 VGint shiftX, VGint shiftY,
387 const VGshort * kernel,
388 VGfloat scale,
389 VGfloat bias,
390 VGTilingMode tilingMode)
391 {
392 struct vg_context *ctx = vg_current_context();
393 VGfloat *buffer;
394 VGint buffer_len;
395 VGint i, j;
396 VGint idx = 0;
397 struct vg_image *d, *s;
398 VGint kernel_size = kernelWidth * kernelHeight;
399 struct filter_info info;
400 const VGint max_kernel_size = vgGeti(VG_MAX_KERNEL_SIZE);
401
402 if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
403 vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
404 return;
405 }
406
407 if (kernelWidth <= 0 || kernelHeight <= 0 ||
408 kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
409 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
410 return;
411 }
412
413 if (!kernel || !is_aligned_to(kernel, 2)) {
414 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
415 return;
416 }
417
418 if (tilingMode < VG_TILE_FILL ||
419 tilingMode > VG_TILE_REFLECT) {
420 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
421 return;
422 }
423
424 d = (struct vg_image*)dst;
425 s = (struct vg_image*)src;
426
427 if (vg_image_overlaps(d, s)) {
428 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
429 return;
430 }
431
432 vg_validate_state(ctx);
433
434 buffer_len = 8 + 2 * 4 * kernel_size;
435 buffer = (VGfloat*)malloc(buffer_len * sizeof(VGfloat));
436
437 buffer[0] = 0.f;
438 buffer[1] = 1.f;
439 buffer[2] = 2.f; /*unused*/
440 buffer[3] = 4.f; /*unused*/
441
442 buffer[4] = kernelWidth * kernelHeight;
443 buffer[5] = scale;
444 buffer[6] = bias;
445 buffer[7] = 0.f;
446
447 idx = 8;
448 for (j = 0; j < kernelHeight; ++j) {
449 for (i = 0; i < kernelWidth; ++i) {
450 VGint index = j * kernelWidth + i;
451 VGfloat x, y;
452
453 x = texture_offset(s->width, kernelWidth, i, shiftX);
454 y = texture_offset(s->height, kernelHeight, j, shiftY);
455
456 buffer[idx + index*4 + 0] = x;
457 buffer[idx + index*4 + 1] = y;
458 buffer[idx + index*4 + 2] = 0.f;
459 buffer[idx + index*4 + 3] = 0.f;
460 }
461 }
462 idx += kernel_size * 4;
463
464 for (j = 0; j < kernelHeight; ++j) {
465 for (i = 0; i < kernelWidth; ++i) {
466 /* transpose the kernel */
467 VGint index = j * kernelWidth + i;
468 VGint kindex = (kernelWidth - i - 1) * kernelHeight + (kernelHeight - j - 1);
469 buffer[idx + index*4 + 0] = kernel[kindex];
470 buffer[idx + index*4 + 1] = kernel[kindex];
471 buffer[idx + index*4 + 2] = kernel[kindex];
472 buffer[idx + index*4 + 3] = kernel[kindex];
473 }
474 }
475
476 info.dst = d;
477 info.src = s;
478 info.setup_shader = &setup_convolution;
479 info.user_data = (void*)(long)(buffer_len/4);
480 info.const_buffer = buffer;
481 info.const_buffer_len = buffer_len * sizeof(VGfloat);
482 info.tiling_mode = tilingMode;
483 info.extra_texture = 0;
484 execute_filter(ctx, &info);
485
486 free(buffer);
487 }
488
489 void vgSeparableConvolve(VGImage dst, VGImage src,
490 VGint kernelWidth,
491 VGint kernelHeight,
492 VGint shiftX, VGint shiftY,
493 const VGshort * kernelX,
494 const VGshort * kernelY,
495 VGfloat scale,
496 VGfloat bias,
497 VGTilingMode tilingMode)
498 {
499 struct vg_context *ctx = vg_current_context();
500 VGshort *kernel;
501 VGint i, j, idx = 0;
502 const VGint max_kernel_size = vgGeti(VG_MAX_SEPARABLE_KERNEL_SIZE);
503
504 if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
505 vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
506 return;
507 }
508
509 if (kernelWidth <= 0 || kernelHeight <= 0 ||
510 kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
511 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
512 return;
513 }
514
515 if (!kernelX || !kernelY ||
516 !is_aligned_to(kernelX, 2) || !is_aligned_to(kernelY, 2)) {
517 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
518 return;
519 }
520 if (tilingMode < VG_TILE_FILL ||
521 tilingMode > VG_TILE_REFLECT) {
522 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
523 return;
524 }
525 kernel = malloc(sizeof(VGshort)*kernelWidth*kernelHeight);
526 for (i = 0; i < kernelWidth; ++i) {
527 for (j = 0; j < kernelHeight; ++j) {
528 kernel[idx] = kernelX[i] * kernelY[j];
529 ++idx;
530 }
531 }
532 vgConvolve(dst, src, kernelWidth, kernelHeight, shiftX, shiftY,
533 kernel, scale, bias, tilingMode);
534 free(kernel);
535 }
536
537 static INLINE VGfloat compute_gaussian_componenet(VGfloat x, VGfloat y,
538 VGfloat stdDeviationX,
539 VGfloat stdDeviationY)
540 {
541 VGfloat mult = 1 / ( 2 * M_PI * stdDeviationX * stdDeviationY);
542 VGfloat e = exp( - ( pow(x, 2)/(2*pow(stdDeviationX, 2)) +
543 pow(y, 2)/(2*pow(stdDeviationY, 2)) ) );
544 return mult * e;
545 }
546
547 static INLINE VGint compute_kernel_size(VGfloat deviation)
548 {
549 VGint size = ceil(2.146 * deviation);
550 if (size > 11)
551 return 11;
552 return size;
553 }
554
/**
 * Fill 'kernel' (width x height taps) with a normalized 2D Gaussian.
 *
 * The first pass writes taps in mirrored order (index
 * (height-j-1)*width + (width-i-1)), i.e. the kernel is stored rotated
 * 180 degrees; the convolution setup's reversed indexing compensates.
 * NOTE(review): the center offset i-(ceil(width/2))-1 uses integer
 * division inside ceil(), so the Gaussian peak is not exactly centered
 * for all sizes — confirm this is intended.
 */
static void compute_gaussian_kernel(VGfloat *kernel,
                                    VGint width, VGint height,
                                    VGfloat stdDeviationX,
                                    VGfloat stdDeviationY)
{
   VGint i, j;
   VGfloat scale = 0.0f;

   for (j = 0; j < height; ++j) {
      for (i = 0; i < width; ++i) {
         VGint idx = (height - j -1) * width + (width - i -1);
         kernel[idx] = compute_gaussian_componenet(i-(ceil(width/2))-1,
                                                   j-ceil(height/2)-1,
                                                   stdDeviationX, stdDeviationY);
         /* accumulate the sum of all taps for normalization below */
         scale += kernel[idx];
      }
   }

   /* normalize so the taps sum to 1 */
   for (j = 0; j < height; ++j) {
      for (i = 0; i < width; ++i) {
         VGint idx = j * width + i;
         kernel[idx] /= scale;
      }
   }
}
580
581 void vgGaussianBlur(VGImage dst, VGImage src,
582 VGfloat stdDeviationX,
583 VGfloat stdDeviationY,
584 VGTilingMode tilingMode)
585 {
586 struct vg_context *ctx = vg_current_context();
587 struct vg_image *d, *s;
588 VGfloat *buffer, *kernel;
589 VGint kernel_width, kernel_height, kernel_size;
590 VGint buffer_len;
591 VGint idx, i, j;
592 struct filter_info info;
593
594 if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
595 vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
596 return;
597 }
598 if (stdDeviationX <= 0 || stdDeviationY <= 0) {
599 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
600 return;
601 }
602
603 if (tilingMode < VG_TILE_FILL ||
604 tilingMode > VG_TILE_REFLECT) {
605 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
606 return;
607 }
608
609 d = (struct vg_image*)dst;
610 s = (struct vg_image*)src;
611
612 if (vg_image_overlaps(d, s)) {
613 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
614 return;
615 }
616
617 kernel_width = compute_kernel_size(stdDeviationX);
618 kernel_height = compute_kernel_size(stdDeviationY);
619 kernel_size = kernel_width * kernel_height;
620 kernel = malloc(sizeof(VGfloat)*kernel_size);
621 compute_gaussian_kernel(kernel, kernel_width, kernel_height,
622 stdDeviationX, stdDeviationY);
623
624 buffer_len = 8 + 2 * 4 * kernel_size;
625 buffer = (VGfloat*)malloc(buffer_len * sizeof(VGfloat));
626
627 buffer[0] = 0.f;
628 buffer[1] = 1.f;
629 buffer[2] = 2.f; /*unused*/
630 buffer[3] = 4.f; /*unused*/
631
632 buffer[4] = kernel_width * kernel_height;
633 buffer[5] = 1.f;/*scale*/
634 buffer[6] = 0.f;/*bias*/
635 buffer[7] = 0.f;
636
637 idx = 8;
638 for (j = 0; j < kernel_height; ++j) {
639 for (i = 0; i < kernel_width; ++i) {
640 VGint index = j * kernel_width + i;
641 VGfloat x, y;
642
643 x = texture_offset(s->width, kernel_width, i, kernel_width/2);
644 y = texture_offset(s->height, kernel_height, j, kernel_height/2);
645
646 buffer[idx + index*4 + 0] = x;
647 buffer[idx + index*4 + 1] = y;
648 buffer[idx + index*4 + 2] = 0.f;
649 buffer[idx + index*4 + 3] = 0.f;
650 }
651 }
652 idx += kernel_size * 4;
653
654 for (j = 0; j < kernel_height; ++j) {
655 for (i = 0; i < kernel_width; ++i) {
656 /* transpose the kernel */
657 VGint index = j * kernel_width + i;
658 VGint kindex = (kernel_width - i - 1) * kernel_height + (kernel_height - j - 1);
659 buffer[idx + index*4 + 0] = kernel[kindex];
660 buffer[idx + index*4 + 1] = kernel[kindex];
661 buffer[idx + index*4 + 2] = kernel[kindex];
662 buffer[idx + index*4 + 3] = kernel[kindex];
663 }
664 }
665
666 info.dst = d;
667 info.src = s;
668 info.setup_shader = &setup_convolution;
669 info.user_data = (void*)(long)(buffer_len/4);
670 info.const_buffer = buffer;
671 info.const_buffer_len = buffer_len * sizeof(VGfloat);
672 info.tiling_mode = tilingMode;
673 info.extra_texture = 0;
674 execute_filter(ctx, &info);
675
676 free(buffer);
677 free(kernel);
678 }
679
680 void vgLookup(VGImage dst, VGImage src,
681 const VGubyte * redLUT,
682 const VGubyte * greenLUT,
683 const VGubyte * blueLUT,
684 const VGubyte * alphaLUT,
685 VGboolean outputLinear,
686 VGboolean outputPremultiplied)
687 {
688 struct vg_context *ctx = vg_current_context();
689 struct vg_image *d, *s;
690 VGuint color_data[256];
691 VGint i;
692 struct pipe_texture *lut_texture;
693 VGfloat buffer[4];
694 struct filter_info info;
695
696 if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
697 vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
698 return;
699 }
700
701 if (!redLUT || !greenLUT || !blueLUT || !alphaLUT) {
702 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
703 return;
704 }
705
706 d = (struct vg_image*)dst;
707 s = (struct vg_image*)src;
708
709 if (vg_image_overlaps(d, s)) {
710 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
711 return;
712 }
713
714 for (i = 0; i < 256; ++i) {
715 color_data[i] = blueLUT[i] << 24 | greenLUT[i] << 16 |
716 redLUT[i] << 8 | alphaLUT[i];
717 }
718 lut_texture = create_texture_1d(ctx, color_data, 255);
719
720 buffer[0] = 0.f;
721 buffer[1] = 0.f;
722 buffer[2] = 1.f;
723 buffer[3] = 1.f;
724
725 info.dst = d;
726 info.src = s;
727 info.setup_shader = &setup_lookup;
728 info.user_data = NULL;
729 info.const_buffer = buffer;
730 info.const_buffer_len = 4 * sizeof(VGfloat);
731 info.tiling_mode = VG_TILE_PAD;
732 info.extra_texture = lut_texture;
733
734 execute_filter(ctx, &info);
735
736 pipe_texture_reference(&lut_texture, NULL);
737 }
738
739 void vgLookupSingle(VGImage dst, VGImage src,
740 const VGuint * lookupTable,
741 VGImageChannel sourceChannel,
742 VGboolean outputLinear,
743 VGboolean outputPremultiplied)
744 {
745 struct vg_context *ctx = vg_current_context();
746 struct vg_image *d, *s;
747 struct pipe_texture *lut_texture;
748 VGfloat buffer[4];
749 struct filter_info info;
750 VGuint color_data[256];
751 VGint i;
752
753 if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
754 vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
755 return;
756 }
757
758 if (!lookupTable || !is_aligned(lookupTable)) {
759 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
760 return;
761 }
762
763 if (sourceChannel != VG_RED && sourceChannel != VG_GREEN &&
764 sourceChannel != VG_BLUE && sourceChannel != VG_ALPHA) {
765 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
766 return;
767 }
768
769 d = (struct vg_image*)dst;
770 s = (struct vg_image*)src;
771
772 if (vg_image_overlaps(d, s)) {
773 vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
774 return;
775 }
776
777 for (i = 0; i < 256; ++i) {
778 VGuint rgba = lookupTable[i];
779 VGubyte blue, green, red, alpha;
780 red = (rgba & 0xff000000)>>24;
781 green = (rgba & 0x00ff0000)>>16;
782 blue = (rgba & 0x0000ff00)>> 8;
783 alpha = (rgba & 0x000000ff)>> 0;
784 color_data[i] = blue << 24 | green << 16 |
785 red << 8 | alpha;
786 }
787 lut_texture = create_texture_1d(ctx, color_data, 256);
788
789 buffer[0] = 0.f;
790 buffer[1] = 0.f;
791 buffer[2] = 1.f;
792 buffer[3] = 1.f;
793
794 info.dst = d;
795 info.src = s;
796 info.setup_shader = &setup_lookup_single;
797 info.user_data = (void*)sourceChannel;
798 info.const_buffer = buffer;
799 info.const_buffer_len = 4 * sizeof(VGfloat);
800 info.tiling_mode = VG_TILE_PAD;
801 info.extra_texture = lut_texture;
802
803 execute_filter(ctx, &info);
804
805 pipe_texture_reference(&lut_texture, NULL);
806 }