/**************************************************************************
 *
 * Copyright 2009 VMware, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "mask.h"

#include "path.h"
#include "image.h"
#include "shaders_cache.h"
#include "renderer.h"
#include "asm_util.h"
#include "st_inlines.h"

#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_inlines.h"
#include "util/u_format.h"
#include "util/u_memory.h"

struct vg_mask_layer {
   struct vg_object base;

   VGint width;
   VGint height;

   struct pipe_texture *texture;
};

static INLINE struct pipe_surface *
alpha_mask_surface(struct vg_context *ctx, int usage)
{
   struct pipe_screen *screen = ctx->pipe->screen;
   struct st_framebuffer *stfb = ctx->draw_buffer;
   return screen->get_tex_surface(screen,
                                  stfb->alpha_mask,
                                  0, 0, 0,
                                  usage);
}

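/* Clip the swidth x sheight source placed at (tx, ty) against the
 * dwidth x dheight destination.  On success, offsets[] receives the
 * source-space x/y offset plus the clipped width/height, and location[]
 * receives the destination x/y plus the same clipped width/height.
 * For example, a 16x16 source at (-4, 10) against a 64x64 destination
 * yields offsets = {4, 0, 12, 16} and location = {0, 10, 12, 16}.
 */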
static INLINE VGboolean
intersect_rectangles(VGint dwidth, VGint dheight,
                     VGint swidth, VGint sheight,
                     VGint tx, VGint ty,
                     VGint twidth, VGint theight,
                     VGint *offsets,
                     VGint *location)
{
   if (tx + twidth <= 0 || tx >= dwidth)
      return VG_FALSE;
   if (ty + theight <= 0 || ty >= dheight)
      return VG_FALSE;

   offsets[0] = 0;
   offsets[1] = 0;
   location[0] = tx;
   location[1] = ty;

   if (tx < 0) {
      offsets[0] -= tx;
      location[0] = 0;

      location[2] = MIN2(tx + swidth, MIN2(dwidth, tx + twidth));
      offsets[2] = location[2];
   } else {
      offsets[2] = MIN2(twidth, MIN2(dwidth - tx, swidth));
      location[2] = offsets[2];
   }

   if (ty < 0) {
      offsets[1] -= ty;
      location[1] = 0;

      location[3] = MIN2(ty + sheight, MIN2(dheight, ty + theight));
      offsets[3] = location[3];
   } else {
      offsets[3] = MIN2(theight, MIN2(dheight - ty, sheight));
      location[3] = offsets[3];
   }

   return VG_TRUE;
}

#if DEBUG_MASKS
static void read_alpha_mask(void *data, VGint dataStride,
                            VGImageFormat dataFormat,
                            VGint sx, VGint sy,
                            VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_screen *screen = pipe->screen;

   struct st_framebuffer *stfb = ctx->draw_buffer;
   struct st_renderbuffer *strb = stfb->alpha_mask;
   struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;

   VGfloat temp[VEGA_MAX_IMAGE_WIDTH][4];
   VGfloat *df = (VGfloat *)temp;
   VGint y = (fb->height - sy) - 1, yStep = -1;
   VGint i;
   VGubyte *dst = (VGubyte *)data;
   VGint xoffset = 0, yoffset = 0;

   /* make sure rendering has completed */
   pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
   if (sx < 0) {
      xoffset = -sx;
      xoffset *= _vega_size_for_format(dataFormat);
      width += sx;
      sx = 0;
   }
   if (sy < 0) {
      yoffset = -sy;
      height += sy;
      sy = 0;
      y = (fb->height - sy) - 1;
      yoffset *= dataStride;
   }

   {
      struct pipe_surface *surf;

      surf = screen->get_tex_surface(screen, strb->texture, 0, 0, 0,
                                     PIPE_BUFFER_USAGE_CPU_READ);

      /* Do a row at a time to flip image data vertically */
      for (i = 0; i < height; i++) {
#if 0
         debug_printf("%d-%d == %d\n", sy, height, y);
#endif
         pipe_get_tile_rgba(surf, sx, y, width, 1, df);
         y += yStep;
         _vega_pack_rgba_span_float(ctx, width, temp, dataFormat,
                                    dst + yoffset + xoffset);
         dst += dataStride;
      }

      pipe_surface_reference(&surf, NULL);
   }
}

void save_alpha_to_file(const char *filename)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
   VGint *data;
   int i, j;

   data = malloc(sizeof(int) * fb->width * fb->height);
   read_alpha_mask(data, fb->width * sizeof(int),
                   VG_sRGBA_8888,
                   0, 0, fb->width, fb->height);
   fprintf(stderr, "/*---------- start */\n");
   fprintf(stderr, "const int image_width = %d;\n",
           fb->width);
   fprintf(stderr, "const int image_height = %d;\n",
           fb->height);
   fprintf(stderr, "const int image_data[] = {\n");
   for (i = 0; i < fb->height; ++i) {
      for (j = 0; j < fb->width; ++j) {
         /* rows are fb->width pixels wide, so index with the width */
         int rgba = data[i * fb->width + j];
         int argb = 0;
         argb = (rgba >> 8);
         argb |= ((rgba & 0xff) << 24);
         fprintf(stderr, "0x%x, ", argb);
      }
      fprintf(stderr, "\n");
   }
   fprintf(stderr, "};\n");
   fprintf(stderr, "/*---------- end */\n");
   free(data);
}
#endif

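/* Bind the given surface as the single color buffer that the mask
 * shaders render into.
 */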
static void setup_mask_framebuffer(struct pipe_surface *surf,
                                   VGint surf_width, VGint surf_height)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_framebuffer_state fb;

   memset(&fb, 0, sizeof(fb));
   fb.width = surf_width;
   fb.height = surf_height;
   fb.nr_cbufs = 1;
   fb.cbufs[0] = surf;
   {
      VGint i;
      for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
         fb.cbufs[i] = 0;
   }
   cso_set_framebuffer(ctx->cso_context, &fb);
}


/* setup shader constants */
static void setup_mask_operation(VGMaskOperation operation)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_constant_buffer *cbuf = &ctx->mask.cbuf;
   const VGint param_bytes = 4 * sizeof(VGfloat);
   const VGfloat ones[4] = {1.f, 1.f, 1.f, 1.f};
   void *shader = 0;

   /* We always need to get a new buffer, to keep the drivers simple and
    * avoid gratuitous rendering synchronization.
    */
   pipe_buffer_reference(&cbuf->buffer, NULL);

   cbuf->buffer = pipe_buffer_create(ctx->pipe->screen, 1,
                                     PIPE_BUFFER_USAGE_CONSTANT,
                                     param_bytes);
   if (cbuf->buffer) {
      st_no_flush_pipe_buffer_write(ctx, cbuf->buffer,
                                    0, param_bytes, ones);
   }

   ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
   switch (operation) {
   case VG_UNION_MASK: {
      if (!ctx->mask.union_fs) {
         ctx->mask.union_fs = shader_create_from_text(ctx->pipe,
                                                      union_mask_asm,
                                                      200,
                                                      PIPE_SHADER_FRAGMENT);
      }
      shader = ctx->mask.union_fs->driver;
   }
      break;
   case VG_INTERSECT_MASK: {
      if (!ctx->mask.intersect_fs) {
         ctx->mask.intersect_fs = shader_create_from_text(ctx->pipe,
                                                          intersect_mask_asm,
                                                          200,
                                                          PIPE_SHADER_FRAGMENT);
      }
      shader = ctx->mask.intersect_fs->driver;
   }
      break;
   case VG_SUBTRACT_MASK: {
      if (!ctx->mask.subtract_fs) {
         ctx->mask.subtract_fs = shader_create_from_text(ctx->pipe,
                                                         subtract_mask_asm,
                                                         200,
                                                         PIPE_SHADER_FRAGMENT);
      }
      shader = ctx->mask.subtract_fs->driver;
   }
      break;
   case VG_SET_MASK: {
      if (!ctx->mask.set_fs) {
         ctx->mask.set_fs = shader_create_from_text(ctx->pipe,
                                                    set_mask_asm,
                                                    200,
                                                    PIPE_SHADER_FRAGMENT);
      }
      shader = ctx->mask.set_fs->driver;
   }
      break;
   default:
      assert(0);
      break;
   }
   cso_set_fragment_shader_handle(ctx->cso_context, shader);
}

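/* Bind the samplers and textures used by the mask-operation shaders:
 * unit 0 samples the user-supplied mask texture (with normalized
 * coordinates), unit 1 samples the blend texture, which holds a copy of
 * the current alpha mask (see vg_prepare_blend_surface_from_mask).
 */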
static void setup_mask_samplers(struct pipe_texture *umask)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
   struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
   struct st_framebuffer *fb_buffers = ctx->draw_buffer;
   struct pipe_texture *uprev = NULL;
   struct pipe_sampler_state sampler;

   uprev = fb_buffers->blend_texture;
   sampler = ctx->mask.sampler;
   sampler.normalized_coords = 1;

   samplers[0] = NULL;
   samplers[1] = NULL;
   samplers[2] = NULL;
   textures[0] = NULL;
   textures[1] = NULL;
   textures[2] = NULL;

   samplers[0] = &sampler;
   samplers[1] = &ctx->mask.sampler;

   textures[0] = umask;
   textures[1] = uprev;

   cso_set_samplers(ctx->cso_context, 2,
                    (const struct pipe_sampler_state **)samplers);
   cso_set_sampler_textures(ctx->cso_context, 2, textures);
}


/* setup shader constants */
static void setup_mask_fill(const VGfloat color[4])
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_constant_buffer *cbuf = &ctx->mask.cbuf;
   const VGint param_bytes = 4 * sizeof(VGfloat);

   /* We always need to get a new buffer, to keep the drivers simple and
    * avoid gratuitous rendering synchronization.
    */
   pipe_buffer_reference(&cbuf->buffer, NULL);

   cbuf->buffer = pipe_buffer_create(ctx->pipe->screen, 1,
                                     PIPE_BUFFER_USAGE_CONSTANT,
                                     param_bytes);
   if (cbuf->buffer) {
      st_no_flush_pipe_buffer_write(ctx, cbuf->buffer, 0, param_bytes, color);
   }

   ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
   cso_set_fragment_shader_handle(ctx->cso_context,
                                  shaders_cache_fill(ctx->sc,
                                                     VEGA_SOLID_FILL_SHADER));
}

static void setup_mask_viewport()
{
   struct vg_context *ctx = vg_current_context();
   vg_set_viewport(ctx, VEGA_Y0_TOP);
}

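/* Set up a blend state that simply replaces the destination: all four
 * channels are written with src * ONE + dst * ZERO.
 */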
static void setup_mask_blend()
{
   struct vg_context *ctx = vg_current_context();

   struct pipe_blend_state blend;

   memset(&blend, 0, sizeof(struct pipe_blend_state));
   blend.blend_enable = 1;
   blend.colormask |= PIPE_MASK_R;
   blend.colormask |= PIPE_MASK_G;
   blend.colormask |= PIPE_MASK_B;
   blend.colormask |= PIPE_MASK_A;
   blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
   blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;

   cso_set_blend(ctx->cso_context, &blend);
}


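/* Fill the rectangle (x, y, width, height) of the given surface with a
 * constant color, clamping negative origins to the surface.  Saves and
 * restores the CSO state it touches and flushes before returning.
 */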
static void surface_fill(struct pipe_surface *surf,
                         int surf_width, int surf_height,
                         int x, int y, int width, int height,
                         const VGfloat color[4])
{
   struct vg_context *ctx = vg_current_context();

   if (x < 0) {
      width += x;
      x = 0;
   }
   if (y < 0) {
      height += y;
      y = 0;
   }

   cso_save_framebuffer(ctx->cso_context);
   cso_save_blend(ctx->cso_context);
   cso_save_fragment_shader(ctx->cso_context);
   cso_save_viewport(ctx->cso_context);

   setup_mask_blend();
   setup_mask_fill(color);
   setup_mask_framebuffer(surf, surf_width, surf_height);
   setup_mask_viewport();

   renderer_draw_quad(ctx->renderer, x, y,
                      x + width, y + height, 0.0f/*depth should be disabled*/);


   /* make sure rendering has completed */
   ctx->pipe->flush(ctx->pipe,
                    PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_FRAME,
                    NULL);

#if DEBUG_MASKS
   save_alpha_to_file(0);
#endif

   cso_restore_blend(ctx->cso_context);
   cso_restore_framebuffer(ctx->cso_context);
   cso_restore_fragment_shader(ctx->cso_context);
   cso_restore_viewport(ctx->cso_context);
}


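/* Apply 'texture' to the alpha mask with the given VGMaskOperation.
 * The (x, y, width, height) rectangle is clipped against both the mask
 * surface and the texture; the current alpha mask is first copied into
 * the blend surface so the operation shader can read it while the result
 * is rendered back into the alpha mask surface.
 */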
static void mask_using_texture(struct pipe_texture *texture,
                               VGMaskOperation operation,
                               VGint x, VGint y,
                               VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_surface *surface =
      alpha_mask_surface(ctx, PIPE_BUFFER_USAGE_GPU_WRITE);
   VGint offsets[4], loc[4];

   if (!surface)
      return;
   if (!intersect_rectangles(surface->width, surface->height,
                             texture->width0, texture->height0,
                             x, y, width, height,
                             offsets, loc)) {
      /* nothing to do; drop the surface reference before returning */
      pipe_surface_reference(&surface, NULL);
      return;
   }
#if 0
   debug_printf("Offset = [%d, %d, %d, %d]\n", offsets[0],
                offsets[1], offsets[2], offsets[3]);
   debug_printf("Locati = [%d, %d, %d, %d]\n", loc[0],
                loc[1], loc[2], loc[3]);
#endif

   /* prepare our blend surface */
   vg_prepare_blend_surface_from_mask(ctx);

   cso_save_samplers(ctx->cso_context);
   cso_save_sampler_textures(ctx->cso_context);
   cso_save_framebuffer(ctx->cso_context);
   cso_save_blend(ctx->cso_context);
   cso_save_fragment_shader(ctx->cso_context);
   cso_save_viewport(ctx->cso_context);

   setup_mask_samplers(texture);
   setup_mask_blend();
   setup_mask_operation(operation);
   setup_mask_framebuffer(surface, surface->width, surface->height);
   setup_mask_viewport();

   /* render the quad to propagate the rendering from stencil */
   renderer_draw_texture(ctx->renderer, texture,
                         offsets[0], offsets[1],
                         offsets[0] + offsets[2], offsets[1] + offsets[3],
                         loc[0], loc[1], loc[0] + loc[2], loc[1] + loc[3]);

   /* make sure rendering has completed */
   ctx->pipe->flush(ctx->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
   cso_restore_blend(ctx->cso_context);
   cso_restore_framebuffer(ctx->cso_context);
   cso_restore_fragment_shader(ctx->cso_context);
   cso_restore_samplers(ctx->cso_context);
   cso_restore_sampler_textures(ctx->cso_context);
   cso_restore_viewport(ctx->cso_context);

   pipe_surface_reference(&surface, NULL);
}


#ifdef OPENVG_VERSION_1_1

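/* Create a width x height mask layer (OpenVG 1.1 VGMaskLayer) backed by a
 * 2D texture and register it with the context.
 */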
struct vg_mask_layer * mask_layer_create(VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_mask_layer *mask = 0;

   mask = CALLOC_STRUCT(vg_mask_layer);
   vg_init_object(&mask->base, ctx, VG_OBJECT_MASK);
   mask->width = width;
   mask->height = height;

   {
      struct pipe_texture pt;
      struct pipe_screen *screen = ctx->pipe->screen;

      memset(&pt, 0, sizeof(pt));
      pt.target = PIPE_TEXTURE_2D;
      pt.format = PIPE_FORMAT_A8R8G8B8_UNORM;
      pt.last_level = 0;
      pt.width0 = width;
      pt.height0 = height;
      pt.depth0 = 1;
      pt.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
      pt.compressed = 0;

      mask->texture = screen->texture_create(screen, &pt);
   }

   vg_context_add_object(ctx, VG_OBJECT_MASK, mask);

   return mask;
}

void mask_layer_destroy(struct vg_mask_layer *layer)
{
   struct vg_context *ctx = vg_current_context();

   vg_context_remove_object(ctx, VG_OBJECT_MASK, layer);
   pipe_texture_release(&layer->texture);
   /* the layer was allocated with CALLOC_STRUCT, so release it with FREE */
   FREE(layer);
}

void mask_layer_fill(struct vg_mask_layer *layer,
                     VGint x, VGint y,
                     VGint width, VGint height,
                     VGfloat value)
{
   struct vg_context *ctx = vg_current_context();
   VGfloat alpha_color[4] = {0, 0, 0, 0};
   struct pipe_surface *surface;

   alpha_color[3] = value;

   surface = ctx->pipe->screen->get_tex_surface(
      ctx->pipe->screen, layer->texture,
      0, 0, 0,
      PIPE_BUFFER_USAGE_GPU_WRITE);

   surface_fill(surface,
                layer->width, layer->height,
                x, y, width, height, alpha_color);

   ctx->pipe->screen->tex_surface_release(ctx->pipe->screen, &surface);
}

void mask_copy(struct vg_mask_layer *layer,
               VGint sx, VGint sy,
               VGint dx, VGint dy,
               VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct st_framebuffer *fb_buffers = ctx->draw_buffer;

   renderer_copy_texture(ctx->renderer,
                         layer->texture,
                         sx, sy,
                         sx + width, sy + height,
                         fb_buffers->alpha_mask,
                         dx, dy,
                         dx + width, dy + height);
}

static void mask_layer_render_to(struct vg_mask_layer *layer,
                                 struct path *path,
                                 VGbitfield paint_modes)
{
   struct vg_context *ctx = vg_current_context();
   const VGfloat fill_color[4] = {1.f, 1.f, 1.f, 1.f};
   struct pipe_screen *screen = ctx->pipe->screen;
   struct pipe_surface *surface;

   surface = screen->get_tex_surface(screen, layer->texture, 0, 0, 0,
                                     PIPE_BUFFER_USAGE_GPU_WRITE);

   cso_save_framebuffer(ctx->cso_context);
   cso_save_fragment_shader(ctx->cso_context);
   cso_save_viewport(ctx->cso_context);

   setup_mask_blend();
   setup_mask_fill(fill_color);
   setup_mask_framebuffer(surface, layer->width, layer->height);
   setup_mask_viewport();

   if (paint_modes & VG_FILL_PATH) {
      struct matrix *mat = &ctx->state.vg.path_user_to_surface_matrix;
      path_fill(path, mat);
   }

   if (paint_modes & VG_STROKE_PATH) {
      path_stroke(path);
   }


   /* make sure rendering has completed */
   ctx->pipe->flush(ctx->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);

   cso_restore_framebuffer(ctx->cso_context);
   cso_restore_fragment_shader(ctx->cso_context);
   cso_restore_viewport(ctx->cso_context);
   ctx->state.dirty |= BLEND_DIRTY;

   screen->tex_surface_release(ctx->pipe->screen, &surface);
}

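/* Rasterize the path coverage into a temporary mask layer the size of the
 * alpha mask, then combine that layer with the current mask using the
 * requested operation.
 */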
void mask_render_to(struct path *path,
                    VGbitfield paint_modes,
                    VGMaskOperation operation)
{
   struct vg_context *ctx = vg_current_context();
   struct st_framebuffer *fb_buffers = ctx->draw_buffer;
   struct vg_mask_layer *temp_layer;
   VGint width, height;

   width = fb_buffers->alpha_mask->width0;
   height = fb_buffers->alpha_mask->height0;

   temp_layer = mask_layer_create(width, height);

   mask_layer_render_to(temp_layer, path, paint_modes);

   mask_using_layer(temp_layer, operation, 0, 0, width, height);

   mask_layer_destroy(temp_layer);
}

void mask_using_layer(struct vg_mask_layer *layer,
                      VGMaskOperation operation,
                      VGint x, VGint y,
                      VGint width, VGint height)
{
   mask_using_texture(layer->texture, operation,
                      x, y, width, height);
}

VGint mask_layer_width(struct vg_mask_layer *layer)
{
   return layer->width;
}

VGint mask_layer_height(struct vg_mask_layer *layer)
{
   return layer->height;
}


#endif

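/* Apply the image's backing texture to the alpha mask with the given
 * operation.
 */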
void mask_using_image(struct vg_image *image,
                      VGMaskOperation operation,
                      VGint x, VGint y,
                      VGint width, VGint height)
{
   mask_using_texture(image->texture, operation,
                      x, y, width, height);
}

void mask_fill(VGint x, VGint y, VGint width, VGint height,
               VGfloat value)
{
   struct vg_context *ctx = vg_current_context();
   VGfloat alpha_color[4] = {.0f, .0f, .0f, value};
   struct pipe_surface *surf = alpha_mask_surface(
      ctx, PIPE_BUFFER_USAGE_GPU_WRITE);

#if DEBUG_MASKS
   debug_printf("mask_fill(%d, %d, %d, %d) with rgba(%f, %f, %f, %f)\n",
                x, y, width, height,
                alpha_color[0], alpha_color[1],
                alpha_color[2], alpha_color[3]);
   debug_printf("XXX %f === %f \n",
                alpha_color[3], value);
#endif

   surface_fill(surf, surf->width, surf->height,
                x, y, width, height, alpha_color);

   pipe_surface_reference(&surf, NULL);
}

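/* If masking is enabled, bind the alpha mask texture and its sampler at
 * slot 1 for the paint shaders and return how many were bound (1);
 * otherwise return 0.
 */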
VGint mask_bind_samplers(struct pipe_sampler_state **samplers,
                         struct pipe_texture **textures)
{
   struct vg_context *ctx = vg_current_context();

   if (ctx->state.vg.masking) {
      struct st_framebuffer *fb_buffers = ctx->draw_buffer;

      samplers[1] = &ctx->mask.sampler;
      textures[1] = fb_buffers->alpha_mask;
      return 1;
   } else
      return 0;
}