/* src/gallium/auxiliary/vl/vl_zscan.c */
/**************************************************************************
 *
 * Copyright 2011 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_screen.h>
#include <pipe/p_context.h>

#include <util/u_draw.h>
#include <util/u_sampler.h>
#include <util/u_inlines.h>
#include <util/u_memory.h>

#include <tgsi/tgsi_ureg.h>

#include <vl/vl_defines.h>
#include <vl/vl_types.h>

#include "vl_zscan.h"
#include "vl_vertex_buffers.h"

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTEX
};

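/*
 * Scan patterns: entry i is the raster (row-major) position inside an
 * 8x8 block of the i-th coefficient in scan order.  vl_zscan_layout()
 * inverts these tables when building the layout lookup texture.
 */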
const int vl_zscan_linear[] =
{
   /* Linear scan pattern */
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16,17,18,19,20,21,22,23,
   24,25,26,27,28,29,30,31,
   32,33,34,35,36,37,38,39,
   40,41,42,43,44,45,46,47,
   48,49,50,51,52,53,54,55,
   56,57,58,59,60,61,62,63
};

const int vl_zscan_normal[] =
{
   /* Zig-Zag scan pattern */
    0, 1, 8,16, 9, 2, 3,10,
   17,24,32,25,18,11, 4, 5,
   12,19,26,33,40,48,41,34,
   27,20,13, 6, 7,14,21,28,
   35,42,49,56,57,50,43,36,
   29,22,15,23,30,37,44,51,
   58,59,52,45,38,31,39,46,
   53,60,61,54,47,55,62,63
};

const int vl_zscan_alternate[] =
{
   /* Alternate scan pattern */
    0, 8,16,24, 1, 9, 2,10,
   17,25,32,40,48,56,57,49,
   41,33,26,18, 3,11, 4,12,
   19,27,34,42,50,58,35,43,
   51,59,20,28, 5,13, 6,14,
   21,29,36,44,52,60,37,45,
   53,61,22,30, 7,15,23,31,
   38,46,54,62,39,47,55,63
};

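/*
 * Create the vertex shader: it positions each block quad on the target
 * surface and emits one texture coordinate set per channel addressing
 * the source coefficients of the block being processed.
 */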
static void *
create_vert_shader(struct vl_zscan *zscan)
{
   struct ureg_program *shader;

   struct ureg_src scale;
   struct ureg_src vrect, vpos, block_num;

   struct ureg_dst tmp;
   struct ureg_dst o_vpos;
   struct ureg_dst *o_vtex;

   signed i;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   o_vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_dst));
   if (!o_vtex) {
      ureg_destroy(shader);
      return NULL;
   }

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / zscan->buffer_width,
      (float)BLOCK_HEIGHT / zscan->buffer_height);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
   block_num = ureg_DECL_vs_input(shader, VS_I_BLOCK_NUM);

   tmp = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   for (i = 0; i < zscan->num_channels; ++i)
      o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i);

   /*
    * o_vpos.xy = (vpos + vrect) * scale
    * o_vpos.zw = 1.0f
    *
    * tmp.xw = block_num / blocks_per_line
    * tmp.y = frac(tmp.x)
    * tmp.w = floor(tmp.w)
    *
    * o_vtex.x = vrect.x / blocks_per_line + tmp.y + channel offset
    * o_vtex.y = vrect.y
    * o_vtex.z = vpos.z
    * o_vtex.w = tmp.w * blocks_per_line / blocks_total
    */
   ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(tmp), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XW), ureg_scalar(block_num, TGSI_SWIZZLE_X),
            ureg_imm1f(shader, 1.0f / zscan->blocks_per_line));

   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
   ureg_FLR(shader, ureg_writemask(tmp, TGSI_WRITEMASK_W), ureg_src(tmp));

   for (i = 0; i < zscan->num_channels; ++i) {
      ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y),
               ureg_imm1f(shader, 1.0f / (zscan->blocks_per_line * BLOCK_WIDTH) * (i - (signed)zscan->num_channels / 2)));

      ureg_MAD(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_X), vrect,
               ureg_imm1f(shader, 1.0f / zscan->blocks_per_line), ureg_src(tmp));
      ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Y), vrect);
      ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Z), vpos);
      ureg_MUL(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_W), ureg_src(tmp),
               ureg_imm1f(shader, (float)zscan->blocks_per_line / zscan->blocks_total));
   }

   ureg_release_temporary(shader, tmp);
   ureg_END(shader);

   FREE(o_vtex);

   return ureg_create_shader_and_destroy(shader, zscan->pipe);
}

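/*
 * Create the fragment shader: for each channel it samples the layout
 * texture to locate the source coefficient, fetches it from the source
 * texture and scales it with the matching quant matrix entry.
 */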
static void *
create_frag_shader(struct vl_zscan *zscan)
{
   struct ureg_program *shader;
   struct ureg_src *vtex;

   struct ureg_src samp_src, samp_scan, samp_quant;

   struct ureg_dst *tmp;
   struct ureg_dst quant, fragment;

   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_src));
   tmp = MALLOC(zscan->num_channels * sizeof(struct ureg_dst));
   if (!vtex || !tmp) {
      FREE(vtex);
      FREE(tmp);
      ureg_destroy(shader);
      return NULL;
   }

   for (i = 0; i < zscan->num_channels; ++i)
      vtex[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i, TGSI_INTERPOLATE_LINEAR);

   samp_src = ureg_DECL_sampler(shader, 0);
   samp_scan = ureg_DECL_sampler(shader, 1);
   samp_quant = ureg_DECL_sampler(shader, 2);

   for (i = 0; i < zscan->num_channels; ++i)
      tmp[i] = ureg_DECL_temporary(shader);
   quant = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * tmp.x = tex(vtex, 1)
    * tmp.y = vtex.w
    * quant = tex(vtex, 2) * 16.0f
    * fragment = tex(tmp, 0) * quant
    */
   for (i = 0; i < zscan->num_channels; ++i)
      ureg_TEX(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_X), TGSI_TEXTURE_2D, vtex[i], samp_scan);

   for (i = 0; i < zscan->num_channels; ++i)
      ureg_MOV(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_Y), ureg_scalar(vtex[i], TGSI_SWIZZLE_W));

   for (i = 0; i < zscan->num_channels; ++i) {
      ureg_TEX(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D, ureg_src(tmp[i]), samp_src);
      ureg_TEX(shader, ureg_writemask(quant, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, vtex[i], samp_quant);
   }

   ureg_MUL(shader, quant, ureg_src(quant), ureg_imm1f(shader, 16.0f));
   ureg_MUL(shader, fragment, ureg_src(tmp[0]), ureg_src(quant));

   for (i = 0; i < zscan->num_channels; ++i)
      ureg_release_temporary(shader, tmp[i]);
   ureg_END(shader);

   FREE(vtex);
   FREE(tmp);

   return ureg_create_shader_and_destroy(shader, zscan->pipe);
}

static bool
init_shaders(struct vl_zscan *zscan)
{
   assert(zscan);

   zscan->vs = create_vert_shader(zscan);
   if (!zscan->vs)
      goto error_vs;

   zscan->fs = create_frag_shader(zscan);
   if (!zscan->fs)
      goto error_fs;

   return true;

error_fs:
   zscan->pipe->delete_vs_state(zscan->pipe, zscan->vs);

error_vs:
   return false;
}

static void
cleanup_shaders(struct vl_zscan *zscan)
{
   assert(zscan);

   zscan->pipe->delete_vs_state(zscan->pipe, zscan->vs);
   zscan->pipe->delete_fs_state(zscan->pipe, zscan->fs);
}

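/*
 * Create the CSO state: a default rasterizer state, a blend state that
 * simply writes RGBA and one nearest-filtering sampler for each of the
 * three bound textures (source, layout and quant matrix).
 */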
static bool
init_state(struct vl_zscan *zscan)
{
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(zscan);

   memset(&rs_state, 0, sizeof(rs_state));
   rs_state.gl_rasterization_rules = true;
   zscan->rs_state = zscan->pipe->create_rasterizer_state(zscan->pipe, &rs_state);
   if (!zscan->rs_state)
      goto error_rs_state;

   memset(&blend, 0, sizeof blend);

   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   /* Needed to allow color writes to FB, even if blending disabled */
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   zscan->blend = zscan->pipe->create_blend_state(zscan->pipe, &blend);
   if (!zscan->blend)
      goto error_blend;

   for (i = 0; i < 3; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      zscan->samplers[i] = zscan->pipe->create_sampler_state(zscan->pipe, &sampler);
      if (!zscan->samplers[i])
         goto error_samplers;
   }

   return true;

error_samplers:
   for (i = 0; i < 3; ++i)
      if (zscan->samplers[i])
         zscan->pipe->delete_sampler_state(zscan->pipe, zscan->samplers[i]);

   zscan->pipe->delete_blend_state(zscan->pipe, zscan->blend);

error_blend:
   zscan->pipe->delete_rasterizer_state(zscan->pipe, zscan->rs_state);

error_rs_state:
   return false;
}

static void
cleanup_state(struct vl_zscan *zscan)
{
   unsigned i;

   assert(zscan);

   for (i = 0; i < 3; ++i)
      zscan->pipe->delete_sampler_state(zscan->pipe, zscan->samplers[i]);

   zscan->pipe->delete_rasterizer_state(zscan->pipe, zscan->rs_state);
   zscan->pipe->delete_blend_state(zscan->pipe, zscan->blend);
}

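/*
 * Build a layout lookup texture for the given scan pattern: one line of
 * blocks_per_line 8x8 blocks stored as R32_FLOAT, where each texel holds
 * the normalized source position of the coefficient that ends up at that
 * raster position.  The fragment shader samples it to translate raster
 * coordinates back into scan-order coordinates.
 */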
struct pipe_sampler_view *
vl_zscan_layout(struct pipe_context *pipe, const int layout[64], unsigned blocks_per_line)
{
   const unsigned total_size = blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT;

   int patched_layout[64];

   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_tmpl, *sv;
   struct pipe_transfer *buf_transfer;
   unsigned x, y, i, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH * blocks_per_line,
      BLOCK_HEIGHT,
      1
   };

   assert(pipe && layout && blocks_per_line);

   for (i = 0; i < 64; ++i)
      patched_layout[layout[i]] = i;

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = PIPE_FORMAT_R32_FLOAT;
   res_tmpl.width0 = BLOCK_WIDTH * blocks_per_line;
   res_tmpl.height0 = BLOCK_HEIGHT;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
   if (!res)
      goto error_resource;

   buf_transfer = pipe->get_transfer
   (
      pipe, res,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   if (!f)
      goto error_map;

   for (i = 0; i < blocks_per_line; ++i)
      for (y = 0; y < BLOCK_HEIGHT; ++y)
         for (x = 0; x < BLOCK_WIDTH; ++x) {
            float addr = patched_layout[x + y * BLOCK_WIDTH] +
               i * BLOCK_WIDTH * BLOCK_HEIGHT;

            addr /= total_size;

            f[i * BLOCK_WIDTH + y * pitch + x] = addr;
         }

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv = pipe->create_sampler_view(pipe, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);

   /* transfer and resource are already released; sv is NULL on failure */
   return sv;

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   pipe_resource_reference(&res, NULL);

error_resource:
   return NULL;
}

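/*
 * Initialize the shared zscan shaders and state for the given buffer size
 * and block layout.  A rough usage sketch: vl_zscan_init() once, then
 * vl_zscan_init_buffer() per render target, vl_zscan_set_layout() with a
 * view created by vl_zscan_layout(), vl_zscan_upload_quant() and finally
 * vl_zscan_render().
 */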
bool
vl_zscan_init(struct vl_zscan *zscan, struct pipe_context *pipe,
              unsigned buffer_width, unsigned buffer_height,
              unsigned blocks_per_line, unsigned blocks_total,
              unsigned num_channels)
{
   assert(zscan && pipe);

   zscan->pipe = pipe;
   zscan->buffer_width = buffer_width;
   zscan->buffer_height = buffer_height;
   zscan->num_channels = num_channels;
   zscan->blocks_per_line = blocks_per_line;
   zscan->blocks_total = blocks_total;

   if (!init_shaders(zscan))
      return false;

   if (!init_state(zscan)) {
      cleanup_shaders(zscan);
      return false;
   }

   return true;
}

void
vl_zscan_cleanup(struct vl_zscan *zscan)
{
   assert(zscan);

   cleanup_shaders(zscan);
   cleanup_state(zscan);
}

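/*
 * Set up a per-target buffer: it takes references on the source sampler
 * view and the destination surface and creates the 3D quant texture with
 * one 8x8 layer for non-intra and one for intra matrices, replicated
 * across blocks_per_line.
 */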
bool
vl_zscan_init_buffer(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer,
                     struct pipe_sampler_view *src, struct pipe_surface *dst)
{
   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_tmpl;

   assert(zscan && buffer);

   memset(buffer, 0, sizeof(struct vl_zscan_buffer));

   buffer->zscan = zscan;

   pipe_sampler_view_reference(&buffer->src, src);

   buffer->viewport.scale[0] = dst->width;
   buffer->viewport.scale[1] = dst->height;
   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   buffer->fb_state.width = dst->width;
   buffer->fb_state.height = dst->height;
   buffer->fb_state.nr_cbufs = 1;
   pipe_surface_reference(&buffer->fb_state.cbufs[0], dst);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_3D;
   res_tmpl.format = PIPE_FORMAT_R8_UNORM;
   res_tmpl.width0 = BLOCK_WIDTH * zscan->blocks_per_line;
   res_tmpl.height0 = BLOCK_HEIGHT;
   res_tmpl.depth0 = 2;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = zscan->pipe->screen->resource_create(zscan->pipe->screen, &res_tmpl);
   if (!res)
      return false;

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = TGSI_SWIZZLE_X;
   buffer->quant = zscan->pipe->create_sampler_view(zscan->pipe, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!buffer->quant)
      return false;

   return true;
}

void
vl_zscan_cleanup_buffer(struct vl_zscan_buffer *buffer)
{
   assert(buffer);

   pipe_sampler_view_reference(&buffer->src, NULL);
   pipe_sampler_view_reference(&buffer->layout, NULL);
   pipe_sampler_view_reference(&buffer->quant, NULL);
   pipe_surface_reference(&buffer->fb_state.cbufs[0], NULL);
}

void
vl_zscan_set_layout(struct vl_zscan_buffer *buffer, struct pipe_sampler_view *layout)
{
   assert(buffer);
   assert(layout);

   pipe_sampler_view_reference(&buffer->layout, layout);
}

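/*
 * Upload an 8x8 quant matrix, replicated once per block along the line,
 * into the quant texture; intra selects which of the two layers is
 * written (z = 1 for intra, z = 0 for non-intra).
 */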
void
vl_zscan_upload_quant(struct vl_zscan_buffer *buffer, const uint8_t matrix[64], bool intra)
{
   struct pipe_context *pipe;
   struct pipe_transfer *buf_transfer;
   unsigned x, y, i, pitch;
   uint8_t *data;

   struct pipe_box rect =
   {
      0, 0, intra ? 1 : 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   assert(buffer);
   assert(matrix);

   pipe = buffer->zscan->pipe;

   rect.width *= buffer->zscan->blocks_per_line;

   buf_transfer = pipe->get_transfer
   (
      pipe, buffer->quant->texture,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride;

   data = pipe->transfer_map(pipe, buf_transfer);
   if (!data)
      goto error_map;

   for (i = 0; i < buffer->zscan->blocks_per_line; ++i)
      for (y = 0; y < BLOCK_HEIGHT; ++y)
         for (x = 0; x < BLOCK_WIDTH; ++x)
            data[i * BLOCK_WIDTH + y * pitch + x] = matrix[x + y * BLOCK_WIDTH];

   pipe->transfer_unmap(pipe, buf_transfer);

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   return;
}

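/*
 * Bind all zscan state and draw one quad per block instance.  Binding the
 * sampler views via &buffer->src relies on src, layout and quant being
 * consecutive members of struct vl_zscan_buffer.
 */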
void
vl_zscan_render(struct vl_zscan_buffer *buffer, unsigned num_instances)
{
   struct vl_zscan *zscan;

   assert(buffer);

   zscan = buffer->zscan;

   zscan->pipe->bind_rasterizer_state(zscan->pipe, zscan->rs_state);
   zscan->pipe->bind_blend_state(zscan->pipe, zscan->blend);
   zscan->pipe->bind_fragment_sampler_states(zscan->pipe, 3, zscan->samplers);
   zscan->pipe->set_framebuffer_state(zscan->pipe, &buffer->fb_state);
   zscan->pipe->set_viewport_state(zscan->pipe, &buffer->viewport);
   zscan->pipe->set_fragment_sampler_views(zscan->pipe, 3, &buffer->src);
   zscan->pipe->bind_vs_state(zscan->pipe, zscan->vs);
   zscan->pipe->bind_fs_state(zscan->pipe, zscan->fs);
   util_draw_arrays_instanced(zscan->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}