[mesa.git] src/gallium/auxiliary/vl/vl_zscan.c
/**************************************************************************
 *
 * Copyright 2011 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_screen.h>
#include <pipe/p_context.h>

#include <util/u_draw.h>
#include <util/u_sampler.h>
#include <util/u_inlines.h>

#include <tgsi/tgsi_ureg.h>

#include <vl/vl_defines.h>
#include <vl/vl_types.h>

#include "vl_zscan.h"
#include "vl_vertex_buffers.h"

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTEX
};

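/*
 * Scan tables: entry i is the linear (raster-order) index inside the 8x8
 * block of the coefficient that appears at scan position i.  vl_zscan_normal
 * is the classic zig-zag order; vl_zscan_alternate is the alternate scan used
 * for interlaced MPEG-2 content.
 */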
const int vl_zscan_linear[] =
{
   /* Linear scan pattern */
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16,17,18,19,20,21,22,23,
   24,25,26,27,28,29,30,31,
   32,33,34,35,36,37,38,39,
   40,41,42,43,44,45,46,47,
   48,49,50,51,52,53,54,55,
   56,57,58,59,60,61,62,63
};

const int vl_zscan_normal[] =
{
   /* Zig-Zag scan pattern */
    0, 1, 8,16, 9, 2, 3,10,
   17,24,32,25,18,11, 4, 5,
   12,19,26,33,40,48,41,34,
   27,20,13, 6, 7,14,21,28,
   35,42,49,56,57,50,43,36,
   29,22,15,23,30,37,44,51,
   58,59,52,45,38,31,39,46,
   53,60,61,54,47,55,62,63
};

const int vl_zscan_alternate[] =
{
   /* Alternate scan pattern */
    0, 8,16,24, 1, 9, 2,10,
   17,25,32,40,48,56,57,49,
   41,33,26,18, 3,11, 4,12,
   19,27,34,42,50,58,35,43,
   51,59,20,28, 5,13, 6,14,
   21,29,36,44,52,60,37,45,
   53,61,22,30, 7,15,23,31,
   38,46,54,62,39,47,55,63
};

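/*
 * Vertex shader: one quad is drawn per 8x8 block (one instance per block).
 * Besides the output position it emits, for every channel, a texture
 * coordinate that selects the block's slot in the scan layout / source /
 * quant textures, derived from the instance ID.
 */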
static void *
create_vert_shader(struct vl_zscan *zscan)
{
   struct ureg_program *shader;

   struct ureg_src scale;
   struct ureg_src vrect, vpos, block_num;

   struct ureg_dst tmp;
   struct ureg_dst o_vpos, o_vtex[zscan->num_channels];

   signed i;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / zscan->buffer_width,
      (float)BLOCK_HEIGHT / zscan->buffer_height);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   block_num = ureg_DECL_system_value(shader, 0, TGSI_SEMANTIC_INSTANCEID, 0);

   tmp = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   for (i = 0; i < zscan->num_channels; ++i)
      o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i);

   /*
    * o_vpos.xy = (vpos + vrect) * scale
    * o_vpos.zw = 1.0f
    *
    * tmp.xw = InstanceID / blocks_per_line
    * tmp.y = frac(tmp.x)
    * tmp.w = floor(tmp.w)
    *
    * for each channel i:
    *    tmp.x = tmp.y + (i - num_channels / 2) / (blocks_per_line * BLOCK_WIDTH)
    *    o_vtex[i].x = vrect.x / blocks_per_line + tmp.x
    *    o_vtex[i].y = vrect.y
    *    o_vtex[i].z = vpos.z
    *    o_vtex[i].w = tmp.w * blocks_per_line / blocks_total
    */
   ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(tmp), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XW), ureg_scalar(block_num, TGSI_SWIZZLE_X),
            ureg_imm1f(shader, 1.0f / zscan->blocks_per_line));

   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
   ureg_FLR(shader, ureg_writemask(tmp, TGSI_WRITEMASK_W), ureg_src(tmp));

   for (i = 0; i < zscan->num_channels; ++i) {
      ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y),
               ureg_imm1f(shader, 1.0f / (zscan->blocks_per_line * BLOCK_WIDTH) * (i - (signed)zscan->num_channels / 2)));

      ureg_MAD(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_X), vrect,
               ureg_imm1f(shader, 1.0f / zscan->blocks_per_line), ureg_src(tmp));
      ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Y), vrect);
      ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Z), vpos);
      ureg_MUL(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_W), ureg_src(tmp),
               ureg_imm1f(shader, (float)zscan->blocks_per_line / zscan->blocks_total));
   }

   ureg_release_temporary(shader, tmp);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, zscan->pipe);
}

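/*
 * Fragment shader: for each channel, look up the normalized scan position in
 * the layout texture (sampler 1), use it together with the block index to
 * fetch the coefficient from the source texture (sampler 0), scale it by the
 * matching entry of the quant texture (sampler 2) times 16, and write the
 * results into the color channels of the render target.
 */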
static void *
create_frag_shader(struct vl_zscan *zscan)
{
   struct ureg_program *shader;
   struct ureg_src vtex[zscan->num_channels];

   struct ureg_src samp_src, samp_scan, samp_quant;

   struct ureg_dst tmp[zscan->num_channels];
   struct ureg_dst quant, fragment;

   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   for (i = 0; i < zscan->num_channels; ++i)
      vtex[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i, TGSI_INTERPOLATE_LINEAR);

   samp_src = ureg_DECL_sampler(shader, 0);
   samp_scan = ureg_DECL_sampler(shader, 1);
   samp_quant = ureg_DECL_sampler(shader, 2);

   for (i = 0; i < zscan->num_channels; ++i)
      tmp[i] = ureg_DECL_temporary(shader);
   quant = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * tmp.x = tex(vtex, 1)   (normalized scan position from the layout texture)
    * tmp.y = vtex.w
    * quant = tex(vtex, 2) * 16.0
    * fragment = tex(tmp, 0) * quant
    */
   for (i = 0; i < zscan->num_channels; ++i)
      ureg_TEX(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_X), TGSI_TEXTURE_2D, vtex[i], samp_scan);

   for (i = 0; i < zscan->num_channels; ++i)
      ureg_MOV(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_Y), ureg_scalar(vtex[i], TGSI_SWIZZLE_W));

   for (i = 0; i < zscan->num_channels; ++i) {
      ureg_TEX(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D, ureg_src(tmp[i]), samp_src);
      ureg_TEX(shader, ureg_writemask(quant, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, vtex[i], samp_quant);
   }

   ureg_MUL(shader, quant, ureg_src(quant), ureg_imm1f(shader, 16.0f));
   ureg_MUL(shader, fragment, ureg_src(tmp[0]), ureg_src(quant));

   for (i = 0; i < zscan->num_channels; ++i)
      ureg_release_temporary(shader, tmp[i]);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, zscan->pipe);
}

static bool
init_shaders(struct vl_zscan *zscan)
{
   assert(zscan);

   zscan->vs = create_vert_shader(zscan);
   if (!zscan->vs)
      goto error_vs;

   zscan->fs = create_frag_shader(zscan);
   if (!zscan->fs)
      goto error_fs;

   return true;

error_fs:
   zscan->pipe->delete_vs_state(zscan->pipe, zscan->vs);

error_vs:
   return false;
}

static void
cleanup_shaders(struct vl_zscan *zscan)
{
   assert(zscan);

   zscan->pipe->delete_vs_state(zscan->pipe, zscan->vs);
   zscan->pipe->delete_fs_state(zscan->pipe, zscan->fs);
}

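/*
 * Create the pipeline state shared by all buffers: a default rasterizer
 * state, a blend state with blending and logic ops disabled but full RGBA
 * color writes enabled, and three identical nearest-filtered samplers for
 * the source, scan layout and quant matrix textures.
 */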
static bool
init_state(struct vl_zscan *zscan)
{
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(zscan);

   memset(&rs_state, 0, sizeof(rs_state));
   rs_state.gl_rasterization_rules = true;
   zscan->rs_state = zscan->pipe->create_rasterizer_state(zscan->pipe, &rs_state);
   if (!zscan->rs_state)
      goto error_rs_state;

   memset(&blend, 0, sizeof blend);

   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   /* Needed to allow color writes to FB, even if blending disabled */
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   zscan->blend = zscan->pipe->create_blend_state(zscan->pipe, &blend);
   if (!zscan->blend)
      goto error_blend;

   for (i = 0; i < 3; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      zscan->samplers[i] = zscan->pipe->create_sampler_state(zscan->pipe, &sampler);
      if (!zscan->samplers[i])
         goto error_samplers;
   }

   return true;

error_samplers:
   /* i is the index of the sampler that failed; delete only the ones created */
   while (i-- > 0)
      zscan->pipe->delete_sampler_state(zscan->pipe, zscan->samplers[i]);

   zscan->pipe->delete_blend_state(zscan->pipe, zscan->blend);

error_blend:
   zscan->pipe->delete_rasterizer_state(zscan->pipe, zscan->rs_state);

error_rs_state:
   return false;
}

static void
cleanup_state(struct vl_zscan *zscan)
{
   unsigned i;

   assert(zscan);

   for (i = 0; i < 3; ++i)
      zscan->pipe->delete_sampler_state(zscan->pipe, zscan->samplers[i]);

   zscan->pipe->delete_rasterizer_state(zscan->pipe, zscan->rs_state);
   zscan->pipe->delete_blend_state(zscan->pipe, zscan->blend);
}

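/*
 * Build an R32_FLOAT texture describing a scan layout: for every texel
 * position inside a row of blocks_per_line blocks it stores the normalized
 * address (scan position / total row size) of the corresponding coefficient,
 * which the fragment shader uses as the x coordinate into the source texture.
 *
 * A minimal usage sketch, assuming pipe, blocks_per_line and a
 * vl_zscan_buffer named buffer are set up elsewhere:
 *
 *    struct pipe_sampler_view *layout =
 *       vl_zscan_layout(pipe, vl_zscan_normal, blocks_per_line);
 *    vl_zscan_set_layout(&buffer, layout);
 */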
struct pipe_sampler_view *
vl_zscan_layout(struct pipe_context *pipe, const int layout[64], unsigned blocks_per_line)
{
   const unsigned total_size = blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT;

   int patched_layout[64];

   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_tmpl, *sv;
   struct pipe_transfer *buf_transfer;
   unsigned x, y, i, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH * blocks_per_line,
      BLOCK_HEIGHT,
      1
   };

   assert(pipe && layout && blocks_per_line);

   for (i = 0; i < 64; ++i)
      patched_layout[layout[i]] = i;

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = PIPE_FORMAT_R32_FLOAT;
   res_tmpl.width0 = BLOCK_WIDTH * blocks_per_line;
   res_tmpl.height0 = BLOCK_HEIGHT;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
   if (!res)
      goto error_resource;

   buf_transfer = pipe->get_transfer
   (
      pipe, res,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   if (!f)
      goto error_map;

   for (i = 0; i < blocks_per_line; ++i)
      for (y = 0; y < BLOCK_HEIGHT; ++y)
         for (x = 0; x < BLOCK_WIDTH; ++x) {
            float addr = patched_layout[x + y * BLOCK_WIDTH] +
               i * BLOCK_WIDTH * BLOCK_HEIGHT;

            addr /= total_size;

            f[i * BLOCK_WIDTH + y * pitch + x] = addr;
         }

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv = pipe->create_sampler_view(pipe, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!sv)
      return NULL; /* transfer already destroyed and resource already released above */

   return sv;

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   pipe_resource_reference(&res, NULL);

error_resource:
   return NULL;
}

bool
vl_zscan_init(struct vl_zscan *zscan, struct pipe_context *pipe,
              unsigned buffer_width, unsigned buffer_height,
              unsigned blocks_per_line, unsigned blocks_total,
              unsigned num_channels)
{
   assert(zscan && pipe);

   zscan->pipe = pipe;
   zscan->buffer_width = buffer_width;
   zscan->buffer_height = buffer_height;
   zscan->num_channels = num_channels;
   zscan->blocks_per_line = blocks_per_line;
   zscan->blocks_total = blocks_total;

   if (!init_shaders(zscan))
      return false;

   if (!init_state(zscan)) {
      cleanup_shaders(zscan);
      return false;
   }

   return true;
}

void
vl_zscan_cleanup(struct vl_zscan *zscan)
{
   assert(zscan);

   cleanup_shaders(zscan);
   cleanup_state(zscan);
}

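/*
 * Set up the per-buffer state: a viewport and framebuffer covering the dst
 * surface, a reference to the source sampler view, and an R8_UNORM 3D
 * texture with two layers that will receive the non-intra and intra quant
 * matrices from vl_zscan_upload_quant().  The scan layout view is assigned
 * separately through vl_zscan_set_layout().
 */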
bool
vl_zscan_init_buffer(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer,
                     struct pipe_sampler_view *src, struct pipe_surface *dst)
{
   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_tmpl;

   assert(zscan && buffer);

   memset(buffer, 0, sizeof(struct vl_zscan_buffer));

   buffer->zscan = zscan;

   pipe_sampler_view_reference(&buffer->src, src);

   buffer->viewport.scale[0] = dst->width;
   buffer->viewport.scale[1] = dst->height;
   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   buffer->fb_state.width = dst->width;
   buffer->fb_state.height = dst->height;
   buffer->fb_state.nr_cbufs = 1;
   pipe_surface_reference(&buffer->fb_state.cbufs[0], dst);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_3D;
   res_tmpl.format = PIPE_FORMAT_R8_UNORM;
   res_tmpl.width0 = BLOCK_WIDTH * zscan->blocks_per_line;
   res_tmpl.height0 = BLOCK_HEIGHT;
   res_tmpl.depth0 = 2;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = zscan->pipe->screen->resource_create(zscan->pipe->screen, &res_tmpl);
   if (!res)
      return false;

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = TGSI_SWIZZLE_X;
   buffer->quant = zscan->pipe->create_sampler_view(zscan->pipe, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!buffer->quant)
      return false;

   return true;
}

void
vl_zscan_cleanup_buffer(struct vl_zscan_buffer *buffer)
{
   assert(buffer);

   pipe_sampler_view_reference(&buffer->src, NULL);
   pipe_sampler_view_reference(&buffer->layout, NULL);
   pipe_sampler_view_reference(&buffer->quant, NULL);
   pipe_surface_reference(&buffer->fb_state.cbufs[0], NULL);
}

void
vl_zscan_set_layout(struct vl_zscan_buffer *buffer, struct pipe_sampler_view *layout)
{
   assert(buffer);
   assert(layout);

   pipe_sampler_view_reference(&buffer->layout, layout);
}

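/*
 * Upload the two 8x8 quantizer matrices into the quant texture, non-intra
 * into layer 0 and intra into layer 1, each replicated once per block across
 * the line so it lines up with the texture coordinates emitted by the vertex
 * shader.  Note that the error_map label is also reached on the success path,
 * so the transfer is destroyed in either case.
 */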
void
vl_zscan_upload_quant(struct vl_zscan_buffer *buffer,
                      const uint8_t intra_matrix[64],
                      const uint8_t non_intra_matrix[64])
{
   struct pipe_context *pipe;
   struct pipe_transfer *buf_transfer;
   unsigned x, y, i, pitch;
   uint8_t *intra, *non_intra;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      2
   };

   assert(buffer);
   assert(intra_matrix);
   assert(non_intra_matrix);

   pipe = buffer->zscan->pipe;

   rect.width *= buffer->zscan->blocks_per_line;

   buf_transfer = pipe->get_transfer
   (
      pipe, buffer->quant->texture,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride;

   non_intra = pipe->transfer_map(pipe, buf_transfer);
   if (!non_intra)
      goto error_map;

   intra = non_intra + BLOCK_HEIGHT * pitch;

   for (i = 0; i < buffer->zscan->blocks_per_line; ++i)
      for (y = 0; y < BLOCK_HEIGHT; ++y)
         for (x = 0; x < BLOCK_WIDTH; ++x) {
            intra[i * BLOCK_WIDTH + y * pitch + x] = intra_matrix[x + y * BLOCK_WIDTH];
            non_intra[i * BLOCK_WIDTH + y * pitch + x] = non_intra_matrix[x + y * BLOCK_WIDTH];
         }

   pipe->transfer_unmap(pipe, buf_transfer);

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   return;
}

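/*
 * Draw num_instances quads, one per block, running the inverse scan and
 * dequantization shaders into the buffer's framebuffer.  Passing &buffer->src
 * to set_fragment_sampler_views() relies on src, layout and quant being
 * consecutive members of struct vl_zscan_buffer.
 *
 * A typical per-picture call sequence might look like this (a sketch, not
 * the only valid order):
 *
 *    vl_zscan_set_layout(&buffer, layout);
 *    vl_zscan_upload_quant(&buffer, intra_matrix, non_intra_matrix);
 *    ... upload the coefficients referenced by buffer->src ...
 *    vl_zscan_render(&buffer, num_blocks);
 */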
void
vl_zscan_render(struct vl_zscan_buffer *buffer, unsigned num_instances)
{
   struct vl_zscan *zscan;

   assert(buffer);

   zscan = buffer->zscan;

   zscan->pipe->bind_rasterizer_state(zscan->pipe, zscan->rs_state);
   zscan->pipe->bind_blend_state(zscan->pipe, zscan->blend);
   zscan->pipe->bind_fragment_sampler_states(zscan->pipe, 3, zscan->samplers);
   zscan->pipe->set_framebuffer_state(zscan->pipe, &buffer->fb_state);
   zscan->pipe->set_viewport_state(zscan->pipe, &buffer->viewport);
   zscan->pipe->set_fragment_sampler_views(zscan->pipe, 3, &buffer->src);
   zscan->pipe->bind_vs_state(zscan->pipe, zscan->vs);
   zscan->pipe->bind_fs_state(zscan->pipe, zscan->fs);
   util_draw_arrays_instanced(zscan->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}