/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_context.h>

#include <util/u_sampler.h>
#include <util/u_draw.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_defines.h"
#include "vl_vertex_buffers.h"
#include "vl_mc.h"

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTOP,
   VS_O_VBOTTOM
};

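/*
 * Emit vertex shader code that turns the per-macroblock grid position (vpos)
 * plus the quad corner offset (vrect) into render target coordinates and
 * writes it to the position output. Returns the temporary holding the scaled
 * position so callers can reuse it.
 */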
static struct ureg_dst
calc_position(struct vl_mc *r, struct ureg_program *shader, struct ureg_src block_scale)
{
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /*
    * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = (vpos + vrect) * block_scale
    * o_vpos.xy = t_vpos
    * o_vpos.zw = (1.0, 1.0)
    */
   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   return t_vpos;
}

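/*
 * Emit fragment shader code that classifies the current destination line:
 * tmp.y ends up as 1.0 on odd lines and 0.0 on even lines, derived from the
 * interpolated window position.
 */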
static struct ureg_dst
calc_line(struct ureg_program *shader)
{
   struct ureg_dst tmp;
   struct ureg_src pos;

   tmp = ureg_DECL_temporary(shader);

   pos = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS, TGSI_INTERPOLATE_LINEAR);

   /*
    * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
    */
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), pos, ureg_imm1f(shader, 0.5f));
   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp));
   ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp), ureg_imm1f(shader, 0.5f));

   return tmp;
}

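/*
 * Create the vertex shader used when rendering from a reference frame: it
 * positions the macroblock quad and converts the top and bottom field motion
 * vectors into texture coordinates for the fragment shader.
 */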
static void *
create_ref_vert_shader(struct vl_mc *r)
{
   struct ureg_program *shader;
   struct ureg_src mv_scale;
   struct ureg_src vrect, vmv[2];
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_vmv[2];
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vmv[0] = ureg_DECL_vs_input(shader, VS_I_MV_TOP);
   vmv[1] = ureg_DECL_vs_input(shader, VS_I_MV_BOTTOM);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader,
      (float)MACROBLOCK_WIDTH / r->buffer_width,
      (float)MACROBLOCK_HEIGHT / r->buffer_height)
   );

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_vmv[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
   o_vmv[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);

   /*
    * mv_scale.xy = 0.5 / (dst.width, dst.height)
    * mv_scale.z = 1.0f / 4.0f
    * mv_scale.w = 1.0f / PIPE_VIDEO_MV_WEIGHT_MAX
    *
    * // Apply motion vectors
    * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
    * o_vmv[0..1].zw = vmv[0..1] * mv_scale
    */

   mv_scale = ureg_imm4f(shader,
      0.5f / r->buffer_width,
      0.5f / r->buffer_height,
      1.0f / 4.0f,
      1.0f / PIPE_VIDEO_MV_WEIGHT_MAX);

   for (i = 0; i < 2; ++i) {
      ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), mv_scale, vmv[i], ureg_src(t_vpos));
      ureg_MUL(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_ZW), mv_scale, vmv[i]);
   }

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

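/*
 * Create the fragment shader that samples the reference frame. Per pixel it
 * selects the top or bottom field texture coordinates depending on the
 * destination line and, when the motion vector's field select component is
 * non-zero, snaps the y coordinate to the proper field line before sampling.
 */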
static void *
create_ref_frag_shader(struct vl_mc *r)
{
   const float y_scale =
      r->buffer_height / 2 *
      r->macroblock_size / MACROBLOCK_HEIGHT;

   struct ureg_program *shader;
   struct ureg_src tc[2], sampler;
   struct ureg_dst ref, field;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   sampler = ureg_DECL_sampler(shader, 0);
   ref = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   field = calc_line(shader);

   /*
    * ref = field.y ? tc[1] : tc[0]
    *
    * // Adjust tc according to top/bottom field selection
    * if (|ref.z|) {
    *    ref.y *= y_scale
    *    ref.y = floor(ref.y)
    *    ref.y += ref.z
    *    ref.y /= y_scale
    * }
    * fragment.xyz = tex(ref, sampler[0])
    */
   ureg_CMP(shader, ureg_writemask(ref, TGSI_WRITEMASK_XYZ),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);
   ureg_CMP(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);

   ureg_IF(shader, ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z), &label);

      ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_imm1f(shader, y_scale));
      ureg_FLR(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y), ureg_src(ref));
      ureg_ADD(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z));
      ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_imm1f(shader, 1.0f / y_scale));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), TGSI_TEXTURE_2D, ureg_src(ref), sampler);

   ureg_release_temporary(shader, ref);
   ureg_release_temporary(shader, field);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

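/*
 * Create the vertex shader for rendering the YCbCr block data: it scales the
 * block position to render target coordinates, forwards the block texture
 * coordinate and intra flag, and for interlaced macroblocks shifts the quad
 * onto the lines of the field it belongs to.
 */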
static void *
create_ycbcr_vert_shader(struct vl_mc *r)
{
   struct ureg_program *shader;

   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos, t_vtex;
   struct ureg_dst o_vpos, o_vtex;

   struct vertex2f scale = {
      (float)BLOCK_WIDTH / r->buffer_width * MACROBLOCK_WIDTH / r->macroblock_size,
      (float)BLOCK_HEIGHT / r->buffer_height * MACROBLOCK_HEIGHT / r->macroblock_size
   };

   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader, scale.x, scale.y));
   t_vtex = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);

   /*
    * o_vtex.xy = t_vpos
    * o_vtex.z = intra * 0.5
    *
    * if (interlaced) {
    *    t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y, 0 }
    *    t_vtex.z = vpos.y % 2
    *    t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
    *    o_vpos.y = t_vtex.y + t_vpos.y
    *
    *    o_vtex.w = t_vtex.z ? 0 : 1
    * }
    */
   ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MUL(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_Z),
            ureg_scalar(vpos, TGSI_SWIZZLE_Z), ureg_imm1f(shader, 0.5f));
   ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_W), ureg_imm1f(shader, -1.0f));

   if (r->macroblock_size == MACROBLOCK_HEIGHT) { //TODO
      ureg_IF(shader, ureg_scalar(vpos, TGSI_SWIZZLE_W), &label);

         ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY),
                  ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_Y)),
                  ureg_imm2f(shader, 0.0f, scale.y),
                  ureg_imm2f(shader, -scale.y, 0.0f));
         ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z),
                  ureg_scalar(vpos, TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.5f));

         ureg_FRC(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z), ureg_src(t_vtex));

         ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y),
                  ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
                  ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_X),
                  ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Y));
         ureg_ADD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_Y),
                  ureg_src(t_vpos), ureg_src(t_vtex));

         ureg_CMP(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_W),
                  ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
                  ureg_imm1f(shader, 0.0f), ureg_imm1f(shader, 1.0f));

      ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
      ureg_ENDIF(shader);
   }

   ureg_release_temporary(shader, t_vtex);
   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

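/*
 * Create the fragment shader that writes the block data into the render
 * target: pixels belonging to the other field are killed, the block texel is
 * scaled if necessary, and the bias carried in tc.z is added before blending.
 */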
static void *
create_ycbcr_frag_shader(struct vl_mc *r, float scale)
{
   struct ureg_program *shader;
   struct ureg_src tc, sampler;
   struct ureg_dst tmp;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);

   sampler = ureg_DECL_sampler(shader, 0);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = calc_line(shader);

   /*
    * if (field == tc.w)
    *    kill();
    * else {
    *    fragment.xyz = tex(tc, sampler) * scale + tc.z
    *    fragment.w = 1.0f
    * }
    */

   ureg_SEQ(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
            ureg_scalar(tc, TGSI_SWIZZLE_W), ureg_src(tmp));

   ureg_IF(shader, ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), &label);

      ureg_KILP(shader);

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

      ureg_TEX(shader, tmp, TGSI_TEXTURE_2D, tc, sampler);

      if (scale != 1.0f)
         ureg_MAD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
                  ureg_src(tmp), ureg_imm1f(shader, scale),
                  ureg_scalar(tc, TGSI_SWIZZLE_Z));
      else
         ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
                  ureg_src(tmp), ureg_scalar(tc, TGSI_SWIZZLE_Z));

      ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_release_temporary(shader, tmp);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

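/*
 * Create the CSO state shared by all buffers: a linear sampler for the
 * reference texture and a nearest sampler for the block texture, one blend
 * state that overwrites the destination on the first pass over a surface and
 * one that adds to it on later passes, and the rasterizer state.
 */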
static bool
init_pipe_state(struct vl_mc *r)
{
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;

   assert(r);

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;
   r->sampler_ref = r->pipe->create_sampler_state(r->pipe, &sampler);
   if (!r->sampler_ref)
      goto error_sampler_ref;

   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   r->sampler_ycbcr = r->pipe->create_sampler_state(r->pipe, &sampler);
   if (!r->sampler_ycbcr)
      goto error_sampler_ycbcr;

   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   r->blend_clear = r->pipe->create_blend_state(r->pipe, &blend);
   if (!r->blend_clear)
      goto error_blend_clear;

   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   r->blend_add = r->pipe->create_blend_state(r->pipe, &blend);
   if (!r->blend_add)
      goto error_blend_add;

   memset(&rs_state, 0, sizeof(rs_state));
   /*rs_state.sprite_coord_enable */
   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
   rs_state.point_quad_rasterization = true;
   rs_state.point_size = BLOCK_WIDTH;
   rs_state.gl_rasterization_rules = true;
   r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
   if (!r->rs_state)
      goto error_rs_state;

   return true;

error_rs_state:
   r->pipe->delete_blend_state(r->pipe, r->blend_add);

error_blend_add:
   r->pipe->delete_blend_state(r->pipe, r->blend_clear);

error_blend_clear:
   r->pipe->delete_sampler_state(r->pipe, r->sampler_ycbcr);

error_sampler_ycbcr:
   r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);

error_sampler_ref:
   return false;
}

static void
cleanup_pipe_state(struct vl_mc *r)
{
   assert(r);

   r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);
   r->pipe->delete_sampler_state(r->pipe, r->sampler_ycbcr);
   r->pipe->delete_blend_state(r->pipe, r->blend_clear);
   r->pipe->delete_blend_state(r->pipe, r->blend_add);
   r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
}

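/*
 * Initialize a motion compensation renderer: store the target dimensions,
 * create the shared pipe state and compile the reference and YCbCr
 * vertex/fragment shaders. On failure everything created so far is destroyed
 * again and false is returned.
 */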
bool
vl_mc_init(struct vl_mc *renderer, struct pipe_context *pipe,
           unsigned buffer_width, unsigned buffer_height,
           unsigned macroblock_size, float scale)
{
   assert(renderer);
   assert(pipe);

   memset(renderer, 0, sizeof(struct vl_mc));

   renderer->pipe = pipe;
   renderer->buffer_width = buffer_width;
   renderer->buffer_height = buffer_height;
   renderer->macroblock_size = macroblock_size;

   if (!init_pipe_state(renderer))
      goto error_pipe_state;

   renderer->vs_ref = create_ref_vert_shader(renderer);
   if (!renderer->vs_ref)
      goto error_vs_ref;

   renderer->vs_ycbcr = create_ycbcr_vert_shader(renderer);
   if (!renderer->vs_ycbcr)
      goto error_vs_ycbcr;

   renderer->fs_ref = create_ref_frag_shader(renderer);
   if (!renderer->fs_ref)
      goto error_fs_ref;

   renderer->fs_ycbcr = create_ycbcr_frag_shader(renderer, scale);
   if (!renderer->fs_ycbcr)
      goto error_fs_ycbcr;

   return true;

error_fs_ycbcr:
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);

error_fs_ref:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);

error_vs_ycbcr:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);

error_vs_ref:
   cleanup_pipe_state(renderer);

error_pipe_state:
   return false;
}

void
vl_mc_cleanup(struct vl_mc *renderer)
{
   assert(renderer);

   cleanup_pipe_state(renderer);

   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr);
}

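/*
 * Set up a per-picture buffer: remember the owning renderer, preset the
 * viewport and framebuffer fields that do not depend on the destination
 * surface, and take a reference on the sampler view holding the block data.
 */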
bool
vl_mc_init_buffer(struct vl_mc *renderer, struct vl_mc_buffer *buffer,
                  struct pipe_sampler_view *source)
{
   assert(renderer && buffer);
   assert(source);

   buffer->renderer = renderer;

   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   buffer->fb_state.nr_cbufs = 1;
   buffer->fb_state.zsbuf = NULL;

   pipe_sampler_view_reference(&buffer->source, source);

   return true;
}

void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);

   pipe_sampler_view_reference(&buffer->source, NULL);
}

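/*
 * Point the buffer at a new destination surface: update the viewport scale
 * and framebuffer state to match the surface size and mark the surface as
 * not yet cleared so the next draw uses the replacing blend state.
 */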
void
vl_mc_set_surface(struct vl_mc_buffer *buffer, struct pipe_surface *surface)
{
   assert(buffer && surface);

   buffer->surface_cleared = false;

   buffer->viewport.scale[0] = surface->width;
   buffer->viewport.scale[1] = surface->height;

   buffer->fb_state.width = surface->width;
   buffer->fb_state.height = surface->height;
   buffer->fb_state.cbufs[0] = surface;
}

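/*
 * Bind the state common to both render paths. The first pass over a surface
 * uses the blend state that overwrites the destination; every following pass
 * blends additively on top of it.
 */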
static void
prepare_pipe_4_rendering(struct vl_mc_buffer *buffer)
{
   struct vl_mc *renderer;

   assert(buffer);

   renderer = buffer->renderer;
   renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);

   if (buffer->surface_cleared)
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_add);
   else {
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_clear);
      buffer->surface_cleared = true;
   }

   renderer->pipe->set_framebuffer_state(renderer->pipe, &buffer->fb_state);
   renderer->pipe->set_viewport_state(renderer->pipe, &buffer->viewport);
}

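/*
 * Render the motion compensated prediction from a reference frame: bind the
 * reference shaders and sampler view and draw one instanced quad per
 * macroblock covering the destination.
 */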
void
vl_mc_render_ref(struct vl_mc_buffer *buffer, struct pipe_sampler_view *ref)
{
   struct vl_mc *renderer;

   assert(buffer && ref);

   prepare_pipe_4_rendering(buffer);

   renderer = buffer->renderer;

   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ref);
   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ref);

   renderer->pipe->set_fragment_sampler_views(renderer->pipe, 1, &ref);
   renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 1, &renderer->sampler_ref);

   util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0,
                              renderer->buffer_width / MACROBLOCK_WIDTH *
                              renderer->buffer_height / MACROBLOCK_HEIGHT);
}

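/*
 * Render the YCbCr block data on top of the prediction: bind the block
 * shaders and the buffer's source texture and draw the requested number of
 * instanced block quads.
 */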
void
vl_mc_render_ycbcr(struct vl_mc_buffer *buffer, unsigned num_instances)
{
   struct vl_mc *renderer;

   assert(buffer);

   if (num_instances == 0)
      return;

   prepare_pipe_4_rendering(buffer);

   renderer = buffer->renderer;

   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ycbcr);
   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ycbcr);

   renderer->pipe->set_fragment_sampler_views(renderer->pipe, 1, &buffer->source);
   renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 1, &renderer->sampler_ycbcr);

   util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}