Merge branch 'pipe-video'
[mesa.git] / src / gallium / auxiliary / vl / vl_mc.c
1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include <pipe/p_context.h>
31
32 #include <util/u_sampler.h>
33 #include <util/u_draw.h>
34
35 #include <tgsi/tgsi_ureg.h>
36
37 #include "vl_defines.h"
38 #include "vl_vertex_buffers.h"
39 #include "vl_mc.h"
40 #include "vl_idct.h"
41
/* Vertex shader output slot assignment.
 * The reference (motion compensation) shaders use VS_O_VTOP/VS_O_VBOTTOM
 * for the two field motion vectors; the ycbcr shaders reuse the very same
 * slots for their flags and texture coordinates (see the aliases below),
 * so both shader pairs agree on the output layout. */
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTOP,
   VS_O_VBOTTOM,

   /* aliases: the ycbcr shaders share output slots with the ref shaders */
   VS_O_FLAGS = VS_O_VTOP,
   VS_O_VTEX = VS_O_VBOTTOM
};
51
52 static struct ureg_dst
53 calc_position(struct vl_mc *r, struct ureg_program *shader, struct ureg_src block_scale)
54 {
55 struct ureg_src vrect, vpos;
56 struct ureg_dst t_vpos;
57 struct ureg_dst o_vpos;
58
59 vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
60 vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
61
62 t_vpos = ureg_DECL_temporary(shader);
63
64 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
65
66 /*
67 * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
68 *
69 * t_vpos = (vpos + vrect) * block_scale
70 * o_vpos.xy = t_vpos
71 * o_vpos.zw = vpos
72 */
73 ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
74 ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
75 ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
76 ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));
77
78 return t_vpos;
79 }
80
81 static struct ureg_dst
82 calc_line(struct ureg_program *shader)
83 {
84 struct ureg_dst tmp;
85 struct ureg_src pos;
86
87 tmp = ureg_DECL_temporary(shader);
88
89 pos = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS, TGSI_INTERPOLATE_LINEAR);
90
91 /*
92 * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
93 */
94 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), pos, ureg_imm1f(shader, 0.5f));
95 ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp));
96 ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp), ureg_imm1f(shader, 0.5f));
97
98 return tmp;
99 }
100
101 static void *
102 create_ref_vert_shader(struct vl_mc *r)
103 {
104 struct ureg_program *shader;
105 struct ureg_src mv_scale;
106 struct ureg_src vrect, vmv[2];
107 struct ureg_dst t_vpos;
108 struct ureg_dst o_vpos, o_vmv[2];
109 unsigned i;
110
111 shader = ureg_create(TGSI_PROCESSOR_VERTEX);
112 if (!shader)
113 return NULL;
114
115 vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
116 vmv[0] = ureg_DECL_vs_input(shader, VS_I_MV_TOP);
117 vmv[1] = ureg_DECL_vs_input(shader, VS_I_MV_BOTTOM);
118
119 t_vpos = calc_position(r, shader, ureg_imm2f(shader,
120 (float)MACROBLOCK_WIDTH / r->buffer_width,
121 (float)MACROBLOCK_HEIGHT / r->buffer_height)
122 );
123
124 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
125 o_vmv[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
126 o_vmv[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);
127
128 /*
129 * mv_scale.xy = 0.5 / (dst.width, dst.height);
130 * mv_scale.z = 1.0f / 4.0f
131 * mv_scale.w = 1.0f / 255.0f
132 *
133 * // Apply motion vectors
134 * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
135 * o_vmv[0..1].zw = vmv[0..1] * mv_scale
136 *
137 */
138
139 mv_scale = ureg_imm4f(shader,
140 0.5f / r->buffer_width,
141 0.5f / r->buffer_height,
142 1.0f / 4.0f,
143 1.0f / PIPE_VIDEO_MV_WEIGHT_MAX);
144
145 for (i = 0; i < 2; ++i) {
146 ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), mv_scale, vmv[i], ureg_src(t_vpos));
147 ureg_MUL(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_ZW), mv_scale, vmv[i]);
148 }
149
150 ureg_release_temporary(shader, t_vpos);
151
152 ureg_END(shader);
153
154 return ureg_create_shader_and_destroy(shader, r->pipe);
155 }
156
/* Build the fragment shader used when rendering motion-compensated
 * reference blocks.  Per fragment it selects the top- or bottom-field
 * texture coordinate depending on the current scan line, adjusts the
 * y coordinate when field-based prediction is signalled (nonzero .z in
 * the coordinate), and samples the reference picture.
 */
static void *
create_ref_frag_shader(struct vl_mc *r)
{
   /* Scale mapping a normalized y coordinate to field line numbers;
    * depends on whether the buffer is frame- or field-sized relative
    * to MACROBLOCK_HEIGHT. */
   const float y_scale =
      r->buffer_height / 2 *
      r->macroblock_size / MACROBLOCK_HEIGHT;

   struct ureg_program *shader;
   struct ureg_src tc[2], sampler;
   struct ureg_dst ref, field;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   /* tc[0] = motion vector / texcoord for the top field,
    * tc[1] = for the bottom field (written by create_ref_vert_shader). */
   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   sampler = ureg_DECL_sampler(shader, 0);
   ref = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /* field.y = 1 on bottom-field lines, 0 on top-field lines */
   field = calc_line(shader);

   /*
    * ref = field.y ? tc[1] : tc[0]
    *
    * // Adjust tc according to top/bottom field selection
    * if (|ref.z|) {
    *    ref.y *= y_scale
    *    ref.y = floor(ref.y)
    *    ref.y += ref.z
    *    ref.y /= y_scale
    * }
    * fragment.xyz = tex(ref, sampler[0])
    */
   ureg_CMP(shader, ureg_writemask(ref, TGSI_WRITEMASK_XYZ),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);
   /* fragment.w gets the per-field weight; xyz is overwritten by TEX below */
   ureg_CMP(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);

   ureg_IF(shader, ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z), &label);

      /* snap to a field line and shift it by ref.z */
      ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_imm1f(shader, y_scale));
      ureg_FLR(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y), ureg_src(ref));
      ureg_ADD(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z));
      ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_imm1f(shader, 1.0f / y_scale));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), TGSI_TEXTURE_2D, ureg_src(ref), sampler);

   ureg_release_temporary(shader, ref);

   ureg_release_temporary(shader, field);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}
225
/* Build the vertex shader used when rendering ycbcr (idct/coefficient)
 * blocks.  Computes the block position, lets the caller-supplied
 * vs_callback emit the texture-coordinate outputs, and handles
 * field-coded (interlaced) blocks by shifting vertices onto the proper
 * field lines and emitting a field flag for the fragment shader.
 */
static void *
create_ycbcr_vert_shader(struct vl_mc *r, vl_mc_ycbcr_vert_shader vs_callback, void *callback_priv)
{
   struct ureg_program *shader;

   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos, t_vtex;
   struct ureg_dst o_vpos, o_flags;

   /* block-to-clip-space scale for a single BLOCK_WIDTH x BLOCK_HEIGHT
    * coefficient block */
   struct vertex2f scale = {
      (float)BLOCK_WIDTH / r->buffer_width * MACROBLOCK_WIDTH / r->macroblock_size,
      (float)BLOCK_HEIGHT / r->buffer_height * MACROBLOCK_HEIGHT / r->macroblock_size
   };

   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader, scale.x, scale.y));
   t_vtex = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_flags = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS);

   /*
    * o_vtex.xy = t_vpos            (emitted by vs_callback)
    * o_flags.z = intra * 0.5       (vpos.z carries the intra flag)
    * o_flags.w = -1                (default: no field selection)
    *
    * if (interlaced) {             (vpos.w carries the interlaced flag)
    *    t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y, 0 }
    *    t_vtex.z = vpos.y % 2
    *    t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
    *    o_vpos.y = t_vtex.y + t_vpos.y
    *
    *    o_flags.w = t_vtex.z ? 0 : 1
    * }
    *
    */

   /* caller emits the VS_O_VTEX output(s) from the computed position */
   vs_callback(callback_priv, r, shader, VS_O_VTEX, t_vpos);

   ureg_MUL(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_Z),
            ureg_scalar(vpos, TGSI_SWIZZLE_Z), ureg_imm1f(shader, 0.5f));
   ureg_MOV(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W), ureg_imm1f(shader, -1.0f));

   if (r->macroblock_size == MACROBLOCK_HEIGHT) { //TODO
      /* NOTE(review): interlaced handling is only emitted for frame-sized
       * macroblocks; the TODO above predates this review. */
      ureg_IF(shader, ureg_scalar(vpos, TGSI_SWIZZLE_W), &label);

         /* select the per-field y offset depending on the quad corner */
         ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY),
                  ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_Y)),
                  ureg_imm2f(shader, 0.0f, scale.y),
                  ureg_imm2f(shader, -scale.y, 0.0f));
         /* t_vtex.z = fraction(vpos.y / 2): odd/even macroblock row */
         ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z),
                  ureg_scalar(vpos, TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.5f));

         ureg_FRC(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z), ureg_src(t_vtex));

         ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y),
                  ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
                  ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_X),
                  ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Y));
         ureg_ADD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_Y),
                  ureg_src(t_vpos), ureg_src(t_vtex));

         /* field flag for the fragment shader's kill test */
         ureg_CMP(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W),
                  ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
                  ureg_imm1f(shader, 0.0f), ureg_imm1f(shader, 1.0f));

      ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
      ureg_ENDIF(shader);
   }

   ureg_release_temporary(shader, t_vtex);
   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}
310
/* Build the fragment shader used when rendering ycbcr blocks.
 *
 * scale   - multiplier applied to the sampled value (1.0 means none)
 * invert  - negate the result; used for the subtractive second pass
 *           bound together with the blend_sub blender
 *
 * Fragments on the wrong field line (flag match from calc_line) are
 * killed; otherwise the caller-supplied fs_callback fetches the value,
 * which is then scaled, biased by flags.z and optionally negated.
 */
static void *
create_ycbcr_frag_shader(struct vl_mc *r, float scale, bool invert,
                         vl_mc_ycbcr_frag_shader fs_callback, void *callback_priv)
{
   struct ureg_program *shader;
   struct ureg_src flags;
   struct ureg_dst tmp;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   flags = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS, TGSI_INTERPOLATE_LINEAR);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /* tmp.y = current scan line's field (0 = top, 1 = bottom) */
   tmp = calc_line(shader);

   /*
    * if (field == flags.w)
    *    kill();
    * else {
    *    fragment.xyz = tex(tc, sampler) * scale + flags.z
    *    fragment.w = 1.0f
    * }
    */

   ureg_SEQ(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
            ureg_scalar(flags, TGSI_SWIZZLE_W), ureg_src(tmp));

   ureg_IF(shader, ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), &label);

      /* fragment belongs to the other field: discard it */
      ureg_KILP(shader);

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

      /* caller fetches the block value into tmp */
      fs_callback(callback_priv, r, shader, VS_O_VTEX, tmp);

      if (scale != 1.0f)
         ureg_MAD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XYZ),
                  ureg_src(tmp), ureg_imm1f(shader, scale),
                  ureg_scalar(flags, TGSI_SWIZZLE_Z));
      else
         ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XYZ),
                  ureg_src(tmp), ureg_scalar(flags, TGSI_SWIZZLE_Z));

      /* optional negation for the reverse-subtract pass */
      ureg_MUL(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(tmp), ureg_imm1f(shader, invert ? -1.0f : 1.0f));
      ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_release_temporary(shader, tmp);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}
372
373 static bool
374 init_pipe_state(struct vl_mc *r)
375 {
376 struct pipe_sampler_state sampler;
377 struct pipe_blend_state blend;
378 struct pipe_rasterizer_state rs_state;
379 unsigned i;
380
381 assert(r);
382
383 memset(&sampler, 0, sizeof(sampler));
384 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
385 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
386 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
387 sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
388 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
389 sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
390 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
391 sampler.compare_func = PIPE_FUNC_ALWAYS;
392 sampler.normalized_coords = 1;
393 r->sampler_ref = r->pipe->create_sampler_state(r->pipe, &sampler);
394 if (!r->sampler_ref)
395 goto error_sampler_ref;
396
397 for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
398 memset(&blend, 0, sizeof blend);
399 blend.independent_blend_enable = 0;
400 blend.rt[0].blend_enable = 1;
401 blend.rt[0].rgb_func = PIPE_BLEND_ADD;
402 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
403 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
404 blend.rt[0].alpha_func = PIPE_BLEND_ADD;
405 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
406 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
407 blend.logicop_enable = 0;
408 blend.logicop_func = PIPE_LOGICOP_CLEAR;
409 blend.rt[0].colormask = i;
410 blend.dither = 0;
411 r->blend_clear[i] = r->pipe->create_blend_state(r->pipe, &blend);
412 if (!r->blend_clear[i])
413 goto error_blend;
414
415 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
416 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
417 r->blend_add[i] = r->pipe->create_blend_state(r->pipe, &blend);
418 if (!r->blend_add[i])
419 goto error_blend;
420
421 blend.rt[0].rgb_func = PIPE_BLEND_REVERSE_SUBTRACT;
422 blend.rt[0].alpha_dst_factor = PIPE_BLEND_REVERSE_SUBTRACT;
423 r->blend_sub[i] = r->pipe->create_blend_state(r->pipe, &blend);
424 if (!r->blend_sub[i])
425 goto error_blend;
426 }
427
428 memset(&rs_state, 0, sizeof(rs_state));
429 /*rs_state.sprite_coord_enable */
430 rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
431 rs_state.point_quad_rasterization = true;
432 rs_state.point_size = BLOCK_WIDTH;
433 rs_state.gl_rasterization_rules = true;
434 r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
435 if (!r->rs_state)
436 goto error_rs_state;
437
438 return true;
439
440 error_rs_state:
441 error_blend:
442 for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
443 if (r->blend_sub[i])
444 r->pipe->delete_blend_state(r->pipe, r->blend_sub[i]);
445
446 if (r->blend_add[i])
447 r->pipe->delete_blend_state(r->pipe, r->blend_add[i]);
448
449 if (r->blend_clear[i])
450 r->pipe->delete_blend_state(r->pipe, r->blend_clear[i]);
451 }
452
453 r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);
454
455 error_sampler_ref:
456 return false;
457 }
458
459 static void
460 cleanup_pipe_state(struct vl_mc *r)
461 {
462 unsigned i;
463
464 assert(r);
465
466 r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);
467 for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
468 r->pipe->delete_blend_state(r->pipe, r->blend_clear[i]);
469 r->pipe->delete_blend_state(r->pipe, r->blend_add[i]);
470 r->pipe->delete_blend_state(r->pipe, r->blend_sub[i]);
471 }
472 r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
473 }
474
/* Initialize a motion-compensation renderer.
 *
 * buffer_width/height  - size of the target picture in pixels
 * macroblock_size      - macroblock height used to derive scaling
 * scale                - value multiplier passed to the ycbcr shaders
 * vs_callback /
 * fs_callback          - hooks that emit the texcoord outputs / value
 *                        fetch for the ycbcr shaders
 * callback_priv        - opaque pointer handed back to the callbacks
 *
 * Returns false on failure; on failure everything created so far is
 * released again (note the reverse-order goto unwind chain below).
 */
bool
vl_mc_init(struct vl_mc *renderer, struct pipe_context *pipe,
           unsigned buffer_width, unsigned buffer_height,
           unsigned macroblock_size, float scale,
           vl_mc_ycbcr_vert_shader vs_callback,
           vl_mc_ycbcr_frag_shader fs_callback,
           void *callback_priv)
{
   assert(renderer);
   assert(pipe);

   memset(renderer, 0, sizeof(struct vl_mc));

   renderer->pipe = pipe;
   renderer->buffer_width = buffer_width;
   renderer->buffer_height = buffer_height;
   renderer->macroblock_size = macroblock_size;

   if (!init_pipe_state(renderer))
      goto error_pipe_state;

   renderer->vs_ref = create_ref_vert_shader(renderer);
   if (!renderer->vs_ref)
      goto error_vs_ref;

   renderer->vs_ycbcr = create_ycbcr_vert_shader(renderer, vs_callback, callback_priv);
   if (!renderer->vs_ycbcr)
      goto error_vs_ycbcr;

   renderer->fs_ref = create_ref_frag_shader(renderer);
   if (!renderer->fs_ref)
      goto error_fs_ref;

   /* normal (additive) and inverted (subtractive) ycbcr shaders */
   renderer->fs_ycbcr = create_ycbcr_frag_shader(renderer, scale, false, fs_callback, callback_priv);
   if (!renderer->fs_ycbcr)
      goto error_fs_ycbcr;

   renderer->fs_ycbcr_sub = create_ycbcr_frag_shader(renderer, scale, true, fs_callback, callback_priv);
   if (!renderer->fs_ycbcr_sub)
      goto error_fs_ycbcr_sub;

   return true;

/* unwind in reverse creation order */
error_fs_ycbcr_sub:
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr);

error_fs_ycbcr:
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);

error_fs_ref:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);

error_vs_ycbcr:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);

error_vs_ref:
   cleanup_pipe_state(renderer);

error_pipe_state:
   return false;
}
536
537 void
538 vl_mc_cleanup(struct vl_mc *renderer)
539 {
540 assert(renderer);
541
542 cleanup_pipe_state(renderer);
543
544 renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);
545 renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);
546 renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);
547 renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr);
548 renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr_sub);
549 }
550
551 bool
552 vl_mc_init_buffer(struct vl_mc *renderer, struct vl_mc_buffer *buffer)
553 {
554 assert(renderer && buffer);
555
556 buffer->renderer = renderer;
557
558 buffer->viewport.scale[2] = 1;
559 buffer->viewport.scale[3] = 1;
560 buffer->viewport.translate[0] = 0;
561 buffer->viewport.translate[1] = 0;
562 buffer->viewport.translate[2] = 0;
563 buffer->viewport.translate[3] = 0;
564
565 buffer->fb_state.nr_cbufs = 1;
566 buffer->fb_state.zsbuf = NULL;
567
568 return true;
569 }
570
/* Release per-picture buffer resources (currently nothing to free;
 * kept for API symmetry with vl_mc_init_buffer). */
void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);
}
576
577 void
578 vl_mc_set_surface(struct vl_mc_buffer *buffer, struct pipe_surface *surface)
579 {
580 assert(buffer && surface);
581
582 buffer->surface_cleared = false;
583
584 buffer->viewport.scale[0] = surface->width;
585 buffer->viewport.scale[1] = surface->height;
586
587 buffer->fb_state.width = surface->width;
588 buffer->fb_state.height = surface->height;
589 buffer->fb_state.cbufs[0] = surface;
590 }
591
592 static void
593 prepare_pipe_4_rendering(struct vl_mc_buffer *buffer, unsigned mask)
594 {
595 struct vl_mc *renderer;
596
597 assert(buffer);
598
599 renderer = buffer->renderer;
600 renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);
601
602 if (buffer->surface_cleared)
603 renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_add[mask]);
604 else
605 renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_clear[mask]);
606
607 renderer->pipe->set_framebuffer_state(renderer->pipe, &buffer->fb_state);
608 renderer->pipe->set_viewport_state(renderer->pipe, &buffer->viewport);
609 }
610
611 void
612 vl_mc_render_ref(struct vl_mc_buffer *buffer, struct pipe_sampler_view *ref)
613 {
614 struct vl_mc *renderer;
615
616 assert(buffer && ref);
617
618 prepare_pipe_4_rendering(buffer, PIPE_MASK_R | PIPE_MASK_G | PIPE_MASK_B);
619
620 renderer = buffer->renderer;
621
622 renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ref);
623 renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ref);
624
625 renderer->pipe->set_fragment_sampler_views(renderer->pipe, 1, &ref);
626 renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 1, &renderer->sampler_ref);
627
628 util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0,
629 renderer->buffer_width / MACROBLOCK_WIDTH *
630 renderer->buffer_height / MACROBLOCK_HEIGHT);
631
632 buffer->surface_cleared = true;
633 }
634
635 void
636 vl_mc_render_ycbcr(struct vl_mc_buffer *buffer, unsigned component, unsigned num_instances)
637 {
638 struct vl_mc *renderer;
639 unsigned mask = 1 << component;
640
641 assert(buffer);
642
643 if (num_instances == 0)
644 return;
645
646 prepare_pipe_4_rendering(buffer, mask);
647
648 renderer = buffer->renderer;
649
650 renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ycbcr);
651 renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ycbcr);
652
653 util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
654
655 if (buffer->surface_cleared) {
656 renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_sub[mask]);
657 renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ycbcr_sub);
658 util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
659 }
660 }