/* src/gallium/auxiliary/vl/vl_mc.c */

/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_context.h>

#include <util/u_sampler.h>
#include <util/u_draw.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_defines.h"
#include "vl_vertex_buffers.h"
#include "vl_mc.h"
#include "vl_idct.h"

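/* Vertex shader output slots.  The reference and YCbCr shaders are separate
 * programs, so VS_O_FLAGS and VS_O_VTEX can alias the VS_O_VTOP and
 * VS_O_VBOTTOM slots used by the reference shaders. */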
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTOP,
   VS_O_VBOTTOM,

   VS_O_FLAGS = VS_O_VTOP,
   VS_O_VTEX = VS_O_VBOTTOM
};

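/* Emit vertex shader code that scales the per-vertex (vpos + vrect)
 * macroblock coordinate by block_scale and writes it to the position output;
 * the scaled position is also returned in a temporary so callers can
 * reuse it. */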
static struct ureg_dst
calc_position(struct vl_mc *r, struct ureg_program *shader, struct ureg_src block_scale)
{
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /*
    * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = (vpos + vrect) * block_scale
    * o_vpos.xy = t_vpos
    * o_vpos.zw = 1.0f
    */
   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   return t_vpos;
}

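/* Emit fragment shader code that classifies the current destination line:
 * the returned temporary's y component is 1.0 on odd lines and 0.0 on even
 * lines, which callers use for top/bottom field selection. */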
static struct ureg_dst
calc_line(struct ureg_program *shader)
{
   struct ureg_dst tmp;
   struct ureg_src pos;

   tmp = ureg_DECL_temporary(shader);

   pos = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS, TGSI_INTERPOLATE_LINEAR);

   /*
    * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
    */
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), pos, ureg_imm1f(shader, 0.5f));
   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp));
   ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp), ureg_imm1f(shader, 0.5f));

   return tmp;
}

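/* Vertex shader for the reference (motion compensation) pass: scales the top
 * and bottom field motion vectors and adds them to the block position,
 * producing one texture coordinate per field (xy = position plus scaled MV,
 * zw = the remaining scaled MV components, with the MV weight in w). */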
static void *
create_ref_vert_shader(struct vl_mc *r)
{
   struct ureg_program *shader;
   struct ureg_src mv_scale;
   struct ureg_src vmv[2];
   struct ureg_dst t_vpos;
   struct ureg_dst o_vmv[2];
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vmv[0] = ureg_DECL_vs_input(shader, VS_I_MV_TOP);
   vmv[1] = ureg_DECL_vs_input(shader, VS_I_MV_BOTTOM);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader,
      (float)MACROBLOCK_WIDTH / r->buffer_width,
      (float)MACROBLOCK_HEIGHT / r->buffer_height)
   );

   /* The position output is already declared and written by calc_position(). */
   o_vmv[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
   o_vmv[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);

   /*
    * mv_scale.xy = 0.5 / (dst.width, dst.height);
    * mv_scale.z = 1.0f / 4.0f
    * mv_scale.w = 1.0f / 255.0f
    *
    * // Apply motion vectors
    * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
    * o_vmv[0..1].zw = vmv[0..1] * mv_scale
    *
    */

   mv_scale = ureg_imm4f(shader,
      0.5f / r->buffer_width,
      0.5f / r->buffer_height,
      1.0f / 4.0f,
      1.0f / PIPE_VIDEO_MV_WEIGHT_MAX);

   for (i = 0; i < 2; ++i) {
      ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), mv_scale, vmv[i], ureg_src(t_vpos));
      ureg_MUL(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_ZW), mv_scale, vmv[i]);
   }

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

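/* Fragment shader for the reference pass: selects the top or bottom field
 * coordinate depending on the destination line, optionally snaps the y
 * coordinate onto a field line, and fetches the reference picture.  The MV
 * weight ends up in the alpha channel so the blender can apply it. */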
static void *
create_ref_frag_shader(struct vl_mc *r)
{
   const float y_scale =
      r->buffer_height / 2 *
      r->macroblock_size / MACROBLOCK_HEIGHT;

   struct ureg_program *shader;
   struct ureg_src tc[2], sampler;
   struct ureg_dst ref, field;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   sampler = ureg_DECL_sampler(shader, 0);
   ref = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   field = calc_line(shader);

   /*
    * ref = field.y ? tc[1] : tc[0]
    *
    * // Adjust tc according to top/bottom field selection
    * if (|ref.z|) {
    *    ref.y *= y_scale
    *    ref.y = floor(ref.y)
    *    ref.y += ref.z
    *    ref.y /= y_scale
    * }
    * fragment.xyz = tex(ref, sampler[0])
    */
   ureg_CMP(shader, ureg_writemask(ref, TGSI_WRITEMASK_XYZ),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);
   ureg_CMP(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);

   ureg_IF(shader, ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z), &label);

   ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
            ureg_src(ref), ureg_imm1f(shader, y_scale));
   ureg_FLR(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y), ureg_src(ref));
   ureg_ADD(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
            ureg_src(ref), ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z));
   ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
            ureg_src(ref), ureg_imm1f(shader, 1.0f / y_scale));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), TGSI_TEXTURE_2D, ureg_src(ref), sampler);

   ureg_release_temporary(shader, ref);

   ureg_release_temporary(shader, field);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

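/* Vertex shader for the YCbCr (block data) pass: positions one block quad,
 * passes per-block flags (intra flag and field selector), and lets the
 * caller-supplied vs_callback emit the code that writes the VS_O_VTEX
 * coordinate.  For field coded macroblocks the position is shifted onto the
 * correct field line. */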
static void *
create_ycbcr_vert_shader(struct vl_mc *r, vl_mc_ycbcr_vert_shader vs_callback, void *callback_priv)
{
   struct ureg_program *shader;

   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos, t_vtex;
   struct ureg_dst o_vpos, o_flags;

   struct vertex2f scale = {
      (float)BLOCK_WIDTH / r->buffer_width * MACROBLOCK_WIDTH / r->macroblock_size,
      (float)BLOCK_HEIGHT / r->buffer_height * MACROBLOCK_HEIGHT / r->macroblock_size
   };

   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader, scale.x, scale.y));
   t_vtex = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_flags = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS);

   /*
    * o_vtex.xy = t_vpos
    * o_flags.z = intra * 0.5
    *
    * if(interlaced) {
    *    t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y, 0 }
    *    t_vtex.z = vpos.y % 2
    *    t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
    *    o_vpos.y = t_vtex.y + t_vpos.y
    *
    *    o_flags.w = t_vtex.z ? 0 : 1
    * }
    *
    */

   vs_callback(callback_priv, r, shader, VS_O_VTEX, t_vpos);

   ureg_MUL(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_Z),
            ureg_scalar(vpos, TGSI_SWIZZLE_Z), ureg_imm1f(shader, 0.5f));
   ureg_MOV(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W), ureg_imm1f(shader, -1.0f));

   if (r->macroblock_size == MACROBLOCK_HEIGHT) { //TODO
      ureg_IF(shader, ureg_scalar(vpos, TGSI_SWIZZLE_W), &label);

      ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY),
               ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_Y)),
               ureg_imm2f(shader, 0.0f, scale.y),
               ureg_imm2f(shader, -scale.y, 0.0f));
      ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z),
               ureg_scalar(vpos, TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.5f));

      ureg_FRC(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z), ureg_src(t_vtex));

      ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y),
               ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
               ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_X),
               ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Y));
      ureg_ADD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_Y),
               ureg_src(t_vpos), ureg_src(t_vtex));

      ureg_CMP(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W),
               ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
               ureg_imm1f(shader, 0.0f), ureg_imm1f(shader, 1.0f));

      ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
      ureg_ENDIF(shader);
   }

   ureg_release_temporary(shader, t_vtex);
   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

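/* Fragment shader for the YCbCr pass: kills fragments that belong to the
 * other field, then scales and biases the value produced by the
 * caller-supplied fs_callback.  With invert set the result is negated,
 * which the subtractive second pass in vl_mc_render_ycbcr relies on. */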
static void *
create_ycbcr_frag_shader(struct vl_mc *r, float scale, bool invert,
                         vl_mc_ycbcr_frag_shader fs_callback, void *callback_priv)
{
   struct ureg_program *shader;
   struct ureg_src flags;
   struct ureg_dst tmp;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   flags = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS, TGSI_INTERPOLATE_LINEAR);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = calc_line(shader);

   /*
    * if (field == tc.w)
    *    kill();
    * else {
    *    fragment.xyz = tex(tc, sampler) * scale + tc.z
    *    fragment.w = 1.0f
    * }
    */

   ureg_SEQ(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
            ureg_scalar(flags, TGSI_SWIZZLE_W), ureg_src(tmp));

   ureg_IF(shader, ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), &label);

   ureg_KILP(shader);

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

   fs_callback(callback_priv, r, shader, VS_O_VTEX, tmp);

   if (scale != 1.0f)
      ureg_MAD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XYZ),
               ureg_src(tmp), ureg_imm1f(shader, scale),
               ureg_scalar(flags, TGSI_SWIZZLE_Z));
   else
      ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XYZ),
               ureg_src(tmp), ureg_scalar(flags, TGSI_SWIZZLE_Z));

   ureg_MUL(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(tmp), ureg_imm1f(shader, invert ? -1.0f : 1.0f));
   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_release_temporary(shader, tmp);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

static bool
init_pipe_state(struct vl_mc *r)
{
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;
   unsigned i;

   assert(r);

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;
   r->sampler_ref = r->pipe->create_sampler_state(r->pipe, &sampler);
   if (!r->sampler_ref)
      goto error_sampler_ref;

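   /* One set of blend states per possible color write mask: blend_clear
    * overwrites the destination (dst factor ZERO), blend_add accumulates
    * into it, and blend_sub reverse-subtracts, which is used together with
    * the inverted YCbCr shader (fs_ycbcr_sub) to apply negative values. */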
   for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
      memset(&blend, 0, sizeof blend);
      blend.independent_blend_enable = 0;
      blend.rt[0].blend_enable = 1;
      blend.rt[0].rgb_func = PIPE_BLEND_ADD;
      blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].alpha_func = PIPE_BLEND_ADD;
      blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.logicop_enable = 0;
      blend.logicop_func = PIPE_LOGICOP_CLEAR;
      blend.rt[0].colormask = i;
      blend.dither = 0;
      r->blend_clear[i] = r->pipe->create_blend_state(r->pipe, &blend);
      if (!r->blend_clear[i])
         goto error_blend;

      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
      r->blend_add[i] = r->pipe->create_blend_state(r->pipe, &blend);
      if (!r->blend_add[i])
         goto error_blend;

      blend.rt[0].rgb_func = PIPE_BLEND_REVERSE_SUBTRACT;
      blend.rt[0].alpha_func = PIPE_BLEND_REVERSE_SUBTRACT;
      r->blend_sub[i] = r->pipe->create_blend_state(r->pipe, &blend);
      if (!r->blend_sub[i])
         goto error_blend;
   }

   memset(&rs_state, 0, sizeof(rs_state));
   /*rs_state.sprite_coord_enable */
   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
   rs_state.point_quad_rasterization = true;
   rs_state.point_size = BLOCK_WIDTH;
   rs_state.gl_rasterization_rules = true;
   r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
   if (!r->rs_state)
      goto error_rs_state;

   return true;

error_rs_state:
error_blend:
   for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
      if (r->blend_sub[i])
         r->pipe->delete_blend_state(r->pipe, r->blend_sub[i]);

      if (r->blend_add[i])
         r->pipe->delete_blend_state(r->pipe, r->blend_add[i]);

      if (r->blend_clear[i])
         r->pipe->delete_blend_state(r->pipe, r->blend_clear[i]);
   }

   r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);

error_sampler_ref:
   return false;
}

static void
cleanup_pipe_state(struct vl_mc *r)
{
   unsigned i;

   assert(r);

   r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);
   for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
      r->pipe->delete_blend_state(r->pipe, r->blend_clear[i]);
      r->pipe->delete_blend_state(r->pipe, r->blend_add[i]);
      r->pipe->delete_blend_state(r->pipe, r->blend_sub[i]);
   }
   r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
}

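/* Initialize the motion compensation renderer.  The vs_callback and
 * fs_callback hooks are supplied by the caller and emit the shader code
 * that actually fetches the YCbCr source data (e.g. the IDCT output), so
 * vl_mc itself stays independent of where that data comes from. */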
bool
vl_mc_init(struct vl_mc *renderer, struct pipe_context *pipe,
           unsigned buffer_width, unsigned buffer_height,
           unsigned macroblock_size, float scale,
           vl_mc_ycbcr_vert_shader vs_callback,
           vl_mc_ycbcr_frag_shader fs_callback,
           void *callback_priv)
{
   assert(renderer);
   assert(pipe);

   memset(renderer, 0, sizeof(struct vl_mc));

   renderer->pipe = pipe;
   renderer->buffer_width = buffer_width;
   renderer->buffer_height = buffer_height;
   renderer->macroblock_size = macroblock_size;

   if (!init_pipe_state(renderer))
      goto error_pipe_state;

   renderer->vs_ref = create_ref_vert_shader(renderer);
   if (!renderer->vs_ref)
      goto error_vs_ref;

   renderer->vs_ycbcr = create_ycbcr_vert_shader(renderer, vs_callback, callback_priv);
   if (!renderer->vs_ycbcr)
      goto error_vs_ycbcr;

   renderer->fs_ref = create_ref_frag_shader(renderer);
   if (!renderer->fs_ref)
      goto error_fs_ref;

   renderer->fs_ycbcr = create_ycbcr_frag_shader(renderer, scale, false, fs_callback, callback_priv);
   if (!renderer->fs_ycbcr)
      goto error_fs_ycbcr;

   renderer->fs_ycbcr_sub = create_ycbcr_frag_shader(renderer, scale, true, fs_callback, callback_priv);
   if (!renderer->fs_ycbcr_sub)
      goto error_fs_ycbcr_sub;

   return true;

error_fs_ycbcr_sub:
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr);

error_fs_ycbcr:
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);

error_fs_ref:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);

error_vs_ycbcr:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);

error_vs_ref:
   cleanup_pipe_state(renderer);

error_pipe_state:
   return false;
}

void
vl_mc_cleanup(struct vl_mc *renderer)
{
   assert(renderer);

   cleanup_pipe_state(renderer);

   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr_sub);
}

bool
vl_mc_init_buffer(struct vl_mc *renderer, struct vl_mc_buffer *buffer)
{
   assert(renderer && buffer);

   buffer->renderer = renderer;

   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   buffer->fb_state.nr_cbufs = 1;
   buffer->fb_state.zsbuf = NULL;

   return true;
}

void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);
}

void
vl_mc_set_surface(struct vl_mc_buffer *buffer, struct pipe_surface *surface)
{
   assert(buffer && surface);

   buffer->surface_cleared = false;

   buffer->viewport.scale[0] = surface->width;
   buffer->viewport.scale[1] = surface->height;

   buffer->fb_state.width = surface->width;
   buffer->fb_state.height = surface->height;
   buffer->fb_state.cbufs[0] = surface;
}

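/* Bind the common state for a rendering pass: rasterizer, blend state,
 * framebuffer and viewport.  The first pass into a not-yet-cleared surface
 * uses the "clear" blend state so it overwrites whatever is in the render
 * target; later passes accumulate with the "add" blend state. */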
static void
prepare_pipe_4_rendering(struct vl_mc_buffer *buffer, unsigned component, unsigned mask)
{
   struct vl_mc *renderer;

   assert(buffer);

   renderer = buffer->renderer;
   renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);

   if (buffer->surface_cleared || component > 0)
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_add[mask]);
   else
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_clear[mask]);

   renderer->pipe->set_framebuffer_state(renderer->pipe, &buffer->fb_state);
   renderer->pipe->set_viewport_state(renderer->pipe, &buffer->viewport);
}

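/* Render the motion compensated reference data into the RGB channels of the
 * target, drawing one quad instance per macroblock of the buffer, and mark
 * the surface as cleared for the following YCbCr passes. */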
void
vl_mc_render_ref(struct vl_mc_buffer *buffer, struct pipe_sampler_view *ref)
{
   struct vl_mc *renderer;

   assert(buffer && ref);

   prepare_pipe_4_rendering(buffer, 0, PIPE_MASK_R | PIPE_MASK_G | PIPE_MASK_B);

   renderer = buffer->renderer;

   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ref);
   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ref);

   renderer->pipe->set_fragment_sampler_views(renderer->pipe, 1, &ref);
   renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 1, &renderer->sampler_ref);

   util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0,
                              renderer->buffer_width / MACROBLOCK_WIDTH *
                              renderer->buffer_height / MACROBLOCK_HEIGHT);

   buffer->surface_cleared = true;
}

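/* Render the YCbCr data for one component (the R, G or B channel selected
 * via the color write mask).  When reference data has already been rendered
 * into the surface, a second pass with the subtractive blend state and the
 * inverted fs_ycbcr_sub shader subtracts values from the target as well. */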
void
vl_mc_render_ycbcr(struct vl_mc_buffer *buffer, unsigned component, unsigned num_instances)
{
   struct vl_mc *renderer;
   unsigned mask = 1 << component;

   assert(buffer);

   if (num_instances == 0)
      return;

   prepare_pipe_4_rendering(buffer, component, mask);

   renderer = buffer->renderer;

   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ycbcr);
   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ycbcr);

   util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);

   if (buffer->surface_cleared) {
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_sub[mask]);
      renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ycbcr_sub);
      util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
   }
}