1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <math.h>
29 #include <assert.h>
30
31 #include "util/u_memory.h"
32 #include "util/u_sampler.h"
33 #include "util/u_surface.h"
34 #include "util/u_video.h"
35
36 #include "vl_mpeg12_decoder.h"
37 #include "vl_defines.h"
38
39 #define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
40 #define SCALE_FACTOR_SSCALED (1.0f / 256.0f)
41
42 struct format_config {
43 enum pipe_format zscan_source_format;
44 enum pipe_format idct_source_format;
45 enum pipe_format mc_source_format;
46
47 float idct_scale;
48 float mc_scale;
49 };
50
51 static const struct format_config bitstream_format_config[] = {
52 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
53 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
54 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
55 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
56 };
57
58 static const unsigned num_bitstream_format_configs =
59 sizeof(bitstream_format_config) / sizeof(struct format_config);
60
61 static const struct format_config idct_format_config[] = {
62 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
63 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
64 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
65 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
66 };
67
68 static const unsigned num_idct_format_configs =
69 sizeof(idct_format_config) / sizeof(struct format_config);
70
71 static const struct format_config mc_format_config[] = {
72 //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
73 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
74 };
75
76 static const unsigned num_mc_format_configs =
77 sizeof(mc_format_config) / sizeof(struct format_config);
78
79 static const unsigned const_empty_block_mask_420[3][2][2] = {
80 { { 0x20, 0x10 }, { 0x08, 0x04 } },
81 { { 0x02, 0x02 }, { 0x02, 0x02 } },
82 { { 0x01, 0x01 }, { 0x01, 0x01 } }
83 };
84
85 struct video_buffer_private
86 {
87 struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
88 struct pipe_surface *surfaces[VL_MAX_SURFACES];
89
90 struct vl_mpeg12_buffer *buffer;
91 };
92
93 static void
94 vl_mpeg12_destroy_buffer(struct vl_mpeg12_buffer *buf);
95
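/* Release the per-decoder data attached to a video buffer: sampler views,
 * surfaces and, if one was attached, the private decode buffer. */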
96 static void
97 destroy_video_buffer_private(void *private)
98 {
99 struct video_buffer_private *priv = private;
100 unsigned i;
101
102 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
103 pipe_sampler_view_reference(&priv->sampler_view_planes[i], NULL);
104
105 for (i = 0; i < VL_MAX_SURFACES; ++i)
106 pipe_surface_reference(&priv->surfaces[i], NULL);
107
108 if (priv->buffer)
109 vl_mpeg12_destroy_buffer(priv->buffer);
110
111 FREE(priv);
112 }
113
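/* Get (or lazily create) the per-decoder state associated with a video
 * buffer: sampler views and surfaces recreated on the decoder's own context. */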
114 static struct video_buffer_private *
115 get_video_buffer_private(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *buf)
116 {
117 struct pipe_context *pipe = dec->context;
118 struct video_buffer_private *priv;
119 struct pipe_sampler_view **sv;
120 struct pipe_surface **surf;
121 unsigned i;
122
123 priv = vl_video_buffer_get_associated_data(buf, &dec->base);
124 if (priv)
125 return priv;
126
127 priv = CALLOC_STRUCT(video_buffer_private);
128
129 sv = buf->get_sampler_view_planes(buf);
130 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
131 if (sv[i])
132 priv->sampler_view_planes[i] = pipe->create_sampler_view(pipe, sv[i]->texture, sv[i]);
133
134 surf = buf->get_surfaces(buf);
135 for (i = 0; i < VL_MAX_SURFACES; ++i)
136 if (surf[i])
137 priv->surfaces[i] = pipe->create_surface(pipe, surf[i]->texture, surf[i]);
138
139 vl_video_buffer_set_associated_data(buf, &dec->base, priv, destroy_video_buffer_private);
140
141 return priv;
142 }
143
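/* Allocate the zscan source texture (64 coefficients per block,
 * blocks_per_line blocks per row) and one zscan buffer per component. The
 * scan output goes to the IDCT source when the decoder runs the IDCT itself,
 * otherwise directly to the MC source. */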
144 static bool
145 init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
146 {
147 struct pipe_resource *res, res_tmpl;
148 struct pipe_sampler_view sv_tmpl;
149 struct pipe_surface **destination;
150
151 unsigned i;
152
153 assert(dec && buffer);
154
155 memset(&res_tmpl, 0, sizeof(res_tmpl));
156 res_tmpl.target = PIPE_TEXTURE_2D;
157 res_tmpl.format = dec->zscan_source_format;
158 res_tmpl.width0 = dec->blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
159 res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
160 res_tmpl.depth0 = 1;
161 res_tmpl.array_size = 1;
162 res_tmpl.usage = PIPE_USAGE_STREAM;
163 res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
164
165 res = dec->context->screen->resource_create(dec->context->screen, &res_tmpl);
166 if (!res)
167 goto error_source;
168
169
170 memset(&sv_tmpl, 0, sizeof(sv_tmpl));
171 u_sampler_view_default_template(&sv_tmpl, res, res->format);
172 sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED;
173 buffer->zscan_source = dec->context->create_sampler_view(dec->context, res, &sv_tmpl);
174 pipe_resource_reference(&res, NULL);
175 if (!buffer->zscan_source)
176 goto error_sampler;
177
178 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
179 destination = dec->idct_source->get_surfaces(dec->idct_source);
180 else
181 destination = dec->mc_source->get_surfaces(dec->mc_source);
182
183 if (!destination)
184 goto error_surface;
185
186 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
187 if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
188 &buffer->zscan[i], buffer->zscan_source, destination[i]))
189 goto error_plane;
190
191 return true;
192
193 error_plane:
194 for (; i > 0; --i)
195 vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);
196
197 error_surface:
198 error_sampler:
199 pipe_sampler_view_reference(&buffer->zscan_source, NULL);
200
201 error_source:
202 return false;
203 }
204
205 static void
206 cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
207 {
208 unsigned i;
209
210 assert(buffer);
211
212 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
213 vl_zscan_cleanup_buffer(&buffer->zscan[i]);
214
215 pipe_sampler_view_reference(&buffer->zscan_source, NULL);
216 }
217
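/* Wire up the per-plane IDCT buffers between the IDCT source and the MC
 * source sampler views. */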
218 static bool
219 init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
220 {
221 struct pipe_sampler_view **idct_source_sv, **mc_source_sv;
222
223 unsigned i;
224
225 assert(dec && buffer);
226
227 idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
228 if (!idct_source_sv)
229 goto error_source_sv;
230
231 mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
232 if (!mc_source_sv)
233 goto error_mc_source_sv;
234
235 for (i = 0; i < 3; ++i)
236 if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
237 &buffer->idct[i], idct_source_sv[i],
238 mc_source_sv[i]))
239 goto error_plane;
240
241 return true;
242
243 error_plane:
244 for (; i > 0; --i)
245 vl_idct_cleanup_buffer(&buffer->idct[i - 1]);
246
247 error_mc_source_sv:
248 error_source_sv:
249 return false;
250 }
251
252 static void
253 cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
254 {
255 unsigned i;
256
257 assert(buf);
258
259 for (i = 0; i < 3; ++i)
260 vl_idct_cleanup_buffer(&buf->idct[i]);
261 }
262
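/* Create motion compensation buffers for the luma plane and the two chroma
 * planes. */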
263 static bool
264 init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
265 {
266 assert(dec && buf);
267
268    if (!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
269       goto error_mc_y;
270
271    if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
272       goto error_mc_cb;
273
274    if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
275 goto error_mc_cr;
276
277 return true;
278
279 error_mc_cr:
280 vl_mc_cleanup_buffer(&buf->mc[1]);
281
282 error_mc_cb:
283 vl_mc_cleanup_buffer(&buf->mc[0]);
284
285 error_mc_y:
286 return false;
287 }
288
289 static void
290 cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
291 {
292 unsigned i;
293
294 assert(buf);
295
296 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
297 vl_mc_cleanup_buffer(&buf->mc[i]);
298 }
299
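/* Map the macroblock's forward/backward motion flags to per-reference blend
 * weights: forward only, backward only, both (averaged) or none. */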
300 static INLINE void
301 MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
302 {
303 assert(mb);
304
305 switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
306 case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
307 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
308 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
309 break;
310
311 case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
312 weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
313 weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
314 break;
315
316 case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
317 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
318 weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
319 break;
320
321 default:
322 if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) {
323 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
324 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
325 } else {
326             /* no motion vector, but also not an intra macroblock ->
327                just copy the old frame content */
328 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
329 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
330 }
331 break;
332 }
333 }
334
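/* Convert one MPEG-1/2 motion vector (forward or backward) into the
 * vl_motionvector layout used by the vertex stream, handling frame and field
 * motion types; DUALPRIME and 16x8 motion are not handled yet. */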
335 static INLINE struct vl_motionvector
336 MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
337 unsigned field_select_mask, unsigned weight)
338 {
339 struct vl_motionvector mv;
340
341 assert(mb);
342
343 if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
344 switch (mb->macroblock_modes.bits.frame_motion_type) {
345 case PIPE_MPEG12_MO_TYPE_FRAME:
346 mv.top.x = mb->PMV[0][vector][0];
347 mv.top.y = mb->PMV[0][vector][1];
348 mv.top.field_select = PIPE_VIDEO_FRAME;
349 mv.top.weight = weight;
350
351 mv.bottom.x = mb->PMV[0][vector][0];
352 mv.bottom.y = mb->PMV[0][vector][1];
353 mv.bottom.weight = weight;
354 mv.bottom.field_select = PIPE_VIDEO_FRAME;
355 break;
356
357 case PIPE_MPEG12_MO_TYPE_FIELD:
358 mv.top.x = mb->PMV[0][vector][0];
359 mv.top.y = mb->PMV[0][vector][1];
360 mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
361 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
362 mv.top.weight = weight;
363
364 mv.bottom.x = mb->PMV[1][vector][0];
365 mv.bottom.y = mb->PMV[1][vector][1];
366 mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
367 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
368 mv.bottom.weight = weight;
369 break;
370
371 default: // TODO: Support DUALPRIME and 16x8
372 break;
373 }
374 } else {
375 mv.top.x = mv.top.y = 0;
376 mv.top.field_select = PIPE_VIDEO_FRAME;
377 mv.top.weight = weight;
378
379 mv.bottom.x = mv.bottom.y = 0;
380 mv.bottom.field_select = PIPE_VIDEO_FRAME;
381 mv.bottom.weight = weight;
382 }
383 return mv;
384 }
385
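/* Append the coded Y/Cb/Cr blocks of a macroblock to the per-component
 * vertex streams and copy their DCT coefficients into the mapped zscan
 * source texture. */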
386 static INLINE void
387 UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
388 struct vl_mpeg12_buffer *buf,
389 const struct pipe_mpeg12_macroblock *mb)
390 {
391 unsigned intra;
392 unsigned tb, x, y, num_blocks = 0;
393
394 assert(dec && buf);
395 assert(mb);
396
397 if (!mb->coded_block_pattern)
398 return;
399
400 intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;
401
402 for (y = 0; y < 2; ++y) {
403 for (x = 0; x < 2; ++x) {
404 if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
405
406 struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
407 stream->x = mb->x * 2 + x;
408 stream->y = mb->y * 2 + y;
409 stream->intra = intra;
410 stream->coding = mb->macroblock_modes.bits.dct_type;
411 stream->block_num = buf->block_num++;
412
413 buf->num_ycbcr_blocks[0]++;
414 buf->ycbcr_stream[0]++;
415
416 num_blocks++;
417 }
418 }
419 }
420
421 /* TODO: Implement 422, 444 */
422 //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
423
424 for (tb = 1; tb < 3; ++tb) {
425 if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
426
427 struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
428 stream->x = mb->x;
429 stream->y = mb->y;
430 stream->intra = intra;
431 stream->coding = 0;
432 stream->block_num = buf->block_num++;
433
434 buf->num_ycbcr_blocks[tb]++;
435 buf->ycbcr_stream[tb]++;
436
437 num_blocks++;
438 }
439 }
440
441 memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks);
442 buf->texels += 64 * num_blocks;
443 }
444
445 static void
446 vl_mpeg12_destroy_buffer(struct vl_mpeg12_buffer *buf)
447 {
448
449 assert(buf);
450
451 cleanup_zscan_buffer(buf);
452 cleanup_idct_buffer(buf);
453 cleanup_mc_buffer(buf);
454 vl_vb_cleanup(&buf->vertex_stream);
455
456 FREE(buf);
457 }
458
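/* Destroy the decoder: unbind the shaders, release all pipe state and decode
 * buffers, and destroy the decoder's private context. */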
459 static void
460 vl_mpeg12_destroy(struct pipe_video_codec *decoder)
461 {
462 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
463 unsigned i;
464
465 assert(decoder);
466
467 /* Asserted in softpipe_delete_fs_state() for some reason */
468 dec->context->bind_vs_state(dec->context, NULL);
469 dec->context->bind_fs_state(dec->context, NULL);
470
471 dec->context->delete_depth_stencil_alpha_state(dec->context, dec->dsa);
472 dec->context->delete_sampler_state(dec->context, dec->sampler_ycbcr);
473
474 vl_mc_cleanup(&dec->mc_y);
475 vl_mc_cleanup(&dec->mc_c);
476 dec->mc_source->destroy(dec->mc_source);
477
478 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
479 vl_idct_cleanup(&dec->idct_y);
480 vl_idct_cleanup(&dec->idct_c);
481 dec->idct_source->destroy(dec->idct_source);
482 }
483
484 vl_zscan_cleanup(&dec->zscan_y);
485 vl_zscan_cleanup(&dec->zscan_c);
486
487 dec->context->delete_vertex_elements_state(dec->context, dec->ves_ycbcr);
488 dec->context->delete_vertex_elements_state(dec->context, dec->ves_mv);
489
490 pipe_resource_reference(&dec->quads.buffer, NULL);
491 pipe_resource_reference(&dec->pos.buffer, NULL);
492
493 pipe_sampler_view_reference(&dec->zscan_linear, NULL);
494 pipe_sampler_view_reference(&dec->zscan_normal, NULL);
495 pipe_sampler_view_reference(&dec->zscan_alternate, NULL);
496
497 for (i = 0; i < 4; ++i)
498 if (dec->dec_buffers[i])
499 vl_mpeg12_destroy_buffer(dec->dec_buffers[i]);
500
501 dec->context->destroy(dec->context);
502
503 FREE(dec);
504 }
505
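/* Return the decode buffer for a target, creating the vertex stream,
 * MC/IDCT/zscan buffers (and the bitstream parser) on first use. For chunked
 * decode the buffer is tied to the target, otherwise it is cycled through a
 * small pool of four. */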
506 static struct vl_mpeg12_buffer *
507 vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *target)
508 {
509 struct video_buffer_private *priv;
510 struct vl_mpeg12_buffer *buffer;
511
512 assert(dec);
513
514 priv = get_video_buffer_private(dec, target);
515 if (priv->buffer)
516 return priv->buffer;
517
518 buffer = dec->dec_buffers[dec->current_buffer];
519 if (buffer)
520 return buffer;
521
522 buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
523 if (buffer == NULL)
524 return NULL;
525
526 if (!vl_vb_init(&buffer->vertex_stream, dec->context,
527 dec->base.width / VL_MACROBLOCK_WIDTH,
528 dec->base.height / VL_MACROBLOCK_HEIGHT))
529 goto error_vertex_buffer;
530
531 if (!init_mc_buffer(dec, buffer))
532 goto error_mc;
533
534 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
535 if (!init_idct_buffer(dec, buffer))
536 goto error_idct;
537
538 if (!init_zscan_buffer(dec, buffer))
539 goto error_zscan;
540
541 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
542 vl_mpg12_bs_init(&buffer->bs, &dec->base);
543
544 if (dec->base.expect_chunked_decode)
545 priv->buffer = buffer;
546 else
547 dec->dec_buffers[dec->current_buffer] = buffer;
548
549 return buffer;
550
551 error_zscan:
552 cleanup_idct_buffer(buffer);
553
554 error_idct:
555 cleanup_mc_buffer(buffer);
556
557 error_mc:
558 vl_vb_cleanup(&buffer->vertex_stream);
559
560 error_vertex_buffer:
561 FREE(buffer);
562 return NULL;
563 }
564
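/* Start a frame: upload the intra/non-intra quantizer matrices (flat
 * matrices for the non-bitstream entrypoints), map the vertex stream and the
 * zscan source texture, and reset the per-component block counters. */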
565 static void
566 vl_mpeg12_begin_frame(struct pipe_video_codec *decoder,
567 struct pipe_video_buffer *target,
568 struct pipe_picture_desc *picture)
569 {
570 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
571 struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
572 struct vl_mpeg12_buffer *buf;
573
574 struct pipe_resource *tex;
575 struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };
576
577 uint8_t intra_matrix[64];
578 uint8_t non_intra_matrix[64];
579
580 unsigned i;
581
582 assert(dec && target && picture);
583
584 buf = vl_mpeg12_get_decode_buffer(dec, target);
585 assert(buf);
586
587 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
588 memcpy(intra_matrix, desc->intra_matrix, sizeof(intra_matrix));
589 memcpy(non_intra_matrix, desc->non_intra_matrix, sizeof(non_intra_matrix));
590 intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
591 } else {
592 memset(intra_matrix, 0x10, sizeof(intra_matrix));
593 memset(non_intra_matrix, 0x10, sizeof(non_intra_matrix));
594 }
595
596 for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
597 struct vl_zscan *zscan = i == 0 ? &dec->zscan_y : &dec->zscan_c;
598 vl_zscan_upload_quant(zscan, &buf->zscan[i], intra_matrix, true);
599 vl_zscan_upload_quant(zscan, &buf->zscan[i], non_intra_matrix, false);
600 }
601
602 vl_vb_map(&buf->vertex_stream, dec->context);
603
604 tex = buf->zscan_source->texture;
605 rect.width = tex->width0;
606 rect.height = tex->height0;
607
608 buf->texels =
609 dec->context->transfer_map(dec->context, tex, 0,
610 PIPE_TRANSFER_WRITE |
611 PIPE_TRANSFER_DISCARD_RANGE,
612 &rect, &buf->tex_transfer);
613
614 buf->block_num = 0;
615
616 for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
617 buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
618 buf->num_ycbcr_blocks[i] = 0;
619 }
620
621 for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
622 buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
623
624 if (dec->base.entrypoint >= PIPE_VIDEO_ENTRYPOINT_IDCT) {
625 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
626 vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
627 }
628 }
629
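/* Decode an array of already-parsed macroblocks: upload the coded blocks,
 * convert the motion vectors, and replicate motion vectors for skipped
 * macroblocks as described in section 7.6.6 of the MPEG-2 spec. */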
630 static void
631 vl_mpeg12_decode_macroblock(struct pipe_video_codec *decoder,
632 struct pipe_video_buffer *target,
633 struct pipe_picture_desc *picture,
634 const struct pipe_macroblock *macroblocks,
635 unsigned num_macroblocks)
636 {
637 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
638 const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
639 struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
640 struct vl_mpeg12_buffer *buf;
641
642 unsigned i, j, mv_weights[2];
643
644 assert(dec && target && picture);
645 assert(macroblocks && macroblocks->codec == PIPE_VIDEO_FORMAT_MPEG12);
646
647 buf = vl_mpeg12_get_decode_buffer(dec, target);
648 assert(buf);
649
650 for (; num_macroblocks > 0; --num_macroblocks) {
651 unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;
652
653 if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
654 UploadYcbcrBlocks(dec, buf, mb);
655
656 MacroBlockTypeToPipeWeights(mb, mv_weights);
657
658 for (i = 0; i < 2; ++i) {
659 if (!desc->ref[i]) continue;
660
661 buf->mv_stream[i][mb_addr] = MotionVectorToPipe
662 (
663 mb, i,
664 i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
665 mv_weights[i]
666 );
667 }
668
669 /* see section 7.6.6 of the spec */
670 if (mb->num_skipped_macroblocks > 0) {
671 struct vl_motionvector skipped_mv[2];
672
673 if (desc->ref[0] && !desc->ref[1]) {
674 skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
675 skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
676 } else {
677 skipped_mv[0] = buf->mv_stream[0][mb_addr];
678 skipped_mv[1] = buf->mv_stream[1][mb_addr];
679 }
680 skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
681 skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;
682
683 skipped_mv[0].bottom = skipped_mv[0].top;
684 skipped_mv[1].bottom = skipped_mv[1].top;
685
686 ++mb_addr;
687 for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
688 for (j = 0; j < 2; ++j) {
689 if (!desc->ref[j]) continue;
690 buf->mv_stream[j][mb_addr] = skipped_mv[j];
691
692 }
693 }
694 }
695
696 ++mb;
697 }
698 }
699
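/* Decode raw bitstream data: select the zscan layout based on
 * alternate_scan and hand the buffers to the bitstream parser. */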
700 static void
701 vl_mpeg12_decode_bitstream(struct pipe_video_codec *decoder,
702 struct pipe_video_buffer *target,
703 struct pipe_picture_desc *picture,
704 unsigned num_buffers,
705 const void * const *buffers,
706 const unsigned *sizes)
707 {
708 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
709 struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
710 struct vl_mpeg12_buffer *buf;
711
712 unsigned i;
713
714 assert(dec && target && picture);
715
716 buf = vl_mpeg12_get_decode_buffer(dec, target);
717 assert(buf);
718
719 for (i = 0; i < VL_NUM_COMPONENTS; ++i)
720 vl_zscan_set_layout(&buf->zscan[i], desc->alternate_scan ?
721 dec->zscan_alternate : dec->zscan_normal);
722
723 vl_mpg12_bs_decode(&buf->bs, target, desc, num_buffers, buffers, sizes);
724 }
725
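/* Finish a frame: unmap the streams, render the motion compensated
 * predictions from the reference frames, run zscan (plus IDCT when enabled)
 * on the coded blocks, blend the results into the target surfaces and flush
 * the context. */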
726 static void
727 vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
728 struct pipe_video_buffer *target,
729 struct pipe_picture_desc *picture)
730 {
731 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
732 struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
733 struct pipe_sampler_view **ref_frames[2];
734 struct pipe_sampler_view **mc_source_sv;
735 struct pipe_surface **target_surfaces;
736 struct pipe_vertex_buffer vb[3];
737 struct vl_mpeg12_buffer *buf;
738
739 const unsigned *plane_order;
740 unsigned i, j, component;
741 unsigned nr_components;
742
743 assert(dec && target && picture);
744 assert(!target->interlaced);
745
746 buf = vl_mpeg12_get_decode_buffer(dec, target);
747
748 vl_vb_unmap(&buf->vertex_stream, dec->context);
749
750 dec->context->transfer_unmap(dec->context, buf->tex_transfer);
751
752 vb[0] = dec->quads;
753 vb[1] = dec->pos;
754
755 target_surfaces = get_video_buffer_private(dec, target)->surfaces;
756
757 for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
758 if (desc->ref[i])
759 ref_frames[i] = get_video_buffer_private(dec, desc->ref[i])->sampler_view_planes;
760 else
761 ref_frames[i] = NULL;
762 }
763
764 dec->context->bind_vertex_elements_state(dec->context, dec->ves_mv);
765 for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
766 if (!target_surfaces[i]) continue;
767
768 vl_mc_set_surface(&buf->mc[i], target_surfaces[i]);
769
770 for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
771 if (!ref_frames[j] || !ref_frames[j][i]) continue;
772
773          vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
774 dec->context->set_vertex_buffers(dec->context, 0, 3, vb);
775
776 vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
777 }
778 }
779
780 dec->context->bind_vertex_elements_state(dec->context, dec->ves_ycbcr);
781 for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
782 if (!buf->num_ycbcr_blocks[i]) continue;
783
784 vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
785 dec->context->set_vertex_buffers(dec->context, 0, 2, vb);
786
787       vl_zscan_render(i ? &dec->zscan_c : &dec->zscan_y, &buf->zscan[i], buf->num_ycbcr_blocks[i]);
788
789 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
790 vl_idct_flush(i ? &dec->idct_c : &dec->idct_y, &buf->idct[i], buf->num_ycbcr_blocks[i]);
791 }
792
793 plane_order = vl_video_buffer_plane_order(target->buffer_format);
794 mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
795 for (i = 0, component = 0; component < VL_NUM_COMPONENTS; ++i) {
796 if (!target_surfaces[i]) continue;
797
798 nr_components = util_format_get_nr_components(target_surfaces[i]->texture->format);
799 for (j = 0; j < nr_components; ++j, ++component) {
800 unsigned plane = plane_order[component];
801 if (!buf->num_ycbcr_blocks[plane]) continue;
802
803 vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
804 dec->context->set_vertex_buffers(dec->context, 0, 2, vb);
805
806 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
807 vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
808 else {
809 dec->context->set_sampler_views(dec->context,
810 PIPE_SHADER_FRAGMENT, 0, 1,
811 &mc_source_sv[plane]);
812 dec->context->bind_sampler_states(dec->context,
813 PIPE_SHADER_FRAGMENT,
814 0, 1, &dec->sampler_ycbcr);
815 }
816 vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[plane]);
817 }
818 }
819 dec->context->flush(dec->context, NULL, 0);
820 ++dec->current_buffer;
821 dec->current_buffer %= 4;
822 }
823
824 static void
825 vl_mpeg12_flush(struct pipe_video_codec *decoder)
826 {
827 assert(decoder);
828
829    // No-op; for the shader-based decoder it is much faster to flush everything in end_frame
830 }
831
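/* Create the depth/stencil/alpha state (everything disabled) and the
 * nearest-filtering sampler used to fetch the Y/Cb/Cr residuals during
 * motion compensation. */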
832 static bool
833 init_pipe_state(struct vl_mpeg12_decoder *dec)
834 {
835 struct pipe_depth_stencil_alpha_state dsa;
836 struct pipe_sampler_state sampler;
837 unsigned i;
838
839 assert(dec);
840
841 memset(&dsa, 0, sizeof dsa);
842 dsa.depth.enabled = 0;
843 dsa.depth.writemask = 0;
844 dsa.depth.func = PIPE_FUNC_ALWAYS;
845 for (i = 0; i < 2; ++i) {
846 dsa.stencil[i].enabled = 0;
847 dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
848 dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
849 dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
850 dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
851 dsa.stencil[i].valuemask = 0;
852 dsa.stencil[i].writemask = 0;
853 }
854 dsa.alpha.enabled = 0;
855 dsa.alpha.func = PIPE_FUNC_ALWAYS;
856 dsa.alpha.ref_value = 0;
857 dec->dsa = dec->context->create_depth_stencil_alpha_state(dec->context, &dsa);
858 dec->context->bind_depth_stencil_alpha_state(dec->context, dec->dsa);
859
860 memset(&sampler, 0, sizeof(sampler));
861 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
862 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
863 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
864 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
865 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
866 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
867 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
868 sampler.compare_func = PIPE_FUNC_ALWAYS;
869 sampler.normalized_coords = 1;
870 dec->sampler_ycbcr = dec->context->create_sampler_state(dec->context, &sampler);
871 if (!dec->sampler_ycbcr)
872 return false;
873
874 return true;
875 }
876
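/* Pick the first format configuration whose zscan, IDCT and MC source
 * formats are all supported by the screen for the required bindings. */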
877 static const struct format_config*
878 find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
879 {
880 struct pipe_screen *screen;
881 unsigned i;
882
883 assert(dec);
884
885 screen = dec->context->screen;
886
887 for (i = 0; i < num_configs; ++i) {
888 if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
889 1, PIPE_BIND_SAMPLER_VIEW))
890 continue;
891
892 if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
893 if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
894 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
895 continue;
896
897 if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
898 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
899 continue;
900 } else {
901 if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
902 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
903 continue;
904 }
905 return &configs[i];
906 }
907
908 return NULL;
909 }
910
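/* Initialize the inverse zscan stage for luma and chroma and create the
 * linear, normal and alternate scan layouts. */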
911 static bool
912 init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
913 {
914 unsigned num_channels;
915
916 assert(dec);
917
918 dec->zscan_source_format = format_config->zscan_source_format;
919 dec->zscan_linear = vl_zscan_layout(dec->context, vl_zscan_linear, dec->blocks_per_line);
920 dec->zscan_normal = vl_zscan_layout(dec->context, vl_zscan_normal, dec->blocks_per_line);
921 dec->zscan_alternate = vl_zscan_layout(dec->context, vl_zscan_alternate, dec->blocks_per_line);
922
923 num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
924
925 if (!vl_zscan_init(&dec->zscan_y, dec->context, dec->base.width, dec->base.height,
926 dec->blocks_per_line, dec->num_blocks, num_channels))
927 return false;
928
929 if (!vl_zscan_init(&dec->zscan_c, dec->context, dec->chroma_width, dec->chroma_height,
930 dec->blocks_per_line, dec->num_blocks, num_channels))
931 return false;
932
933 return true;
934 }
935
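/* Create the IDCT and MC source buffers and initialize the two-stage IDCT;
 * the number of render targets used is derived from the screen
 * capabilities. */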
936 static bool
937 init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
938 {
939 unsigned nr_of_idct_render_targets, max_inst;
940 enum pipe_format formats[3];
941 struct pipe_video_buffer templat;
942
943 struct pipe_sampler_view *matrix = NULL;
944
945 nr_of_idct_render_targets = dec->context->screen->get_param
946 (
947 dec->context->screen, PIPE_CAP_MAX_RENDER_TARGETS
948 );
949
950 max_inst = dec->context->screen->get_shader_param
951 (
952 dec->context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
953 );
954
955    // Assume roughly 32 instructions per render target; not 100% accurate, but should work in most cases
956 if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
957       // more than 4 render targets usually doesn't make any sense
958 nr_of_idct_render_targets = 4;
959 else
960 nr_of_idct_render_targets = 1;
961
962 formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
963 memset(&templat, 0, sizeof(templat));
964 templat.width = dec->base.width / 4;
965 templat.height = dec->base.height;
966 templat.chroma_format = dec->base.chroma_format;
967 dec->idct_source = vl_video_buffer_create_ex
968 (
969 dec->context, &templat,
970 formats, 1, 1, PIPE_USAGE_STATIC
971 );
972
973 if (!dec->idct_source)
974 goto error_idct_source;
975
976 formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
977 memset(&templat, 0, sizeof(templat));
978 templat.width = dec->base.width / nr_of_idct_render_targets;
979 templat.height = dec->base.height / 4;
980 templat.chroma_format = dec->base.chroma_format;
981 dec->mc_source = vl_video_buffer_create_ex
982 (
983 dec->context, &templat,
984 formats, nr_of_idct_render_targets, 1, PIPE_USAGE_STATIC
985 );
986
987 if (!dec->mc_source)
988 goto error_mc_source;
989
990 if (!(matrix = vl_idct_upload_matrix(dec->context, format_config->idct_scale)))
991 goto error_matrix;
992
993 if (!vl_idct_init(&dec->idct_y, dec->context, dec->base.width, dec->base.height,
994 nr_of_idct_render_targets, matrix, matrix))
995 goto error_y;
996
997    if (!vl_idct_init(&dec->idct_c, dec->context, dec->chroma_width, dec->chroma_height,
998 nr_of_idct_render_targets, matrix, matrix))
999 goto error_c;
1000
1001 pipe_sampler_view_reference(&matrix, NULL);
1002
1003 return true;
1004
1005 error_c:
1006 vl_idct_cleanup(&dec->idct_y);
1007
1008 error_y:
1009 pipe_sampler_view_reference(&matrix, NULL);
1010
1011 error_matrix:
1012 dec->mc_source->destroy(dec->mc_source);
1013
1014 error_mc_source:
1015 dec->idct_source->destroy(dec->idct_source);
1016
1017 error_idct_source:
1018 return false;
1019 }
1020
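/* MC-only entrypoint: the state tracker supplies IDCT results, so just
 * create the MC source buffer at full resolution. */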
1021 static bool
1022 init_mc_source_without_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
1023 {
1024 enum pipe_format formats[3];
1025 struct pipe_video_buffer templat;
1026
1027 formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
1028 memset(&templat, 0, sizeof(templat));
1029 templat.width = dec->base.width;
1030 templat.height = dec->base.height;
1031 templat.chroma_format = dec->base.chroma_format;
1032 dec->mc_source = vl_video_buffer_create_ex
1033 (
1034 dec->context, &templat,
1035 formats, 1, 1, PIPE_USAGE_STATIC
1036 );
1037
1038 return dec->mc_source != NULL;
1039 }
1040
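/* Vertex shader hook for the MC stage: either chain in IDCT stage 2 or
 * simply pass the block texture coordinate through. */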
1041 static void
1042 mc_vert_shader_callback(void *priv, struct vl_mc *mc,
1043 struct ureg_program *shader,
1044 unsigned first_output,
1045 struct ureg_dst tex)
1046 {
1047 struct vl_mpeg12_decoder *dec = priv;
1048 struct ureg_dst o_vtex;
1049
1050 assert(priv && mc);
1051 assert(shader);
1052
1053 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1054 struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
1055 vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
1056 } else {
1057 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
1058 ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
1059 }
1060 }
1061
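/* Fragment shader hook for the MC stage: either run IDCT stage 2 or sample
 * the precomputed residuals from the MC source texture. */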
1062 static void
1063 mc_frag_shader_callback(void *priv, struct vl_mc *mc,
1064 struct ureg_program *shader,
1065 unsigned first_input,
1066 struct ureg_dst dst)
1067 {
1068 struct vl_mpeg12_decoder *dec = priv;
1069 struct ureg_src src, sampler;
1070
1071 assert(priv && mc);
1072 assert(shader);
1073
1074 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1075 struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
1076 vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
1077 } else {
1078 src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
1079 sampler = ureg_DECL_sampler(shader, 0);
1080 ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
1081 }
1082 }
1083
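/* Create a shader-based MPEG-1/2 decoder on top of the given context. The
 * decoder creates its own secondary context and, depending on the requested
 * entrypoint, sets up the bitstream parsing, inverse zscan, IDCT and motion
 * compensation stages.
 *
 * Minimal usage sketch (hypothetical caller; field values are for
 * illustration only):
 *
 *    struct pipe_video_codec templat = {0};
 *    templat.profile = PIPE_VIDEO_PROFILE_MPEG2_MAIN;
 *    templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
 *    templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
 *    templat.width = 1920;
 *    templat.height = 1088;
 *    struct pipe_video_codec *codec = vl_create_mpeg12_decoder(pipe, &templat);
 */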
1084 struct pipe_video_codec *
1085 vl_create_mpeg12_decoder(struct pipe_context *context,
1086 const struct pipe_video_codec *templat)
1087 {
1088 const unsigned block_size_pixels = VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
1089 const struct format_config *format_config;
1090 struct vl_mpeg12_decoder *dec;
1091
1092 assert(u_reduce_video_profile(templat->profile) == PIPE_VIDEO_FORMAT_MPEG12);
1093
1094 dec = CALLOC_STRUCT(vl_mpeg12_decoder);
1095
1096 if (!dec)
1097 return NULL;
1098
1099 dec->base = *templat;
1100 dec->base.context = context;
1101 dec->context = context->screen->context_create(context->screen, NULL);
1102
1103 dec->base.destroy = vl_mpeg12_destroy;
1104 dec->base.begin_frame = vl_mpeg12_begin_frame;
1105 dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
1106 dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
1107 dec->base.end_frame = vl_mpeg12_end_frame;
1108 dec->base.flush = vl_mpeg12_flush;
1109
1110 dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
1111 dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
1112 dec->width_in_macroblocks = align(dec->base.width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
1113
1114 /* TODO: Implement 422, 444 */
1115 assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
1116
1117 if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
1118 dec->chroma_width = dec->base.width / 2;
1119 dec->chroma_height = dec->base.height / 2;
1120 dec->num_blocks = dec->num_blocks * 2;
1121 } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
1122 dec->chroma_width = dec->base.width / 2;
1123 dec->chroma_height = dec->base.height;
1124 dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks;
1125 } else {
1126 dec->chroma_width = dec->base.width;
1127 dec->chroma_height = dec->base.height;
1128 dec->num_blocks = dec->num_blocks * 3;
1129 }
1130
1131 dec->quads = vl_vb_upload_quads(dec->context);
1132 dec->pos = vl_vb_upload_pos(
1133 dec->context,
1134 dec->base.width / VL_MACROBLOCK_WIDTH,
1135 dec->base.height / VL_MACROBLOCK_HEIGHT
1136 );
1137
1138 dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->context);
1139 dec->ves_mv = vl_vb_get_ves_mv(dec->context);
1140
1141 switch (templat->entrypoint) {
1142 case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
1143 format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
1144 break;
1145
1146 case PIPE_VIDEO_ENTRYPOINT_IDCT:
1147 format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
1148 break;
1149
1150 case PIPE_VIDEO_ENTRYPOINT_MC:
1151 format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
1152 break;
1153
1154 default:
1155 assert(0);
1156 FREE(dec);
1157 return NULL;
1158 }
1159
1160 if (!format_config) {
1161 FREE(dec);
1162 return NULL;
1163 }
1164
1165 if (!init_zscan(dec, format_config))
1166 goto error_zscan;
1167
1168 if (templat->entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1169 if (!init_idct(dec, format_config))
1170 goto error_sources;
1171 } else {
1172       if (!init_mc_source_without_idct(dec, format_config))
1173 goto error_sources;
1174 }
1175
1176 if (!vl_mc_init(&dec->mc_y, dec->context, dec->base.width, dec->base.height,
1177 VL_MACROBLOCK_HEIGHT, format_config->mc_scale,
1178 mc_vert_shader_callback, mc_frag_shader_callback, dec))
1179 goto error_mc_y;
1180
1181 // TODO
1182 if (!vl_mc_init(&dec->mc_c, dec->context, dec->base.width, dec->base.height,
1183 VL_BLOCK_HEIGHT, format_config->mc_scale,
1184 mc_vert_shader_callback, mc_frag_shader_callback, dec))
1185 goto error_mc_c;
1186
1187 if (!init_pipe_state(dec))
1188 goto error_pipe_state;
1189
1190 return &dec->base;
1191
1192 error_pipe_state:
1193 vl_mc_cleanup(&dec->mc_c);
1194
1195 error_mc_c:
1196 vl_mc_cleanup(&dec->mc_y);
1197
1198 error_mc_y:
1199 if (templat->entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1200 vl_idct_cleanup(&dec->idct_y);
1201 vl_idct_cleanup(&dec->idct_c);
1202 dec->idct_source->destroy(dec->idct_source);
1203 }
1204 dec->mc_source->destroy(dec->mc_source);
1205
1206 error_sources:
1207 vl_zscan_cleanup(&dec->zscan_y);
1208 vl_zscan_cleanup(&dec->zscan_c);
1209
1210 error_zscan:
1211 FREE(dec);
1212 return NULL;
1213 }