g3dvl: Fix memory leaks on error paths.
[mesa.git] / src / gallium / auxiliary / vl / vl_mpeg12_decoder.c
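
The error paths in the initialization helpers below follow the goto-unwind idiom: on failure, control jumps to a label that releases exactly the resources acquired so far, so nothing leaks and nothing is freed twice. A minimal sketch of the pattern, using hypothetical names that are not part of this file:

static bool init_foo(struct foo *f)
{
   f->a = create_a();          /* first resource */
   if (!f->a)
      goto error_a;

   f->b = create_b();          /* second resource */
   if (!f->b)
      goto error_b;            /* skip our own cleanup, undo only earlier work */

   return true;

error_b:
   destroy_a(f->a);
error_a:
   return false;
}

The same chain structure appears in init_mc_buffer(), init_idct_buffer(), init_zscan_buffer(), vl_mpeg12_create_buffer(), init_idct() and vl_create_mpeg12_decoder() below.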
1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <math.h>
29 #include <assert.h>
30
31 #include "util/u_memory.h"
32 #include "util/u_rect.h"
33 #include "util/u_sampler.h"
34 #include "util/u_video.h"
35
36 #include "vl_mpeg12_decoder.h"
37 #include "vl_defines.h"
38
39 #define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
40 #define SCALE_FACTOR_SSCALED (1.0f / 256.0f)
41
42 struct format_config {
43 enum pipe_format zscan_source_format;
44 enum pipe_format idct_source_format;
45 enum pipe_format mc_source_format;
46
47 float idct_scale;
48 float mc_scale;
49 };
50
51 static const struct format_config bitstream_format_config[] = {
52 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
53 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
54 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
55 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
56 };
57
58 static const unsigned num_bitstream_format_configs =
59 sizeof(bitstream_format_config) / sizeof(struct format_config);
60
61 static const struct format_config idct_format_config[] = {
62 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
63 // { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
64 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
65 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
66 };
67
68 static const unsigned num_idct_format_configs =
69 sizeof(idct_format_config) / sizeof(struct format_config);
70
71 static const struct format_config mc_format_config[] = {
72 //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
73 { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
74 };
75
76 static const unsigned num_mc_format_configs =
77 sizeof(mc_format_config) / sizeof(struct format_config);
78
79 static const unsigned const_empty_block_mask_420[3][2][2] = {
80 { { 0x20, 0x10 }, { 0x08, 0x04 } },
81 { { 0x02, 0x02 }, { 0x02, 0x02 } },
82 { { 0x01, 0x01 }, { 0x01, 0x01 } }
83 };
84
85 static bool
86 init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
87 {
88 struct pipe_resource *res, res_tmpl;
89 struct pipe_sampler_view sv_tmpl;
90 struct pipe_surface **destination;
91
92 unsigned i;
93
94 assert(dec && buffer);
95
96 memset(&res_tmpl, 0, sizeof(res_tmpl));
97 res_tmpl.target = PIPE_TEXTURE_2D;
98 res_tmpl.format = dec->zscan_source_format;
99 res_tmpl.width0 = dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT;
100 res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
101 res_tmpl.depth0 = 1;
102 res_tmpl.array_size = 1;
103 res_tmpl.usage = PIPE_USAGE_STREAM;
104 res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
105
106 res = dec->base.context->screen->resource_create(dec->base.context->screen, &res_tmpl);
107 if (!res)
108 goto error_source;
109
110
111 memset(&sv_tmpl, 0, sizeof(sv_tmpl));
112 u_sampler_view_default_template(&sv_tmpl, res, res->format);
113 sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED;
114 buffer->zscan_source = dec->base.context->create_sampler_view(dec->base.context, res, &sv_tmpl);
115 pipe_resource_reference(&res, NULL);
116 if (!buffer->zscan_source)
117 goto error_sampler;
118
119 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
120 destination = dec->idct_source->get_surfaces(dec->idct_source);
121 else
122 destination = dec->mc_source->get_surfaces(dec->mc_source);
123
124 if (!destination)
125 goto error_surface;
126
127 for (i = 0; i < VL_MAX_PLANES; ++i)
128 if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
129 &buffer->zscan[i], buffer->zscan_source, destination[i]))
130 goto error_plane;
131
132 return true;
133
134 error_plane:
135 for (; i > 0; --i)
136 vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);
137
138 error_surface:
139 error_sampler:
140 pipe_sampler_view_reference(&buffer->zscan_source, NULL);
141
142 error_source:
143 return false;
144 }
145
146 static void
147 cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
148 {
149 unsigned i;
150
151 assert(buffer);
152
153 for (i = 0; i < VL_MAX_PLANES; ++i)
154 vl_zscan_cleanup_buffer(&buffer->zscan[i]);
155
156 pipe_sampler_view_reference(&buffer->zscan_source, NULL);
157 }
158
159 static bool
160 init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
161 {
162 struct pipe_sampler_view **idct_source_sv, **mc_source_sv;
163
164 unsigned i;
165
166 assert(dec && buffer);
167
168 idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
169 if (!idct_source_sv)
170 goto error_source_sv;
171
172 mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
173 if (!mc_source_sv)
174 goto error_mc_source_sv;
175
176 for (i = 0; i < 3; ++i)
177 if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
178 &buffer->idct[i], idct_source_sv[i],
179 mc_source_sv[i]))
180 goto error_plane;
181
182 return true;
183
184 error_plane:
185 for (; i > 0; --i)
186 vl_idct_cleanup_buffer(&buffer->idct[i - 1]);
187
188 error_mc_source_sv:
189 error_source_sv:
190 return false;
191 }
192
193 static void
194 cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
195 {
196 unsigned i;
197
198 assert(buf);
199
200 for (i = 0; i < 3; ++i)
201 vl_idct_cleanup_buffer(&buf->idct[i]);
202 }
203
204 static bool
205 init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
206 {
207 assert(dec && buf);
208
209 if (!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
210 goto error_mc_y;
211
212 if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
213 goto error_mc_cb;
214
215 if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
216 goto error_mc_cr;
217
218 return true;
219
220 error_mc_cr:
221 vl_mc_cleanup_buffer(&buf->mc[1]);
222
223 error_mc_cb:
224 vl_mc_cleanup_buffer(&buf->mc[0]);
225
226 error_mc_y:
227 return false;
228 }
229
230 static void
231 cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
232 {
233 unsigned i;
234
235 assert(buf);
236
237 for (i = 0; i < VL_MAX_PLANES; ++i)
238 vl_mc_cleanup_buffer(&buf->mc[i]);
239 }
240
241 static INLINE void
242 MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
243 {
244 assert(mb);
245
246 switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
247 case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
248 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
249 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
250 break;
251
252 case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
253 weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
254 weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
255 break;
256
257 case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
258 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
259 weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
260 break;
261
262 default:
263 if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_PATTERN) {
264 /* pattern without a motion vector, just copy the old frame content */
265 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
266 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
267 } else {
268 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
269 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
270 }
271 break;
272 }
273 }
274
275 static INLINE struct vl_motionvector
276 MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
277 unsigned field_select_mask, unsigned weight)
278 {
279 struct vl_motionvector mv;
280
281 assert(mb);
282
283 if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
284 switch (mb->macroblock_modes.bits.frame_motion_type) {
285 case PIPE_MPEG12_MO_TYPE_FRAME:
286 mv.top.x = mb->PMV[0][vector][0];
287 mv.top.y = mb->PMV[0][vector][1];
288 mv.top.field_select = PIPE_VIDEO_FRAME;
289 mv.top.weight = weight;
290
291 mv.bottom.x = mb->PMV[0][vector][0];
292 mv.bottom.y = mb->PMV[0][vector][1];
293 mv.bottom.weight = weight;
294 mv.bottom.field_select = PIPE_VIDEO_FRAME;
295 break;
296
297 case PIPE_MPEG12_MO_TYPE_FIELD:
298 mv.top.x = mb->PMV[0][vector][0];
299 mv.top.y = mb->PMV[0][vector][1];
300 mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
301 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
302 mv.top.weight = weight;
303
304 mv.bottom.x = mb->PMV[1][vector][0];
305 mv.bottom.y = mb->PMV[1][vector][1];
306 mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
307 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
308 mv.bottom.weight = weight;
309 break;
310
311 default: // TODO: Support DUALPRIME and 16x8
312 break;
313 }
314 } else {
315 mv.top.x = mv.top.y = 0;
316 mv.top.field_select = PIPE_VIDEO_FRAME;
317 mv.top.weight = weight;
318
319 mv.bottom.x = mv.bottom.y = 0;
320 mv.bottom.field_select = PIPE_VIDEO_FRAME;
321 mv.bottom.weight = weight;
322 }
323 return mv;
324 }
325
326 static INLINE void
327 UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
328 struct vl_mpeg12_buffer *buf,
329 const struct pipe_mpeg12_macroblock *mb)
330 {
331 unsigned intra;
332 unsigned tb, x, y, num_blocks = 0;
333
334 assert(dec && buf);
335 assert(mb);
336
337 if (!mb->coded_block_pattern)
338 return;
339
340 intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;
341
342 for (y = 0; y < 2; ++y) {
343 for (x = 0; x < 2; ++x) {
344 if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
345
346 struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
347 stream->x = mb->x * 2 + x;
348 stream->y = mb->y * 2 + y;
349 stream->intra = intra;
350 stream->coding = mb->macroblock_modes.bits.dct_type;
351 stream->block_num = buf->block_num++;
352
353 buf->num_ycbcr_blocks[0]++;
354 buf->ycbcr_stream[0]++;
355
356 num_blocks++;
357 }
358 }
359 }
360
361 /* TODO: Implement 422, 444 */
362 //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
363
364 for (tb = 1; tb < 3; ++tb) {
365 if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
366
367 struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
368 stream->x = mb->x;
369 stream->y = mb->y;
370 stream->intra = intra;
371 stream->coding = 0;
372 stream->block_num = buf->block_num++;
373
374 buf->num_ycbcr_blocks[tb]++;
375 buf->ycbcr_stream[tb]++;
376
377 num_blocks++;
378 }
379 }
380
381 memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks);
382 buf->texels += 64 * num_blocks;
383 }
384
385 static void
386 vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
387 {
388 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
389
390 assert(decoder);
391
392 /* Asserted in softpipe_delete_fs_state() for some reason */
393 dec->base.context->bind_vs_state(dec->base.context, NULL);
394 dec->base.context->bind_fs_state(dec->base.context, NULL);
395
396 dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
397 dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
398
399 vl_mc_cleanup(&dec->mc_y);
400 vl_mc_cleanup(&dec->mc_c);
401 dec->mc_source->destroy(dec->mc_source);
402
403 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
404 vl_idct_cleanup(&dec->idct_y);
405 vl_idct_cleanup(&dec->idct_c);
406 dec->idct_source->destroy(dec->idct_source);
407 }
408
409 vl_zscan_cleanup(&dec->zscan_y);
410 vl_zscan_cleanup(&dec->zscan_c);
411
412 dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
413 dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
414
415 pipe_resource_reference(&dec->quads.buffer, NULL);
416 pipe_resource_reference(&dec->pos.buffer, NULL);
417
418 pipe_sampler_view_reference(&dec->zscan_linear, NULL);
419 pipe_sampler_view_reference(&dec->zscan_normal, NULL);
420 pipe_sampler_view_reference(&dec->zscan_alternate, NULL);
421
422 FREE(dec);
423 }
424
425 static void *
426 vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
427 {
428 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
429 struct vl_mpeg12_buffer *buffer;
430
431 assert(dec);
432
433 buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
434 if (buffer == NULL)
435 return NULL;
436
437 if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
438 dec->base.width / MACROBLOCK_WIDTH,
439 dec->base.height / MACROBLOCK_HEIGHT))
440 goto error_vertex_buffer;
441
442 if (!init_mc_buffer(dec, buffer))
443 goto error_mc;
444
445 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
446 if (!init_idct_buffer(dec, buffer))
447 goto error_idct;
448
449 if (!init_zscan_buffer(dec, buffer))
450 goto error_zscan;
451
452 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
453 vl_mpg12_bs_init(&buffer->bs, decoder);
454
455 return buffer;
456
457 error_zscan:
458 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
459 cleanup_idct_buffer(buffer);
460
461 error_idct:
462 cleanup_mc_buffer(buffer);
463
464 error_mc:
465 vl_vb_cleanup(&buffer->vertex_stream);
466
467 error_vertex_buffer:
468 FREE(buffer);
469 return NULL;
470 }
471
472 static void
473 vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer)
474 {
475 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
476 struct vl_mpeg12_buffer *buf = buffer;
477
478 assert(dec && buf);
479
480 cleanup_zscan_buffer(buf);
481
482 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
483 cleanup_idct_buffer(buf);
484
485 cleanup_mc_buffer(buf);
486
487 vl_vb_cleanup(&buf->vertex_stream);
488
489 FREE(buf);
490 }
491
492 static void
493 vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer)
494 {
495 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
496
497 assert(dec && buffer);
498
499 dec->current_buffer = buffer;
500 }
501
502 static void
503 vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
504 struct pipe_picture_desc *picture)
505 {
506 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
507 struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture;
508
509 assert(dec && pic);
510
511 dec->picture_desc = *pic;
512 }
513
514 static void
515 vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder,
516 const struct pipe_quant_matrix *matrix)
517 {
518 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
519 const struct pipe_mpeg12_quant_matrix *m = (const struct pipe_mpeg12_quant_matrix *)matrix;
520
521 assert(dec);
522 assert(matrix->codec == PIPE_VIDEO_CODEC_MPEG12);
523
524 memcpy(dec->intra_matrix, m->intra_matrix, 64);
525 memcpy(dec->non_intra_matrix, m->non_intra_matrix, 64);
526 }
527
528 static void
529 vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder,
530 struct pipe_video_buffer *target)
531 {
532 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
533 struct pipe_surface **surfaces;
534 unsigned i;
535
536 assert(dec);
537
538 surfaces = target->get_surfaces(target);
539 for (i = 0; i < VL_MAX_PLANES; ++i)
540 pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
541 }
542
543 static void
544 vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder,
545 struct pipe_video_buffer **ref_frames,
546 unsigned num_ref_frames)
547 {
548 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
549 struct pipe_sampler_view **sv;
550 unsigned i,j;
551
552 assert(dec);
553 assert(num_ref_frames <= VL_MAX_REF_FRAMES);
554
555 for (i = 0; i < num_ref_frames; ++i) {
556 sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]);
557 for (j = 0; j < VL_MAX_PLANES; ++j)
558 pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]);
559 }
560
561 for (; i < VL_MAX_REF_FRAMES; ++i)
562 for (j = 0; j < VL_MAX_PLANES; ++j)
563 pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL);
564 }
565
566 static void
567 vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
568 {
569 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
570 struct vl_mpeg12_buffer *buf;
571
572 struct pipe_resource *tex;
573 struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };
574
575 unsigned i;
576
577 assert(dec);
578
579 buf = dec->current_buffer;
580 assert(buf);
581
582 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
583 dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision);
584
585 for (i = 0; i < VL_MAX_PLANES; ++i) {
586 vl_zscan_upload_quant(&buf->zscan[i], dec->intra_matrix, true);
587 vl_zscan_upload_quant(&buf->zscan[i], dec->non_intra_matrix, false);
588 }
589
590 vl_vb_map(&buf->vertex_stream, dec->base.context);
591
592 tex = buf->zscan_source->texture;
593 rect.width = tex->width0;
594 rect.height = tex->height0;
595
596 buf->tex_transfer = dec->base.context->get_transfer
597 (
598 dec->base.context, tex,
599 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
600 &rect
601 );
602
603 buf->block_num = 0;
604 buf->texels = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer);
605
606 for (i = 0; i < VL_MAX_PLANES; ++i) {
607 buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
608 buf->num_ycbcr_blocks[i] = 0;
609 }
610
611 for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
612 buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
613
614 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
615 vl_mpg12_bs_set_picture_desc(&buf->bs, &dec->picture_desc);
616
617 } else {
618
619 for (i = 0; i < VL_MAX_PLANES; ++i)
620 vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
621 }
622 }
623
624 static void
625 vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
626 const struct pipe_macroblock *macroblocks,
627 unsigned num_macroblocks)
628 {
629 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
630 const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
631 struct vl_mpeg12_buffer *buf;
632
633 unsigned i, j, mv_weights[2];
634
635 assert(dec && dec->current_buffer);
636 assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
637
638 buf = dec->current_buffer;
639 assert(buf);
640
641 for (; num_macroblocks > 0; --num_macroblocks) {
642 unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;
643
644 if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
645 UploadYcbcrBlocks(dec, buf, mb);
646
647 MacroBlockTypeToPipeWeights(mb, mv_weights);
648
649 for (i = 0; i < 2; ++i) {
650 if (!dec->ref_frames[i][0]) continue;
651
652 buf->mv_stream[i][mb_addr] = MotionVectorToPipe
653 (
654 mb, i,
655 i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
656 mv_weights[i]
657 );
658 }
659
660 /* handle skipped macroblocks, see section 7.6.6 of the MPEG-2 spec (ISO/IEC 13818-2) */
661 if (mb->num_skipped_macroblocks > 0) {
662 struct vl_motionvector skipped_mv[2];
663
664 if (dec->ref_frames[0][0] && !dec->ref_frames[1][0]) {
665 skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
666 skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
667 } else {
668 skipped_mv[0] = buf->mv_stream[0][mb_addr];
669 skipped_mv[1] = buf->mv_stream[1][mb_addr];
670 }
671 skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
672 skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;
673
674 skipped_mv[0].bottom = skipped_mv[0].top;
675 skipped_mv[1].bottom = skipped_mv[1].top;
676
677 ++mb_addr;
678 for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
679 for (j = 0; j < 2; ++j) {
680 if (!dec->ref_frames[j][0]) continue;
681 buf->mv_stream[j][mb_addr] = skipped_mv[j];
682
683 }
684 }
685 }
686
687 ++mb;
688 }
689 }
690
691 static void
692 vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
693 unsigned num_bytes, const void *data)
694 {
695 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
696 struct vl_mpeg12_buffer *buf;
697
698 unsigned i;
699
700 assert(dec && dec->current_buffer);
701
702 buf = dec->current_buffer;
703 assert(buf);
704
705 for (i = 0; i < VL_MAX_PLANES; ++i)
706 vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ?
707 dec->zscan_alternate : dec->zscan_normal);
708
709 vl_mpg12_bs_decode(&buf->bs, num_bytes, data);
710 }
711
712 static void
713 vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
714 {
715 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
716 struct pipe_sampler_view **mc_source_sv;
717 struct pipe_vertex_buffer vb[3];
718 struct vl_mpeg12_buffer *buf;
719
720 unsigned i, j, component;
721 unsigned nr_components;
722
723 assert(dec && dec->current_buffer);
724
725 buf = dec->current_buffer;
726
727 vl_vb_unmap(&buf->vertex_stream, dec->base.context);
728
729 dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
730 dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer);
731
732 vb[0] = dec->quads;
733 vb[1] = dec->pos;
734
735 dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
736 for (i = 0; i < VL_MAX_PLANES; ++i) {
737 if (!dec->target_surfaces[i]) continue;
738
739 vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]);
740
741 for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
742 if (!dec->ref_frames[j][i]) continue;
743
744 vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
745 dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
746
747 vl_mc_render_ref(&buf->mc[i], dec->ref_frames[j][i]);
748 }
749 }
750
751 dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
752 for (i = 0; i < VL_MAX_PLANES; ++i) {
753 if (!buf->num_ycbcr_blocks[i]) continue;
754
755 vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
756 dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);
757
758 vl_zscan_render(&buf->zscan[i], buf->num_ycbcr_blocks[i]);
759
760 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
761 vl_idct_flush(&buf->idct[i], buf->num_ycbcr_blocks[i]);
762 }
763
764 mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
765 for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) {
766 if (!dec->target_surfaces[i]) continue;
767
768 nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
769 for (j = 0; j < nr_components; ++j, ++component) {
770 if (!buf->num_ycbcr_blocks[i]) continue;
771
772 vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
773 dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);
774
775 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
776 vl_idct_prepare_stage2(&buf->idct[component]);
777 else {
778 dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
779 dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
780 }
781 vl_mc_render_ycbcr(&buf->mc[i], j, buf->num_ycbcr_blocks[component]);
782 }
783 }
784 }
785
786 static void
787 vl_mpeg12_flush(struct pipe_video_decoder *decoder)
788 {
789 assert(decoder);
790
791 // No-op; for shaders it is much faster to flush everything in end_frame
792 }
793
794 static bool
795 init_pipe_state(struct vl_mpeg12_decoder *dec)
796 {
797 struct pipe_depth_stencil_alpha_state dsa;
798 struct pipe_sampler_state sampler;
799 unsigned i;
800
801 assert(dec);
802
803 memset(&dsa, 0, sizeof dsa);
804 dsa.depth.enabled = 0;
805 dsa.depth.writemask = 0;
806 dsa.depth.func = PIPE_FUNC_ALWAYS;
807 for (i = 0; i < 2; ++i) {
808 dsa.stencil[i].enabled = 0;
809 dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
810 dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
811 dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
812 dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
813 dsa.stencil[i].valuemask = 0;
814 dsa.stencil[i].writemask = 0;
815 }
816 dsa.alpha.enabled = 0;
817 dsa.alpha.func = PIPE_FUNC_ALWAYS;
818 dsa.alpha.ref_value = 0;
819 dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
820 dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
821
822 memset(&sampler, 0, sizeof(sampler));
823 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
824 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
825 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
826 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
827 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
828 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
829 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
830 sampler.compare_func = PIPE_FUNC_ALWAYS;
831 sampler.normalized_coords = 1;
832 dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
833 if (!dec->sampler_ycbcr)
834 return false;
835
836 return true;
837 }
838
839 static const struct format_config*
840 find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
841 {
842 struct pipe_screen *screen;
843 unsigned i;
844
845 assert(dec);
846
847 screen = dec->base.context->screen;
848
849 for (i = 0; i < num_configs; ++i) {
850 if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
851 1, PIPE_BIND_SAMPLER_VIEW))
852 continue;
853
854 if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
855 if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
856 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
857 continue;
858
859 if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
860 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
861 continue;
862 } else {
863 if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
864 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
865 continue;
866 }
867 return &configs[i];
868 }
869
870 return NULL;
871 }
872
873 static bool
874 init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
875 {
876 unsigned num_channels;
877
878 assert(dec);
879
880 dec->zscan_source_format = format_config->zscan_source_format;
881 dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
882 dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
883 dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
884
885 num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
886
887 if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
888 dec->blocks_per_line, dec->num_blocks, num_channels))
889 return false;
890
891 if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
892 dec->blocks_per_line, dec->num_blocks, num_channels))
893 return false;
894
895 return true;
896 }
897
898 static bool
899 init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
900 {
901 unsigned nr_of_idct_render_targets, max_inst;
902 enum pipe_format formats[3];
903
904 struct pipe_sampler_view *matrix = NULL;
905
906 nr_of_idct_render_targets = dec->base.context->screen->get_param
907 (
908 dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
909 );
910
911 max_inst = dec->base.context->screen->get_shader_param
912 (
913 dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
914 );
915
916 // Just assume we need 32 instructions per render target; not 100% true, but should work in most cases
917 if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
918 // more than 4 render targets usually doesn't make any sense
919 nr_of_idct_render_targets = 4;
920 else
921 nr_of_idct_render_targets = 1;
922
923 formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
924 dec->idct_source = vl_video_buffer_create_ex
925 (
926 dec->base.context, dec->base.width / 4, dec->base.height, 1,
927 dec->base.chroma_format, formats, PIPE_USAGE_STATIC
928 );
929
930 if (!dec->idct_source)
931 goto error_idct_source;
932
933 formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
934 dec->mc_source = vl_video_buffer_create_ex
935 (
936 dec->base.context, dec->base.width / nr_of_idct_render_targets,
937 dec->base.height / 4, nr_of_idct_render_targets,
938 dec->base.chroma_format, formats, PIPE_USAGE_STATIC
939 );
940
941 if (!dec->mc_source)
942 goto error_mc_source;
943
944 if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
945 goto error_matrix;
946
947 if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
948 nr_of_idct_render_targets, matrix, matrix))
949 goto error_y;
950
951 if (!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
952 nr_of_idct_render_targets, matrix, matrix))
953 goto error_c;
954
955 pipe_sampler_view_reference(&matrix, NULL);
956
957 return true;
958
959 error_c:
960 vl_idct_cleanup(&dec->idct_y);
961
962 error_y:
963 pipe_sampler_view_reference(&matrix, NULL);
964
965 error_matrix:
966 dec->mc_source->destroy(dec->mc_source);
967
968 error_mc_source:
969 dec->idct_source->destroy(dec->idct_source);
970
971 error_idct_source:
972 return false;
973 }
974
975 static bool
976 init_mc_source_without_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
977 {
978 enum pipe_format formats[3];
979
980 formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
981 dec->mc_source = vl_video_buffer_create_ex
982 (
983 dec->base.context, dec->base.width, dec->base.height, 1,
984 dec->base.chroma_format, formats, PIPE_USAGE_STATIC
985 );
986
987 return dec->mc_source != NULL;
988 }
989
990 static void
991 mc_vert_shader_callback(void *priv, struct vl_mc *mc,
992 struct ureg_program *shader,
993 unsigned first_output,
994 struct ureg_dst tex)
995 {
996 struct vl_mpeg12_decoder *dec = priv;
997 struct ureg_dst o_vtex;
998
999 assert(priv && mc);
1000 assert(shader);
1001
1002 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1003 struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
1004 vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
1005 } else {
1006 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
1007 ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
1008 }
1009 }
1010
1011 static void
1012 mc_frag_shader_callback(void *priv, struct vl_mc *mc,
1013 struct ureg_program *shader,
1014 unsigned first_input,
1015 struct ureg_dst dst)
1016 {
1017 struct vl_mpeg12_decoder *dec = priv;
1018 struct ureg_src src, sampler;
1019
1020 assert(priv && mc);
1021 assert(shader);
1022
1023 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1024 struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
1025 vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
1026 } else {
1027 src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
1028 sampler = ureg_DECL_sampler(shader, 0);
1029 ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
1030 }
1031 }
1032
1033 struct pipe_video_decoder *
1034 vl_create_mpeg12_decoder(struct pipe_context *context,
1035 enum pipe_video_profile profile,
1036 enum pipe_video_entrypoint entrypoint,
1037 enum pipe_video_chroma_format chroma_format,
1038 unsigned width, unsigned height, unsigned max_references)
1039 {
1040 const unsigned block_size_pixels = BLOCK_WIDTH * BLOCK_HEIGHT;
1041 const struct format_config *format_config;
1042 struct vl_mpeg12_decoder *dec;
1043
1044 assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
1045
1046 dec = CALLOC_STRUCT(vl_mpeg12_decoder);
1047
1048 if (!dec)
1049 return NULL;
1050
1051 dec->base.context = context;
1052 dec->base.profile = profile;
1053 dec->base.entrypoint = entrypoint;
1054 dec->base.chroma_format = chroma_format;
1055 dec->base.width = width;
1056 dec->base.height = height;
1057 dec->base.max_references = max_references;
1058
1059 dec->base.destroy = vl_mpeg12_destroy;
1060 dec->base.create_buffer = vl_mpeg12_create_buffer;
1061 dec->base.destroy_buffer = vl_mpeg12_destroy_buffer;
1062 dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer;
1063 dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
1064 dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix;
1065 dec->base.set_decode_target = vl_mpeg12_set_decode_target;
1066 dec->base.set_reference_frames = vl_mpeg12_set_reference_frames;
1067 dec->base.begin_frame = vl_mpeg12_begin_frame;
1068 dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
1069 dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
1070 dec->base.end_frame = vl_mpeg12_end_frame;
1071 dec->base.flush = vl_mpeg12_flush;
1072
1073 dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
1074 dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
1075 dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
1076
1077 /* TODO: Implement 422, 444 */
1078 assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
1079
1080 if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
1081 dec->chroma_width = dec->base.width / 2;
1082 dec->chroma_height = dec->base.height / 2;
1083 dec->num_blocks = dec->num_blocks * 2;
1084 } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
1085 dec->chroma_width = dec->base.width;
1086 dec->chroma_height = dec->base.height / 2;
1087 dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks;
1088 } else {
1089 dec->chroma_width = dec->base.width;
1090 dec->chroma_height = dec->base.height;
1091 dec->num_blocks = dec->num_blocks * 3;
1092 }
1093
1094 dec->quads = vl_vb_upload_quads(dec->base.context);
1095 dec->pos = vl_vb_upload_pos(
1096 dec->base.context,
1097 dec->base.width / MACROBLOCK_WIDTH,
1098 dec->base.height / MACROBLOCK_HEIGHT
1099 );
1100
1101 dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
1102 dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
1103
1104 switch (entrypoint) {
1105 case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
1106 format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
1107 break;
1108
1109 case PIPE_VIDEO_ENTRYPOINT_IDCT:
1110 format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
1111 break;
1112
1113 case PIPE_VIDEO_ENTRYPOINT_MC:
1114 format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
1115 break;
1116
1117 default:
1118 assert(0);
1119 FREE(dec);
1120 return NULL;
1121 }
1122
1123 if (!format_config) {
1124 FREE(dec);
1125 return NULL;
1126 }
1127
1128 if (!init_zscan(dec, format_config))
1129 goto error_zscan;
1130
1131 if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1132 if (!init_idct(dec, format_config))
1133 goto error_sources;
1134 } else {
1135 if (!init_mc_source_without_idct(dec, format_config))
1136 goto error_sources;
1137 }
1138
1139 if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
1140 MACROBLOCK_HEIGHT, format_config->mc_scale,
1141 mc_vert_shader_callback, mc_frag_shader_callback, dec))
1142 goto error_mc_y;
1143
1144 // TODO
1145 if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
1146 BLOCK_HEIGHT, format_config->mc_scale,
1147 mc_vert_shader_callback, mc_frag_shader_callback, dec))
1148 goto error_mc_c;
1149
1150 if (!init_pipe_state(dec))
1151 goto error_pipe_state;
1152
1153 memset(dec->intra_matrix, 0x10, 64);
1154 memset(dec->non_intra_matrix, 0x10, 64);
1155
1156 return &dec->base;
1157
1158 error_pipe_state:
1159 vl_mc_cleanup(&dec->mc_c);
1160
1161 error_mc_c:
1162 vl_mc_cleanup(&dec->mc_y);
1163
1164 error_mc_y:
1165 if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1166 vl_idct_cleanup(&dec->idct_y);
1167 vl_idct_cleanup(&dec->idct_c);
1168 dec->idct_source->destroy(dec->idct_source);
1169 }
1170 dec->mc_source->destroy(dec->mc_source);
1171
1172 error_sources:
1173 vl_zscan_cleanup(&dec->zscan_y);
1174 vl_zscan_cleanup(&dec->zscan_c);
1175
1176 error_zscan:
1177 FREE(dec);
1178 return NULL;
1179 }
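
For reference, a hedged sketch of how a state tracker might drive the decoder through the entry points installed in vl_create_mpeg12_decoder() above. The pipe context, target buffer, picture description, quantization matrix and bitstream data are assumed to come from the caller, and the MPEG-2 main-profile enum value is an assumption, not something defined in this file:

/* Hypothetical usage sketch, not part of this file. */
static void
decode_one_frame(struct pipe_context *pipe,
                 struct pipe_video_buffer *target,
                 struct pipe_picture_desc *picture,
                 struct pipe_quant_matrix *matrix,
                 unsigned width, unsigned height,
                 unsigned num_bytes, const void *data)
{
   struct pipe_video_decoder *dec;
   void *buf;

   dec = vl_create_mpeg12_decoder(pipe, PIPE_VIDEO_PROFILE_MPEG2_MAIN,
                                  PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                  PIPE_VIDEO_CHROMA_FORMAT_420,
                                  width, height, 2);
   if (!dec)
      return;

   buf = dec->create_buffer(dec);
   if (!buf) {
      dec->destroy(dec);
      return;
   }

   dec->set_decode_buffer(dec, buf);
   dec->set_decode_target(dec, target);
   dec->set_picture_parameters(dec, picture);
   dec->set_quant_matrix(dec, matrix);

   dec->begin_frame(dec);
   dec->decode_bitstream(dec, num_bytes, data);
   dec->end_frame(dec);
   dec->flush(dec);

   dec->destroy_buffer(dec, buf);
   dec->destroy(dec);
}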