gallium: add new double-related shader caps to all the getters
[mesa.git] / src / gallium / state_trackers / va / picture.c
1 /**************************************************************************
2 *
3 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
4 * Copyright 2014 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 #include "pipe/p_video_codec.h"
30
31 #include "util/u_handle_table.h"
32 #include "util/u_video.h"
33
34 #include "vl/vl_vlc.h"
35
36 #include "va_private.h"
37
38 VAStatus
39 vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
40 {
41 vlVaDriver *drv;
42 vlVaContext *context;
43 vlVaSurface *surf;
44
45 if (!ctx)
46 return VA_STATUS_ERROR_INVALID_CONTEXT;
47
48 drv = VL_VA_DRIVER(ctx);
49 if (!drv)
50 return VA_STATUS_ERROR_INVALID_CONTEXT;
51
52 context = handle_table_get(drv->htab, context_id);
53 if (!context)
54 return VA_STATUS_ERROR_INVALID_CONTEXT;
55
56 surf = handle_table_get(drv->htab, render_target);
57 if (!surf || !surf->buffer)
58 return VA_STATUS_ERROR_INVALID_SURFACE;
59
60 context->target = surf->buffer;
61 context->decoder->begin_frame(context->decoder, context->target, NULL);
62
63 return VA_STATUS_SUCCESS;
64 }
65
66 static void
67 getReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
68 struct pipe_video_buffer **ref_frame)
69 {
70 vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
71 if (surf)
72 *ref_frame = surf->buffer;
73 else
74 *ref_frame = NULL;
75 }
76
/* Translate a VAPictureParameterBuffer* into the gallium picture
 * description stored in context->desc for the codec this context decodes.
 * Only the fields the pipe decoders consume are copied; VA fields that are
 * intentionally ignored are listed as comments so the mapping stays
 * auditable against va.h. */
static void
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAPictureParameterBufferMPEG2 *mpeg2;
   VAPictureParameterBufferH264 *h264;
   VAPictureParameterBufferVC1 * vc1;
   VAPictureParameterBufferMPEG4 *mpeg4;
   vlVaSurface *surf_forward;
   vlVaSurface *surf_backward;
   unsigned int i;
   /* All-zero fallback matrices for apps (e.g. mpv) that never send an IQ
      matrix buffer -- NOTE(review): zero is not the MPEG-4 spec default
      matrix; presumably the decoder treats these specially, verify. */
   static const uint8_t default_intra_quant_matrix[64] = { 0 };
   static const uint8_t default_non_intra_quant_matrix[64] = { 0 };

   switch (u_reduce_video_profile(context->decoder->profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      assert(buf->size >= sizeof(VAPictureParameterBufferMPEG2) && buf->num_elements == 1);
      mpeg2 = buf->data;
      /*horizontal_size;*/
      /*vertical_size;*/
      getReferenceFrame(drv, mpeg2->forward_reference_picture, &context->desc.mpeg12.ref[0]);
      getReferenceFrame(drv, mpeg2->backward_reference_picture, &context->desc.mpeg12.ref[1]);
      context->desc.mpeg12.picture_coding_type = mpeg2->picture_coding_type;
      /* VA packs the four f_codes into one 16-bit word; the pipe interface
         stores each nibble biased by -1 */
      context->desc.mpeg12.f_code[0][0] = ((mpeg2->f_code >> 12) & 0xf) - 1;
      context->desc.mpeg12.f_code[0][1] = ((mpeg2->f_code >> 8) & 0xf) - 1;
      context->desc.mpeg12.f_code[1][0] = ((mpeg2->f_code >> 4) & 0xf) - 1;
      context->desc.mpeg12.f_code[1][1] = (mpeg2->f_code & 0xf) - 1;
      context->desc.mpeg12.intra_dc_precision =
         mpeg2->picture_coding_extension.bits.intra_dc_precision;
      context->desc.mpeg12.picture_structure =
         mpeg2->picture_coding_extension.bits.picture_structure;
      context->desc.mpeg12.top_field_first =
         mpeg2->picture_coding_extension.bits.top_field_first;
      context->desc.mpeg12.frame_pred_frame_dct =
         mpeg2->picture_coding_extension.bits.frame_pred_frame_dct;
      context->desc.mpeg12.concealment_motion_vectors =
         mpeg2->picture_coding_extension.bits.concealment_motion_vectors;
      context->desc.mpeg12.q_scale_type =
         mpeg2->picture_coding_extension.bits.q_scale_type;
      context->desc.mpeg12.intra_vlc_format =
         mpeg2->picture_coding_extension.bits.intra_vlc_format;
      context->desc.mpeg12.alternate_scan =
         mpeg2->picture_coding_extension.bits.alternate_scan;
      /*repeat_first_field*/
      /*progressive_frame*/
      /*is_first_field*/
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      assert(buf->size >= sizeof(VAPictureParameterBufferH264) && buf->num_elements == 1);
      h264 = buf->data;
      /*CurrPic*/
      context->desc.h264.field_order_cnt[0] = h264->CurrPic.TopFieldOrderCnt;
      context->desc.h264.field_order_cnt[1] = h264->CurrPic.BottomFieldOrderCnt;
      /*ReferenceFrames[16]*/
      /*picture_width_in_mbs_minus1*/
      /*picture_height_in_mbs_minus1*/
      /*bit_depth_luma_minus8*/
      /*bit_depth_chroma_minus8*/
      context->desc.h264.num_ref_frames = h264->num_ref_frames;
      /*chroma_format_idc*/
      /*residual_colour_transform_flag*/
      /*gaps_in_frame_num_value_allowed_flag*/
      /* NOTE(review): the writes below assume context->desc.h264.pps and
         pps->sps were allocated when the context was created -- confirm in
         the context-creation path. */
      context->desc.h264.pps->sps->frame_mbs_only_flag =
         h264->seq_fields.bits.frame_mbs_only_flag;
      context->desc.h264.pps->sps->mb_adaptive_frame_field_flag =
         h264->seq_fields.bits.mb_adaptive_frame_field_flag;
      context->desc.h264.pps->sps->direct_8x8_inference_flag =
         h264->seq_fields.bits.direct_8x8_inference_flag;
      /*MinLumaBiPredSize8x8*/
      context->desc.h264.pps->sps->log2_max_frame_num_minus4 =
         h264->seq_fields.bits.log2_max_frame_num_minus4;
      context->desc.h264.pps->sps->pic_order_cnt_type =
         h264->seq_fields.bits.pic_order_cnt_type;
      context->desc.h264.pps->sps->log2_max_pic_order_cnt_lsb_minus4 =
         h264->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4;
      context->desc.h264.pps->sps->delta_pic_order_always_zero_flag =
         h264->seq_fields.bits.delta_pic_order_always_zero_flag;
      /*num_slice_groups_minus1*/
      /*slice_group_map_type*/
      /*slice_group_change_rate_minus1*/
      context->desc.h264.pps->pic_init_qp_minus26 =
         h264->pic_init_qp_minus26;
      /*pic_init_qs_minus26*/
      context->desc.h264.pps->chroma_qp_index_offset =
         h264->chroma_qp_index_offset;
      context->desc.h264.pps->second_chroma_qp_index_offset =
         h264->second_chroma_qp_index_offset;
      context->desc.h264.pps->entropy_coding_mode_flag =
         h264->pic_fields.bits.entropy_coding_mode_flag;
      context->desc.h264.pps->weighted_pred_flag =
         h264->pic_fields.bits.weighted_pred_flag;
      context->desc.h264.pps->weighted_bipred_idc =
         h264->pic_fields.bits.weighted_bipred_idc;
      context->desc.h264.pps->transform_8x8_mode_flag =
         h264->pic_fields.bits.transform_8x8_mode_flag;
      context->desc.h264.field_pic_flag =
         h264->pic_fields.bits.field_pic_flag;
      context->desc.h264.pps->constrained_intra_pred_flag =
         h264->pic_fields.bits.constrained_intra_pred_flag;
      /* VA's pic_order_present_flag is the same syntax element the pipe
         interface calls bottom_field_pic_order_in_frame_present_flag */
      context->desc.h264.pps->bottom_field_pic_order_in_frame_present_flag =
         h264->pic_fields.bits.pic_order_present_flag;
      context->desc.h264.pps->deblocking_filter_control_present_flag =
         h264->pic_fields.bits.deblocking_filter_control_present_flag;
      context->desc.h264.pps->redundant_pic_cnt_present_flag =
         h264->pic_fields.bits.redundant_pic_cnt_present_flag;
      /*reference_pic_flag*/
      context->desc.h264.frame_num = h264->frame_num;
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      assert(buf->size >= sizeof(VAPictureParameterBufferVC1) && buf->num_elements == 1);
      vc1 = buf->data;
      getReferenceFrame(drv, vc1->forward_reference_picture, &context->desc.vc1.ref[0]);
      getReferenceFrame(drv, vc1->backward_reference_picture, &context->desc.vc1.ref[1]);
      context->desc.vc1.picture_type = vc1->picture_fields.bits.picture_type;
      context->desc.vc1.frame_coding_mode = vc1->picture_fields.bits.frame_coding_mode;
      /* pipe only keeps whether post processing is requested, not the mode */
      context->desc.vc1.postprocflag = vc1->post_processing != 0;
      context->desc.vc1.pulldown = vc1->sequence_fields.bits.pulldown;
      context->desc.vc1.interlace = vc1->sequence_fields.bits.interlace;
      context->desc.vc1.tfcntrflag = vc1->sequence_fields.bits.tfcntrflag;
      context->desc.vc1.finterpflag = vc1->sequence_fields.bits.finterpflag;
      context->desc.vc1.psf = vc1->sequence_fields.bits.psf;
      context->desc.vc1.dquant = vc1->pic_quantizer_fields.bits.dquant;
      context->desc.vc1.panscan_flag = vc1->entrypoint_fields.bits.panscan_flag;
      context->desc.vc1.refdist_flag =
         vc1->reference_fields.bits.reference_distance_flag;
      context->desc.vc1.quantizer = vc1->pic_quantizer_fields.bits.quantizer;
      context->desc.vc1.extended_mv = vc1->mv_fields.bits.extended_mv_flag;
      context->desc.vc1.extended_dmv = vc1->mv_fields.bits.extended_dmv_flag;
      context->desc.vc1.overlap = vc1->sequence_fields.bits.overlap;
      context->desc.vc1.vstransform =
         vc1->transform_fields.bits.variable_sized_transform_flag;
      context->desc.vc1.loopfilter = vc1->entrypoint_fields.bits.loopfilter;
      context->desc.vc1.fastuvmc = vc1->fast_uvmc_flag;
      context->desc.vc1.range_mapy_flag = vc1->range_mapping_fields.bits.luma_flag;
      context->desc.vc1.range_mapy = vc1->range_mapping_fields.bits.luma;
      context->desc.vc1.range_mapuv_flag = vc1->range_mapping_fields.bits.chroma_flag;
      context->desc.vc1.range_mapuv = vc1->range_mapping_fields.bits.chroma;
      context->desc.vc1.multires = vc1->sequence_fields.bits.multires;
      context->desc.vc1.syncmarker = vc1->sequence_fields.bits.syncmarker;
      context->desc.vc1.rangered = vc1->sequence_fields.bits.rangered;
      context->desc.vc1.maxbframes = vc1->sequence_fields.bits.max_b_frames;
      context->desc.vc1.deblockEnable = vc1->post_processing != 0;
      context->desc.vc1.pquant = vc1->pic_quantizer_fields.bits.pic_quantizer_scale;
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      assert(buf->size >= sizeof(VAPictureParameterBufferMPEG4) && buf->num_elements == 1);
      mpeg4 = buf->data;

      /* keep a full copy: vlVaDecoderFixMPEG4Startcode() needs fields that
         are not mirrored into context->desc.mpeg4 */
      context->mpeg4.pps = *mpeg4;

      /* vop_width */
      /* vop_height */
      /* forward_reference_picture */
      /* backward_reference_picture */
      context->desc.mpeg4.short_video_header =
         mpeg4->vol_fields.bits.short_video_header;
      /* chroma_format */
      context->desc.mpeg4.interlaced = mpeg4->vol_fields.bits.interlaced;
      /* obmc_disable */
      /* sprite_enable */
      /* sprite_warping_accuracy */
      context->desc.mpeg4.quant_type = mpeg4->vol_fields.bits.quant_type;
      context->desc.mpeg4.quarter_sample = mpeg4->vol_fields.bits.quarter_sample;
      /* data_partitioned */
      /* reversible_vlc */
      context->desc.mpeg4.resync_marker_disable =
         mpeg4->vol_fields.bits.resync_marker_disable;
      /* no_of_sprite_warping_points */
      /* sprite_trajectory_du */
      /* sprite_trajectory_dv */
      /* quant_precision */
      context->desc.mpeg4.vop_coding_type = mpeg4->vop_fields.bits.vop_coding_type;
      /* backward_reference_vop_coding_type */
      /* vop_rounding_type */
      /* intra_dc_vlc_thr */
      context->desc.mpeg4.top_field_first =
         mpeg4->vop_fields.bits.top_field_first;
      context->desc.mpeg4.alternate_vertical_scan_flag =
         mpeg4->vop_fields.bits.alternate_vertical_scan_flag;
      context->desc.mpeg4.vop_fcode_forward = mpeg4->vop_fcode_forward;
      context->desc.mpeg4.vop_fcode_backward = mpeg4->vop_fcode_backward;
      context->desc.mpeg4.vop_time_increment_resolution =
         mpeg4->vop_time_increment_resolution;
      /* num_gobs_in_vop */
      /* num_macroblocks_in_gob */
      /* VA supplies one TRB/TRD pair; pipe expects one per field */
      context->desc.mpeg4.trb[0] = mpeg4->TRB;
      context->desc.mpeg4.trb[1] = mpeg4->TRB;
      context->desc.mpeg4.trd[0] = mpeg4->TRD;
      context->desc.mpeg4.trd[1] = mpeg4->TRD;

      /* default [non-]intra quant matrix because mpv does not set these
         matrices */
      if (!context->desc.mpeg4.intra_matrix)
         context->desc.mpeg4.intra_matrix = default_intra_quant_matrix;
      if (!context->desc.mpeg4.non_intra_matrix)
         context->desc.mpeg4.non_intra_matrix = default_non_intra_quant_matrix;

      surf_forward = handle_table_get(drv->htab, mpeg4->forward_reference_picture);
      if (surf_forward)
         context->desc.mpeg4.ref[0] = surf_forward->buffer;
      surf_backward = handle_table_get(drv->htab, mpeg4->backward_reference_picture);
      if (surf_backward)
         context->desc.mpeg4.ref[1] = surf_backward->buffer;

      /* number of bits needed to code vop_time_increment, i.e.
         ceil(log2(vop_time_increment_resolution)) rounded up for exact
         powers of two */
      context->mpeg4.vti_bits = 0;
      for (i = context->desc.mpeg4.vop_time_increment_resolution; i > 0; i /= 2)
         ++context->mpeg4.vti_bits;

      break;

   default:
      break;
   }
}
293
294 static void
295 handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
296 {
297 VAIQMatrixBufferMPEG2 *mpeg2;
298 VAIQMatrixBufferH264 *h264;
299 VAIQMatrixBufferMPEG4 *mpeg4;
300
301 switch (u_reduce_video_profile(context->decoder->profile)) {
302 case PIPE_VIDEO_FORMAT_MPEG12:
303 assert(buf->size >= sizeof(VAIQMatrixBufferMPEG2) && buf->num_elements == 1);
304 mpeg2 = buf->data;
305 if (mpeg2->load_intra_quantiser_matrix)
306 context->desc.mpeg12.intra_matrix = mpeg2->intra_quantiser_matrix;
307 else
308 context->desc.mpeg12.intra_matrix = NULL;
309
310 if (mpeg2->load_non_intra_quantiser_matrix)
311 context->desc.mpeg12.non_intra_matrix = mpeg2->non_intra_quantiser_matrix;
312 else
313 context->desc.mpeg12.non_intra_matrix = NULL;
314 break;
315
316 case PIPE_VIDEO_FORMAT_MPEG4_AVC:
317 assert(buf->size >= sizeof(VAIQMatrixBufferH264) && buf->num_elements == 1);
318 h264 = buf->data;
319 memcpy(&context->desc.h264.pps->ScalingList4x4, h264->ScalingList4x4, 6 * 16);
320 memcpy(&context->desc.h264.pps->ScalingList8x8, h264->ScalingList8x8, 2 * 64);
321 break;
322
323 case PIPE_VIDEO_FORMAT_MPEG4:
324 assert(buf->size >= sizeof(VAIQMatrixBufferMPEG4) && buf->num_elements == 1);
325 mpeg4 = buf->data;
326
327 if (mpeg4->load_intra_quant_mat)
328 context->desc.mpeg4.intra_matrix = mpeg4->intra_quant_mat;
329 else
330 context->desc.mpeg4.intra_matrix = NULL;
331
332 if (mpeg4->load_non_intra_quant_mat)
333 context->desc.mpeg4.non_intra_matrix = mpeg4->non_intra_quant_mat;
334 else
335 context->desc.mpeg4.non_intra_matrix = NULL;
336 break;
337
338 default:
339 break;
340 }
341 }
342
343 static void
344 handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
345 {
346 VASliceParameterBufferH264 *h264;
347 VASliceParameterBufferMPEG4 *mpeg4;
348
349 switch (u_reduce_video_profile(context->decoder->profile)) {
350 case PIPE_VIDEO_FORMAT_MPEG4_AVC:
351 assert(buf->size >= sizeof(VASliceParameterBufferH264) && buf->num_elements == 1);
352 h264 = buf->data;
353 context->desc.h264.num_ref_idx_l0_active_minus1 =
354 h264->num_ref_idx_l0_active_minus1;
355 context->desc.h264.num_ref_idx_l1_active_minus1 =
356 h264->num_ref_idx_l1_active_minus1;
357 break;
358 case PIPE_VIDEO_FORMAT_MPEG4:
359 assert(buf->size >= sizeof(VASliceParameterBufferMPEG4) && buf->num_elements == 1);
360 mpeg4 = buf->data;
361
362 context->mpeg4.quant_scale = mpeg4->quant_scale;
363 break;
364 default:
365 break;
366 }
367 }
368
/* Minimal MSB-first bit writer, used to synthesize MPEG-4 GOV/VOP headers. */
struct bit_stream
{
   uint8_t *data;
   unsigned int length; /* total capacity, in bits */
   unsigned int pos; /* current write position, in bits */
};
375
376 static inline void
377 write_bit(struct bit_stream *writer, unsigned int bit)
378 {
379 assert(writer->length > (writer)->pos);
380 writer->data[writer->pos>>3] |= ((bit & 1)<<(7 - (writer->pos & 7)));
381 writer->pos++;
382 }
383
/* Append the low `len` bits of `bits` to the stream, most significant
 * bit first. */
static inline void
write_bits(struct bit_stream *writer, unsigned int bits, unsigned int len)
{
   assert(len <= sizeof(bits) * 8);
   while (len--)
      write_bit(writer, bits >> len);
}
392
/* Rebuild the MPEG-4 GOV (for I frames) and VOP headers that VA-API
 * applications strip from the slice data, storing the result in
 * context->mpeg4.start_code so it can be prepended to the bitstream.
 * Relies on fields captured earlier: context->mpeg4.pps (picture params),
 * context->mpeg4.quant_scale (slice params) and context->mpeg4.frame_num. */
static void
vlVaDecoderFixMPEG4Startcode(vlVaContext *context)
{
   /* 0x000001b6 = VOP start code; payload bits are filled in below */
   uint8_t vop[] = { 0x00, 0x00, 0x01, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00 };
   struct bit_stream bs_vop = {vop, sizeof(vop)*8, 32};
   unsigned int vop_time_inc;
   int mod_time;
   unsigned int vop_size;
   unsigned int vop_coding_type = context->desc.mpeg4.vop_coding_type;

   context->mpeg4.start_code_size = 0;
   memset(context->mpeg4.start_code, 0, sizeof(context->mpeg4.start_code));
   /* vop_coding_type is 0-based while the PIPE_MPEG12 coding types start
      at 1, hence the +1 in every comparison below */
   if (vop_coding_type+1 == PIPE_MPEG12_PICTURE_CODING_TYPE_I) {
      /* I frames get a group_of_vop header (0x000001b3) carrying the
         wall-clock style time code derived from the frame counter */
      unsigned int vop_time = context->mpeg4.frame_num/
         context->desc.mpeg4.vop_time_increment_resolution;
      unsigned int vop_hour = vop_time / 3600;
      unsigned int vop_minute = (vop_time / 60) % 60;
      unsigned int vop_second = vop_time % 60;
      uint8_t group_of_vop[] = { 0x00, 0x00, 0x01, 0xb3, 0x00, 0x00, 0x00 };
      struct bit_stream bs_gvop = {group_of_vop, sizeof(group_of_vop)*8, 32};

      write_bits(&bs_gvop, vop_hour, 5);
      write_bits(&bs_gvop, vop_minute, 6);
      write_bit(&bs_gvop, 1); /* marker_bit */
      write_bits(&bs_gvop, vop_second, 6);
      write_bit(&bs_gvop, 0); /* closed_gov */ /* TODO replace magic */
      write_bit(&bs_gvop, 0); /* broken_link */
      write_bit(&bs_gvop, 0); /* padding */
      write_bits(&bs_gvop, 7, 3); /* padding */

      memcpy(context->mpeg4.start_code, group_of_vop, sizeof(group_of_vop));
      context->mpeg4.start_code_size += sizeof(group_of_vop);
   }

   write_bits(&bs_vop, vop_coding_type, 2);
   /* at most one modulo_time_base '1' bit: set when the frame counter just
      crossed a whole-second boundary on a non-I frame */
   mod_time = context->mpeg4.frame_num %
      context->desc.mpeg4.vop_time_increment_resolution == 0 &&
      vop_coding_type+1 != PIPE_MPEG12_PICTURE_CODING_TYPE_I;
   while (mod_time--)
      write_bit(&bs_vop, 1); /* modulo_time_base */
   write_bit(&bs_vop, 0); /* modulo_time_base */

   write_bit(&bs_vop, 1); /* marker_bit */
   vop_time_inc = context->mpeg4.frame_num %
      context->desc.mpeg4.vop_time_increment_resolution;
   write_bits(&bs_vop, vop_time_inc, context->mpeg4.vti_bits);
   write_bit(&bs_vop, 1); /* marker_bit */
   write_bit(&bs_vop, 1); /* vop_coded */
   if (vop_coding_type+1 == PIPE_MPEG12_PICTURE_CODING_TYPE_P)
      write_bit(&bs_vop, context->mpeg4.pps.vop_fields.bits.vop_rounding_type);
   write_bits(&bs_vop, context->mpeg4.pps.vop_fields.bits.intra_dc_vlc_thr, 3);
   if (context->mpeg4.pps.vol_fields.bits.interlaced) {
      write_bit(&bs_vop, context->mpeg4.pps.vop_fields.bits.top_field_first);
      write_bit(&bs_vop, context->mpeg4.pps.vop_fields.bits.alternate_vertical_scan_flag);
   }

   write_bits(&bs_vop, context->mpeg4.quant_scale, context->mpeg4.pps.quant_precision);
   if (vop_coding_type+1 != PIPE_MPEG12_PICTURE_CODING_TYPE_I)
      write_bits(&bs_vop, context->desc.mpeg4.vop_fcode_forward, 3);
   if (vop_coding_type+1 == PIPE_MPEG12_PICTURE_CODING_TYPE_B)
      write_bits(&bs_vop, context->desc.mpeg4.vop_fcode_backward, 3);

   /* append only the whole bytes actually written; trailing bits of the
      last partial byte stay zero */
   vop_size = bs_vop.pos/8;
   memcpy(context->mpeg4.start_code + context->mpeg4.start_code_size, vop, vop_size);
   context->mpeg4.start_code_size += vop_size;
}
459
460 static unsigned int
461 bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
462 {
463 struct vl_vlc vlc = {0};
464 int i;
465
466 /* search the first 64 bytes for a startcode */
467 vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
468 for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
469 if (vl_vlc_peekbits(&vlc, bits) == code)
470 return 1;
471 vl_vlc_eatbits(&vlc, 8);
472 vl_vlc_fillbits(&vlc);
473 }
474
475 return 0;
476 }
477
478 static void
479 handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
480 {
481 enum pipe_video_format format;
482 unsigned num_buffers = 0;
483 void * const *buffers[2];
484 unsigned sizes[2];
485 static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
486 static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
487
488 format = u_reduce_video_profile(context->decoder->profile);
489 switch (format) {
490 case PIPE_VIDEO_FORMAT_MPEG4_AVC:
491 if (bufHasStartcode(buf, 0x000001, 24))
492 break;
493
494 buffers[num_buffers] = (void *const)&start_code_h264;
495 sizes[num_buffers++] = sizeof(start_code_h264);
496 break;
497 case PIPE_VIDEO_FORMAT_VC1:
498 if (bufHasStartcode(buf, 0x0000010d, 32) ||
499 bufHasStartcode(buf, 0x0000010c, 32) ||
500 bufHasStartcode(buf, 0x0000010b, 32))
501 break;
502
503 buffers[num_buffers] = (void *const)&start_code_vc1;
504 sizes[num_buffers++] = sizeof(start_code_vc1);
505 break;
506 case PIPE_VIDEO_FORMAT_MPEG4:
507 if (bufHasStartcode(buf, 0x000001, 24))
508 break;
509
510 vlVaDecoderFixMPEG4Startcode(context);
511 buffers[num_buffers] = (void *)context->mpeg4.start_code;
512 sizes[num_buffers++] = context->mpeg4.start_code_size;
513 default:
514 break;
515 }
516
517 buffers[num_buffers] = buf->data;
518 sizes[num_buffers] = buf->size;
519 ++num_buffers;
520 context->decoder->decode_bitstream(context->decoder, context->target, NULL,
521 num_buffers, (const void * const*)buffers, sizes);
522 }
523
524 VAStatus
525 vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
526 {
527 vlVaDriver *drv;
528 vlVaContext *context;
529
530 unsigned i;
531
532 if (!ctx)
533 return VA_STATUS_ERROR_INVALID_CONTEXT;
534
535 drv = VL_VA_DRIVER(ctx);
536 if (!drv)
537 return VA_STATUS_ERROR_INVALID_CONTEXT;
538
539 context = handle_table_get(drv->htab, context_id);
540 if (!context)
541 return VA_STATUS_ERROR_INVALID_CONTEXT;
542
543 for (i = 0; i < num_buffers; ++i) {
544 vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
545 if (!buf)
546 return VA_STATUS_ERROR_INVALID_BUFFER;
547
548 switch (buf->type) {
549 case VAPictureParameterBufferType:
550 handlePictureParameterBuffer(drv, context, buf);
551 break;
552
553 case VAIQMatrixBufferType:
554 handleIQMatrixBuffer(context, buf);
555 break;
556
557 case VASliceParameterBufferType:
558 handleSliceParameterBuffer(context, buf);
559 break;
560
561 case VASliceDataBufferType:
562 handleVASliceDataBufferType(context, buf);
563 break;
564
565 default:
566 break;
567 }
568 }
569
570 return VA_STATUS_SUCCESS;
571 }
572
573 VAStatus
574 vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
575 {
576 vlVaDriver *drv;
577 vlVaContext *context;
578
579 if (!ctx)
580 return VA_STATUS_ERROR_INVALID_CONTEXT;
581
582 drv = VL_VA_DRIVER(ctx);
583 if (!drv)
584 return VA_STATUS_ERROR_INVALID_CONTEXT;
585
586 context = handle_table_get(drv->htab, context_id);
587 if (!context)
588 return VA_STATUS_ERROR_INVALID_CONTEXT;
589
590 context->mpeg4.frame_num++;
591 context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
592
593 return VA_STATUS_SUCCESS;
594 }