57d1fb1eb69eaa25da41409c97ed3a4010252c1b
[mesa.git] / src / gallium / state_trackers / va / picture.c
1 /**************************************************************************
2 *
3 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
4 * Copyright 2014 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 #include "pipe/p_video_codec.h"
30
31 #include "util/u_handle_table.h"
32 #include "util/u_video.h"
33
34 #include "vl/vl_vlc.h"
35
36 #include "va_private.h"
37
38 VAStatus
39 vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
40 {
41 vlVaDriver *drv;
42 vlVaContext *context;
43 vlVaSurface *surf;
44
45 if (!ctx)
46 return VA_STATUS_ERROR_INVALID_CONTEXT;
47
48 drv = VL_VA_DRIVER(ctx);
49 if (!drv)
50 return VA_STATUS_ERROR_INVALID_CONTEXT;
51
52 context = handle_table_get(drv->htab, context_id);
53 if (!context)
54 return VA_STATUS_ERROR_INVALID_CONTEXT;
55
56 surf = handle_table_get(drv->htab, render_target);
57 if (!surf || !surf->buffer)
58 return VA_STATUS_ERROR_INVALID_SURFACE;
59
60 context->target = surf->buffer;
61 context->decoder->begin_frame(context->decoder, context->target, NULL);
62
63 return VA_STATUS_SUCCESS;
64 }
65
66 static void
67 getReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
68 struct pipe_video_buffer **ref_frame)
69 {
70 vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
71 if (surf)
72 *ref_frame = surf->buffer;
73 else
74 *ref_frame = NULL;
75 }
76
/* Translate the codec-specific VAPictureParameterBuffer into the matching
 * pipe_*_picture_desc fields of the context.  The commented-out field names
 * below mark VA-API parameters that are deliberately not forwarded to the
 * pipe decoder. */
static void
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAPictureParameterBufferMPEG2 *mpeg2;
   VAPictureParameterBufferH264 *h264;
   VAPictureParameterBufferVC1 * vc1;

   switch (u_reduce_video_profile(context->decoder->profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      assert(buf->size >= sizeof(VAPictureParameterBufferMPEG2) && buf->num_elements == 1);
      mpeg2 = buf->data;
      /*horizontal_size;*/
      /*vertical_size;*/
      /* forward/backward reference surfaces become ref[0]/ref[1] */
      getReferenceFrame(drv, mpeg2->forward_reference_picture, &context->desc.mpeg12.ref[0]);
      getReferenceFrame(drv, mpeg2->backward_reference_picture, &context->desc.mpeg12.ref[1]);
      context->desc.mpeg12.picture_coding_type = mpeg2->picture_coding_type;
      /* VA packs four 4-bit f_code values into one 16-bit field; the pipe
       * interface stores each nibble separately, biased by -1 */
      context->desc.mpeg12.f_code[0][0] = ((mpeg2->f_code >> 12) & 0xf) - 1;
      context->desc.mpeg12.f_code[0][1] = ((mpeg2->f_code >> 8) & 0xf) - 1;
      context->desc.mpeg12.f_code[1][0] = ((mpeg2->f_code >> 4) & 0xf) - 1;
      context->desc.mpeg12.f_code[1][1] = (mpeg2->f_code & 0xf) - 1;
      context->desc.mpeg12.intra_dc_precision =
         mpeg2->picture_coding_extension.bits.intra_dc_precision;
      context->desc.mpeg12.picture_structure =
         mpeg2->picture_coding_extension.bits.picture_structure;
      context->desc.mpeg12.top_field_first =
         mpeg2->picture_coding_extension.bits.top_field_first;
      context->desc.mpeg12.frame_pred_frame_dct =
         mpeg2->picture_coding_extension.bits.frame_pred_frame_dct;
      context->desc.mpeg12.concealment_motion_vectors =
         mpeg2->picture_coding_extension.bits.concealment_motion_vectors;
      context->desc.mpeg12.q_scale_type =
         mpeg2->picture_coding_extension.bits.q_scale_type;
      context->desc.mpeg12.intra_vlc_format =
         mpeg2->picture_coding_extension.bits.intra_vlc_format;
      context->desc.mpeg12.alternate_scan =
         mpeg2->picture_coding_extension.bits.alternate_scan;
      /*repeat_first_field*/
      /*progressive_frame*/
      /*is_first_field*/
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      assert(buf->size >= sizeof(VAPictureParameterBufferH264) && buf->num_elements == 1);
      h264 = buf->data;
      /*CurrPic*/
      context->desc.h264.field_order_cnt[0] = h264->CurrPic.TopFieldOrderCnt;
      context->desc.h264.field_order_cnt[1] = h264->CurrPic.BottomFieldOrderCnt;
      /*ReferenceFrames[16]*/
      /*picture_width_in_mbs_minus1*/
      /*picture_height_in_mbs_minus1*/
      /*bit_depth_luma_minus8*/
      /*bit_depth_chroma_minus8*/
      context->desc.h264.num_ref_frames = h264->num_ref_frames;
      /*chroma_format_idc*/
      /*residual_colour_transform_flag*/
      /*gaps_in_frame_num_value_allowed_flag*/
      /* seq_fields are stored into the SPS hanging off the PPS;
       * NOTE(review): assumes context->desc.h264.pps and pps->sps were
       * allocated when the context was created — confirm in va context setup */
      context->desc.h264.pps->sps->frame_mbs_only_flag =
         h264->seq_fields.bits.frame_mbs_only_flag;
      context->desc.h264.pps->sps->mb_adaptive_frame_field_flag =
         h264->seq_fields.bits.mb_adaptive_frame_field_flag;
      context->desc.h264.pps->sps->direct_8x8_inference_flag =
         h264->seq_fields.bits.direct_8x8_inference_flag;
      /*MinLumaBiPredSize8x8*/
      context->desc.h264.pps->sps->log2_max_frame_num_minus4 =
         h264->seq_fields.bits.log2_max_frame_num_minus4;
      context->desc.h264.pps->sps->pic_order_cnt_type =
         h264->seq_fields.bits.pic_order_cnt_type;
      context->desc.h264.pps->sps->log2_max_pic_order_cnt_lsb_minus4 =
         h264->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4;
      context->desc.h264.pps->sps->delta_pic_order_always_zero_flag =
         h264->seq_fields.bits.delta_pic_order_always_zero_flag;
      /*num_slice_groups_minus1*/
      /*slice_group_map_type*/
      /*slice_group_change_rate_minus1*/
      context->desc.h264.pps->pic_init_qp_minus26 =
         h264->pic_init_qp_minus26;
      /*pic_init_qs_minus26*/
      context->desc.h264.pps->chroma_qp_index_offset =
         h264->chroma_qp_index_offset;
      context->desc.h264.pps->second_chroma_qp_index_offset =
         h264->second_chroma_qp_index_offset;
      context->desc.h264.pps->entropy_coding_mode_flag =
         h264->pic_fields.bits.entropy_coding_mode_flag;
      context->desc.h264.pps->weighted_pred_flag =
         h264->pic_fields.bits.weighted_pred_flag;
      context->desc.h264.pps->weighted_bipred_idc =
         h264->pic_fields.bits.weighted_bipred_idc;
      context->desc.h264.pps->transform_8x8_mode_flag =
         h264->pic_fields.bits.transform_8x8_mode_flag;
      context->desc.h264.field_pic_flag =
         h264->pic_fields.bits.field_pic_flag;
      context->desc.h264.pps->constrained_intra_pred_flag =
         h264->pic_fields.bits.constrained_intra_pred_flag;
      /* VA calls this pic_order_present_flag; the pipe/H.264 spec name is
       * bottom_field_pic_order_in_frame_present_flag */
      context->desc.h264.pps->bottom_field_pic_order_in_frame_present_flag =
         h264->pic_fields.bits.pic_order_present_flag;
      context->desc.h264.pps->deblocking_filter_control_present_flag =
         h264->pic_fields.bits.deblocking_filter_control_present_flag;
      context->desc.h264.pps->redundant_pic_cnt_present_flag =
         h264->pic_fields.bits.redundant_pic_cnt_present_flag;
      /*reference_pic_flag*/
      context->desc.h264.frame_num = h264->frame_num;
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      assert(buf->size >= sizeof(VAPictureParameterBufferVC1) && buf->num_elements == 1);
      vc1 = buf->data;
      getReferenceFrame(drv, vc1->forward_reference_picture, &context->desc.vc1.ref[0]);
      getReferenceFrame(drv, vc1->backward_reference_picture, &context->desc.vc1.ref[1]);
      context->desc.vc1.picture_type = vc1->picture_fields.bits.picture_type;
      context->desc.vc1.frame_coding_mode = vc1->picture_fields.bits.frame_coding_mode;
      /* non-zero post_processing enables both flags below */
      context->desc.vc1.postprocflag = vc1->post_processing != 0;
      context->desc.vc1.pulldown = vc1->sequence_fields.bits.pulldown;
      context->desc.vc1.interlace = vc1->sequence_fields.bits.interlace;
      context->desc.vc1.tfcntrflag = vc1->sequence_fields.bits.tfcntrflag;
      context->desc.vc1.finterpflag = vc1->sequence_fields.bits.finterpflag;
      context->desc.vc1.psf = vc1->sequence_fields.bits.psf;
      context->desc.vc1.dquant = vc1->pic_quantizer_fields.bits.dquant;
      context->desc.vc1.panscan_flag = vc1->entrypoint_fields.bits.panscan_flag;
      context->desc.vc1.refdist_flag =
         vc1->reference_fields.bits.reference_distance_flag;
      context->desc.vc1.quantizer = vc1->pic_quantizer_fields.bits.quantizer;
      context->desc.vc1.extended_mv = vc1->mv_fields.bits.extended_mv_flag;
      context->desc.vc1.extended_dmv = vc1->mv_fields.bits.extended_dmv_flag;
      context->desc.vc1.overlap = vc1->sequence_fields.bits.overlap;
      context->desc.vc1.vstransform =
         vc1->transform_fields.bits.variable_sized_transform_flag;
      context->desc.vc1.loopfilter = vc1->entrypoint_fields.bits.loopfilter;
      context->desc.vc1.fastuvmc = vc1->fast_uvmc_flag;
      context->desc.vc1.range_mapy_flag = vc1->range_mapping_fields.bits.luma_flag;
      context->desc.vc1.range_mapy = vc1->range_mapping_fields.bits.luma;
      context->desc.vc1.range_mapuv_flag = vc1->range_mapping_fields.bits.chroma_flag;
      context->desc.vc1.range_mapuv = vc1->range_mapping_fields.bits.chroma;
      context->desc.vc1.multires = vc1->sequence_fields.bits.multires;
      context->desc.vc1.syncmarker = vc1->sequence_fields.bits.syncmarker;
      context->desc.vc1.rangered = vc1->sequence_fields.bits.rangered;
      context->desc.vc1.maxbframes = vc1->sequence_fields.bits.max_b_frames;
      context->desc.vc1.deblockEnable = vc1->post_processing != 0;
      context->desc.vc1.pquant = vc1->pic_quantizer_fields.bits.pic_quantizer_scale;
      break;

   default:
      break;
   }
}
221
222 static void
223 handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
224 {
225 VAIQMatrixBufferMPEG2 *mpeg2;
226 VAIQMatrixBufferH264 *h264;
227
228 switch (u_reduce_video_profile(context->decoder->profile)) {
229 case PIPE_VIDEO_FORMAT_MPEG12:
230 assert(buf->size >= sizeof(VAIQMatrixBufferMPEG2) && buf->num_elements == 1);
231 mpeg2 = buf->data;
232 if (mpeg2->load_intra_quantiser_matrix)
233 context->desc.mpeg12.intra_matrix = mpeg2->intra_quantiser_matrix;
234 else
235 context->desc.mpeg12.intra_matrix = NULL;
236
237 if (mpeg2->load_non_intra_quantiser_matrix)
238 context->desc.mpeg12.non_intra_matrix = mpeg2->non_intra_quantiser_matrix;
239 else
240 context->desc.mpeg12.non_intra_matrix = NULL;
241 break;
242
243 case PIPE_VIDEO_FORMAT_MPEG4_AVC:
244 assert(buf->size >= sizeof(VAIQMatrixBufferH264) && buf->num_elements == 1);
245 h264 = buf->data;
246 memcpy(&context->desc.h264.pps->ScalingList4x4, h264->ScalingList4x4, 6 * 16);
247 memcpy(&context->desc.h264.pps->ScalingList8x8, h264->ScalingList8x8, 2 * 64);
248 break;
249
250 default:
251 break;
252 }
253 }
254
255 static void
256 handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
257 {
258 VASliceParameterBufferH264 *h264;
259
260 switch (u_reduce_video_profile(context->decoder->profile)) {
261 case PIPE_VIDEO_FORMAT_MPEG4_AVC:
262 assert(buf->size >= sizeof(VASliceParameterBufferH264) && buf->num_elements == 1);
263 h264 = buf->data;
264 context->desc.h264.num_ref_idx_l0_active_minus1 =
265 h264->num_ref_idx_l0_active_minus1;
266 context->desc.h264.num_ref_idx_l1_active_minus1 =
267 h264->num_ref_idx_l1_active_minus1;
268 break;
269
270 default:
271 break;
272 }
273 }
274
275 static void
276 handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
277 {
278 unsigned num_buffers = 0;
279 void * const *buffers[2];
280 unsigned sizes[2];
281 enum pipe_video_format format;
282
283 format = u_reduce_video_profile(context->decoder->profile);
284 if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC ||
285 format == PIPE_VIDEO_FORMAT_VC1) {
286 struct vl_vlc vlc = {0};
287 bool found = false;
288 int peek_bits, i;
289
290 /* search the first 64 bytes for a startcode */
291 vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
292 peek_bits = (format == PIPE_VIDEO_FORMAT_MPEG4_AVC) ? 24 : 32;
293 for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= peek_bits; ++i) {
294 uint32_t value = vl_vlc_peekbits(&vlc, peek_bits);
295 if ((format == PIPE_VIDEO_FORMAT_MPEG4_AVC && value == 0x000001) ||
296 (format == PIPE_VIDEO_FORMAT_VC1 && (value == 0x0000010d ||
297 value == 0x0000010c || value == 0x0000010b))) {
298 found = true;
299 break;
300 }
301 vl_vlc_eatbits(&vlc, 8);
302 vl_vlc_fillbits(&vlc);
303 }
304 /* none found, ok add one manually */
305 if (!found) {
306 static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
307 static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
308
309 if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
310 buffers[num_buffers] = (void *const)&start_code_h264;
311 sizes[num_buffers] = sizeof(start_code_h264);
312 }
313 else {
314 buffers[num_buffers] = (void *const)&start_code_vc1;
315 sizes[num_buffers] = sizeof(start_code_vc1);
316 }
317 ++num_buffers;
318 }
319 }
320 buffers[num_buffers] = buf->data;
321 sizes[num_buffers] = buf->size;
322 ++num_buffers;
323 context->decoder->decode_bitstream(context->decoder, context->target, NULL,
324 num_buffers, (const void * const*)buffers, sizes);
325 }
326
327 VAStatus
328 vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
329 {
330 vlVaDriver *drv;
331 vlVaContext *context;
332
333 unsigned i;
334
335 if (!ctx)
336 return VA_STATUS_ERROR_INVALID_CONTEXT;
337
338 drv = VL_VA_DRIVER(ctx);
339 if (!drv)
340 return VA_STATUS_ERROR_INVALID_CONTEXT;
341
342 context = handle_table_get(drv->htab, context_id);
343 if (!context)
344 return VA_STATUS_ERROR_INVALID_CONTEXT;
345
346 for (i = 0; i < num_buffers; ++i) {
347 vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
348 if (!buf)
349 return VA_STATUS_ERROR_INVALID_BUFFER;
350
351 switch (buf->type) {
352 case VAPictureParameterBufferType:
353 handlePictureParameterBuffer(drv, context, buf);
354 break;
355
356 case VAIQMatrixBufferType:
357 handleIQMatrixBuffer(context, buf);
358 break;
359
360 case VASliceParameterBufferType:
361 handleSliceParameterBuffer(context, buf);
362 break;
363
364 case VASliceDataBufferType:
365 handleVASliceDataBufferType(context, buf);
366 break;
367
368 default:
369 break;
370 }
371 }
372
373 return VA_STATUS_SUCCESS;
374 }
375
376 VAStatus
377 vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
378 {
379 vlVaDriver *drv;
380 vlVaContext *context;
381
382 if (!ctx)
383 return VA_STATUS_ERROR_INVALID_CONTEXT;
384
385 drv = VL_VA_DRIVER(ctx);
386 if (!drv)
387 return VA_STATUS_ERROR_INVALID_CONTEXT;
388
389 context = handle_table_get(drv->htab, context_id);
390 if (!context)
391 return VA_STATUS_ERROR_INVALID_CONTEXT;
392
393 context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
394
395 return VA_STATUS_SUCCESS;
396 }