/**************************************************************************
 *
 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_video_codec.h"

#include "util/u_handle_table.h"
#include "util/u_video.h"

#include "vl/vl_vlc.h"
#include "vl/vl_winsys.h"

#include "va_private.h"

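/*
 * vaBeginPicture entry point: bind the render target surface to the context
 * and reset per-frame state before parameter/data buffers are rendered.
 */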
VAStatus
vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaSurface *surf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
      context->desc.mpeg12.intra_matrix = NULL;
      context->desc.mpeg12.non_intra_matrix = NULL;
   }

   surf = handle_table_get(drv->htab, render_target);
   mtx_unlock(&drv->mutex);
   if (!surf || !surf->buffer)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   context->target_id = render_target;
   surf->ctx = context_id;
   context->target = surf->buffer;
   context->mjpeg.sampling_factor = 0;

   if (!context->decoder) {

      /* VPP */
      if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_NV12 &&
          context->target->buffer_format != PIPE_FORMAT_P016)
         return VA_STATUS_ERROR_UNIMPLEMENTED;

      return VA_STATUS_SUCCESS;
   }

   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->needs_begin_frame = true;

   return VA_STATUS_SUCCESS;
}

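/*
 * Look up the pipe_video_buffer backing a VA surface so it can be used as a
 * reference frame; *ref_frame is set to NULL if the surface handle is unknown.
 */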
void
vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
                      struct pipe_video_buffer **ref_frame)
{
   vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
   if (surf)
      *ref_frame = surf->buffer;
   else
      *ref_frame = NULL;
}

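/*
 * Handle VAPictureParameterBufferType: forward the picture parameters to the
 * codec-specific handler and, on first use, create the decoder now that
 * max_references is known.
 */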
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandlePictureParameterBufferH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
      break;

   default:
      break;
   }

   /* Create the decoder once max_references is known. */
   if (!context->decoder) {
      enum pipe_video_format format =
         u_reduce_video_profile(context->templat.profile);

      if (!context->target)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      if (context->templat.max_references == 0 &&
          format != PIPE_VIDEO_FORMAT_JPEG)
         return VA_STATUS_ERROR_INVALID_BUFFER;

      if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->templat.level = u_get_h264_level(context->templat.width,
            context->templat.height, &context->templat.max_references);

      context->decoder = drv->pipe->create_video_codec(drv->pipe,
         &context->templat);

      if (!context->decoder)
         return VA_STATUS_ERROR_ALLOCATION_FAILED;

      context->needs_begin_frame = true;
   }

   return vaStatus;
}

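/* Handle VAIQMatrixBufferType: forward the quantization matrices to the
 * codec-specific handler. */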
static void
handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleIQMatrixBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleIQMatrixBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleIQMatrixBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleIQMatrixBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleIQMatrixBufferMJPEG(context, buf);
      break;

   default:
      break;
   }
}

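/* Handle VASliceParameterBufferType: forward the slice parameters to the
 * codec-specific handler. */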
static void
handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleSliceParameterBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandleSliceParameterBufferVC1(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleSliceParameterBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleSliceParameterBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleSliceParameterBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleSliceParameterBufferMJPEG(context, buf);
      break;

   default:
      break;
   }
}

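/* Check whether the given start code already appears somewhere in the first
 * 64 bytes of the buffer ('code' is matched against the top 'bits' bits at
 * each byte offset). */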
static unsigned int
bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
{
   struct vl_vlc vlc = {0};
   int i;

   /* search the first 64 bytes for a startcode */
   vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
   for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
      if (vl_vlc_peekbits(&vlc, bits) == code)
         return 1;
      vl_vlc_eatbits(&vlc, 8);
      vl_vlc_fillbits(&vlc);
   }

   return 0;
}

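/* Handle VASliceDataBufferType: if the bitstream does not already contain a
 * start code, prepend the codec-specific one, then feed the slice data to
 * the decoder. */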
static void
handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   enum pipe_video_format format;
   unsigned num_buffers = 0;
   void * const *buffers[2];
   unsigned sizes[2];
   static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };

   format = u_reduce_video_profile(context->templat.profile);
   switch (format) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      if (bufHasStartcode(buf, 0x000001, 24))
         break;

      buffers[num_buffers] = (void *const)&start_code_h264;
      sizes[num_buffers++] = sizeof(start_code_h264);
      break;
   case PIPE_VIDEO_FORMAT_HEVC:
      if (bufHasStartcode(buf, 0x000001, 24))
         break;

      buffers[num_buffers] = (void *const)&start_code_h265;
      sizes[num_buffers++] = sizeof(start_code_h265);
      break;
   case PIPE_VIDEO_FORMAT_VC1:
      if (bufHasStartcode(buf, 0x0000010d, 32) ||
          bufHasStartcode(buf, 0x0000010c, 32) ||
          bufHasStartcode(buf, 0x0000010b, 32))
         break;

      if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
         buffers[num_buffers] = (void *const)&start_code_vc1;
         sizes[num_buffers++] = sizeof(start_code_vc1);
      }
      break;
   case PIPE_VIDEO_FORMAT_MPEG4:
      if (bufHasStartcode(buf, 0x000001, 24))
         break;

      vlVaDecoderFixMPEG4Startcode(context);
      buffers[num_buffers] = (void *)context->mpeg4.start_code;
      sizes[num_buffers++] = context->mpeg4.start_code_size;
      break;
   case PIPE_VIDEO_FORMAT_JPEG:
      break;
   default:
      break;
   }

   buffers[num_buffers] = buf->data;
   sizes[num_buffers] = buf->size;
   ++num_buffers;

   if (context->needs_begin_frame) {
      context->decoder->begin_frame(context->decoder, context->target,
                                    &context->desc.base);
      context->needs_begin_frame = false;
   }
   context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
                                      num_buffers, (const void * const*)buffers, sizes);
}

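/* Forward VAEncMiscParameterTypeRateControl to the H.264 or HEVC encode
 * handler; other codecs ignore it. */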
static VAStatus
handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

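/* Forward VAEncMiscParameterTypeFrameRate to the H.264 or HEVC encode
 * handler; other codecs ignore it. */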
static VAStatus
handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

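/* Handle VAEncSequenceParameterBufferType for the H.264 or HEVC encoder. */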
static VAStatus
handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

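/* Handle VAEncMiscParameterBufferType: unwrap the VAEncMiscParameterBuffer
 * and dispatch on the embedded parameter type. */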
static VAStatus
handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   VAEncMiscParameterBuffer *misc;
   misc = buf->data;

   switch (misc->type) {
   case VAEncMiscParameterTypeRateControl:
      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
      break;

   case VAEncMiscParameterTypeFrameRate:
      vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
      break;

   default:
      break;
   }

   return vaStatus;
}

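/* Handle VAEncPictureParameterBufferType for the H.264 or HEVC encoder. */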
static VAStatus
handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

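/* Handle VAEncSliceParameterBufferType for the H.264 or HEVC encoder. */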
static VAStatus
handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

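/*
 * vaRenderPicture entry point: walk the buffers submitted for the current
 * frame and dispatch each one to the matching handler above.
 */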
VAStatus
vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
{
   vlVaDriver *drv;
   vlVaContext *context;
   VAStatus vaStatus = VA_STATUS_SUCCESS;

   unsigned i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   for (i = 0; i < num_buffers; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
      if (!buf) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      switch (buf->type) {
      case VAPictureParameterBufferType:
         vaStatus = handlePictureParameterBuffer(drv, context, buf);
         break;

      case VAIQMatrixBufferType:
         handleIQMatrixBuffer(context, buf);
         break;

      case VASliceParameterBufferType:
         handleSliceParameterBuffer(context, buf);
         break;

      case VASliceDataBufferType:
         handleVASliceDataBufferType(context, buf);
         break;

      case VAProcPipelineParameterBufferType:
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
         break;

      case VAEncSequenceParameterBufferType:
         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
         break;

      case VAEncMiscParameterBufferType:
         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
         break;

      case VAEncPictureParameterBufferType:
         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
         break;

      case VAEncSliceParameterBufferType:
         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
         break;

      case VAHuffmanTableBufferType:
         vlVaHandleHuffmanTableBufferType(context, buf);
         break;

      default:
         break;
      }
   }
   mtx_unlock(&drv->mutex);

   return vaStatus;
}

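/*
 * vaEndPicture entry point: finish the current frame.  The target surface is
 * reallocated first if the driver needs a different interlacing mode or
 * buffer format; for encode contexts the bitstream is written to the coded
 * buffer and the encoder is flushed around IDR boundaries (H.264).
 */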
VAStatus
vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaBuffer *coded_buf;
   vlVaSurface *surf;
   void *feedback;
   struct pipe_screen *screen;
   bool supported;
   bool realloc = false;
   enum pipe_format format;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   mtx_unlock(&drv->mutex);
   if (!context)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!context->decoder) {
      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      /* VPP */
      return VA_STATUS_SUCCESS;
   }

   mtx_lock(&drv->mutex);
   surf = handle_table_get(drv->htab, context->target_id);
   context->mpeg4.frame_num++;

   screen = context->decoder->context->screen;
   supported = screen->get_video_param(screen, context->decoder->profile,
                                       context->decoder->entrypoint,
                                       surf->buffer->interlaced ?
                                       PIPE_VIDEO_CAP_SUPPORTS_INTERLACED :
                                       PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);

   if (!supported) {
      surf->templat.interlaced = screen->get_video_param(screen,
                                       context->decoder->profile,
                                       context->decoder->entrypoint,
                                       PIPE_VIDEO_CAP_PREFERS_INTERLACED);
      realloc = true;
   }

   format = screen->get_video_param(screen, context->decoder->profile,
                                    PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                    PIPE_VIDEO_CAP_PREFERED_FORMAT);

   if (surf->buffer->buffer_format != format &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      /* the surface was originally allocated as NV12 only; switch to the
       * format the driver prefers */
      surf->templat.buffer_format = format;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      if (context->mjpeg.sampling_factor == 0x211111 ||
          context->mjpeg.sampling_factor == 0x221212) {
         surf->templat.buffer_format = PIPE_FORMAT_YUYV;
         realloc = true;
      } else if (context->mjpeg.sampling_factor != 0x221111) {
         /* Not NV12 either */
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }
   }

   if (realloc) {
      struct pipe_video_buffer *old_buf = surf->buffer;

      if (vlVaHandleSurfaceAllocate(drv, surf, &surf->templat) != VA_STATUS_SUCCESS) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }

      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
         if (old_buf->interlaced) {
            struct u_rect src_rect, dst_rect;

            dst_rect.x0 = src_rect.x0 = 0;
            dst_rect.y0 = src_rect.y0 = 0;
            dst_rect.x1 = src_rect.x1 = surf->templat.width;
            dst_rect.y1 = src_rect.y1 = surf->templat.height;
            vl_compositor_yuv_deint_full(&drv->cstate, &drv->compositor,
                                         old_buf, surf->buffer,
                                         &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);
         } else {
            /* Can't convert from progressive to interlaced yet */
            mtx_unlock(&drv->mutex);
            return VA_STATUS_ERROR_INVALID_SURFACE;
         }
      }

      old_buf->destroy(old_buf);
      context->target = surf->buffer;
   }

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      coded_buf = context->coded_buf;
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
         getEncParamPresetH264(context);
         context->desc.h264enc.frame_num_cnt++;
      } else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
         getEncParamPresetH265(context);
      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource, &feedback);
      surf->feedback = feedback;
      surf->coded_buf = coded_buf;
   }

   context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
       u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
      int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
      int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
      surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
      surf->force_flushed = false;
      if (context->first_single_submitted) {
         context->decoder->flush(context->decoder);
         context->first_single_submitted = false;
         surf->force_flushed = true;
      }
      if (p_remain_in_idr == 1) {
         if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
            context->decoder->flush(context->decoder);
            context->first_single_submitted = true;
         }
         else
            context->first_single_submitted = false;
         surf->force_flushed = true;
      }
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
              u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
      context->desc.h265enc.frame_num++;
   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}