diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index 338e0902d658a7dbf5da16c70b8246fbc6b3b90d..b2be7af8c4699e35215c433151ce19538d60f9b8 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -65,6 +65,7 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
    context->target_id = render_target;
    surf->ctx = context_id;
    context->target = surf->buffer;
+   context->mjpeg.sampling_factor = 0;
 
    if (!context->decoder) {
 
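
The new mjpeg.sampling_factor field is cleared at the start of every picture: the MJPEG picture parameter handler rebuilds it by shifting in one horizontal/vertical sampling nibble pair per JPEG component, so a stale value from a previous picture would corrupt the packed result. A minimal standalone sketch of that packing, assuming the nibble-pair scheme implied by the 0x221111/0x211111 values tested in vlVaEndPicture further down:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
       /* h/v sampling factors of a 4:2:0 baseline JPEG:
        * Y = 2x2, Cb = 1x1, Cr = 1x1 */
       const uint8_t h[3] = { 2, 1, 1 };
       const uint8_t v[3] = { 2, 1, 1 };
       uint32_t sampling_factor = 0;

       for (int i = 0; i < 3; ++i) {
          sampling_factor = (sampling_factor << 4) | h[i];
          sampling_factor = (sampling_factor << 4) | v[i];
       }

       /* prints 0x221111 -- the 4:2:0 value vlVaEndPicture keeps as NV12 */
       printf("0x%06x\n", (unsigned)sampling_factor);
       return 0;
    }
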
@@ -164,20 +165,27 @@ handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
       vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
       break;
 
+   case PIPE_VIDEO_FORMAT_JPEG:
+      vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
+      break;
+
    default:
       break;
    }
 
    /* Create the decoder once max_references is known. */
    if (!context->decoder) {
+      enum pipe_video_format format =
+         u_reduce_video_profile(context->templat.profile);
+
       if (!context->target)
          return VA_STATUS_ERROR_INVALID_CONTEXT;
 
-      if (context->templat.max_references == 0)
+      if (context->templat.max_references == 0 &&
+         format != PIPE_VIDEO_FORMAT_JPEG)
          return VA_STATUS_ERROR_INVALID_BUFFER;
 
-      if (u_reduce_video_profile(context->templat.profile) ==
-          PIPE_VIDEO_FORMAT_MPEG4_AVC)
+      if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
          context->templat.level = u_get_h264_level(context->templat.width,
             context->templat.height, &context->templat.max_references);
 
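
For JPEG the decoder can now be created although max_references is still zero: baseline JPEG is intra-only and never needs reference surfaces, so the old unconditional VA_STATUS_ERROR_INVALID_BUFFER rejection would be wrong for it. The guard, restated as a self-contained predicate (the enum is a stand-in for mesa's pipe_video_format):

    #include <stdbool.h>

    /* Stand-ins for the relevant pipe_video_format values. */
    enum sketch_video_format {
       SKETCH_FORMAT_MPEG4_AVC,
       SKETCH_FORMAT_JPEG
    };

    /* A picture parameter buffer announcing zero reference frames is
     * only an error for codecs that actually use references. */
    static bool sketch_invalid_reference_count(enum sketch_video_format format,
                                               unsigned max_references)
    {
       return max_references == 0 && format != SKETCH_FORMAT_JPEG;
    }
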
@@ -213,6 +221,10 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
       vlVaHandleIQMatrixBufferHEVC(context, buf);
       break;
 
+   case PIPE_VIDEO_FORMAT_JPEG:
+      vlVaHandleIQMatrixBufferMJPEG(context, buf);
+      break;
+
    default:
       break;
    }
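
vlVaHandleIQMatrixBufferMJPEG has to pick up the baseline quantiser tables from libva's VAIQMatrixBufferJPEGBaseline. A hypothetical sketch of that copy; the destination struct is an assumption, not mesa's actual layout:

    #include <string.h>
    #include <va/va.h>
    #include <va/va_dec_jpeg.h>

    /* Assumed destination; mesa keeps the real tables in its
     * mjpeg picture descriptor. */
    struct sketch_mjpeg_quant {
       unsigned char quantiser_table[4][64];
    };

    static void sketch_handle_iq_matrix_mjpeg(struct sketch_mjpeg_quant *dst,
                                              const VAIQMatrixBufferJPEGBaseline *iq)
    {
       for (unsigned i = 0; i < 4; ++i) {
          /* only tables the application marked as loaded are valid */
          if (!iq->load_quantiser_table[i])
             continue;
          memcpy(dst->quantiser_table[i], iq->quantiser_table[i],
                 sizeof(dst->quantiser_table[i]));
       }
    }
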
@@ -242,6 +254,10 @@ handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
       vlVaHandleSliceParameterBufferHEVC(context, buf);
       break;
 
+   case PIPE_VIDEO_FORMAT_JPEG:
+      vlVaHandleSliceParameterBufferMJPEG(context, buf);
+      break;
+
    default:
       break;
    }
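
The JPEG slice parameters arrive as VASliceParameterBufferJPEGBaseline, which mostly binds each component in the scan to its DC/AC Huffman table selectors and carries the restart interval. A sketch of what the handler presumably consumes (destination fields again assumed):

    #include <va/va.h>
    #include <va/va_dec_jpeg.h>

    /* Assumed destination for the scan description. */
    struct sketch_mjpeg_scan {
       unsigned char component_selector[4];
       unsigned char dc_table_selector[4];
       unsigned char ac_table_selector[4];
       unsigned restart_interval;
       unsigned num_mcus;
    };

    static void sketch_handle_slice_param_mjpeg(struct sketch_mjpeg_scan *scan,
                                                const VASliceParameterBufferJPEGBaseline *sp)
    {
       for (unsigned i = 0; i < sp->num_components && i < 4; ++i) {
          scan->component_selector[i] = sp->components[i].component_selector;
          scan->dc_table_selector[i]  = sp->components[i].dc_table_selector;
          scan->ac_table_selector[i]  = sp->components[i].ac_table_selector;
       }
       scan->restart_interval = sp->restart_interval;
       scan->num_mcus = sp->num_mcus;
    }
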
@@ -310,6 +326,10 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
       vlVaDecoderFixMPEG4Startcode(context);
       buffers[num_buffers] = (void *)context->mpeg4.start_code;
       sizes[num_buffers++] = context->mpeg4.start_code_size;
+      break;
+   case PIPE_VIDEO_FORMAT_JPEG:
+      /* TODO */
+      break;
    default:
       break;
    }
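
handleVASliceDataBufferType builds an array of bitstream chunks: codec-specific prefixes (such as the MPEG-4 start code above) are queued first and the application's slice data follows, all submitted in a single decode_bitstream call; JPEG is a placeholder for now. A standalone sketch of that chunk-array pattern (all names here are stand-ins):

    #include <stdbool.h>
    #include <stddef.h>

    #define SKETCH_MAX_CHUNKS 4

    /* Stand-in for pipe_video_codec::decode_bitstream(). */
    static void sketch_decode_bitstream(const void * const *buffers,
                                        const size_t *sizes, unsigned num)
    {
       (void)buffers; (void)sizes; (void)num;
    }

    static const unsigned char sketch_start_code[3] = { 0x00, 0x00, 0x01 };

    static void sketch_submit_slice(const void *slice_data, size_t slice_size,
                                    bool needs_start_code)
    {
       const void *buffers[SKETCH_MAX_CHUNKS];
       size_t sizes[SKETCH_MAX_CHUNKS];
       unsigned num = 0;

       if (needs_start_code) {            /* e.g. the MPEG-4 case above */
          buffers[num] = sketch_start_code;
          sizes[num++] = sizeof(sketch_start_code);
       }

       buffers[num] = slice_data;         /* the application's slice data */
       sizes[num++] = slice_size;

       sketch_decode_bitstream(buffers, sizes, num);
    }
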
@@ -550,6 +569,10 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
          vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
          break;
 
+      case VAHuffmanTableBufferType:
+         vlVaHandleHuffmanTableBufferType(context, buf);
+         break;
+
       default:
          break;
       }
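
VAHuffmanTableBufferType is specific to JPEG decode: it delivers up to two DC and two AC Huffman code tables in libva's VAHuffmanTableBufferJPEGBaseline layout (the BITS/HUFFVAL arrays of ITU-T T.81). A hypothetical sketch of the copy the new handler performs; the destination is assumed:

    #include <string.h>
    #include <va/va.h>
    #include <va/va_dec_jpeg.h>

    /* Assumed destination: one DC and one AC table per slot. */
    struct sketch_huffman_tables {
       unsigned char num_dc_codes[2][16];
       unsigned char dc_values[2][12];
       unsigned char num_ac_codes[2][16];
       unsigned char ac_values[2][162];
    };

    static void sketch_handle_huffman_table(struct sketch_huffman_tables *dst,
                                            const VAHuffmanTableBufferJPEGBaseline *ht)
    {
       for (unsigned i = 0; i < 2; ++i) {
          if (!ht->load_huffman_table[i])
             continue;
          memcpy(dst->num_dc_codes[i], ht->huffman_table[i].num_dc_codes, 16);
          memcpy(dst->dc_values[i],    ht->huffman_table[i].dc_values,    12);
          memcpy(dst->num_ac_codes[i], ht->huffman_table[i].num_ac_codes, 16);
          memcpy(dst->ac_values[i],    ht->huffman_table[i].ac_values,   162);
       }
    }
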
@@ -567,6 +590,10 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
    vlVaBuffer *coded_buf;
    vlVaSurface *surf;
    void *feedback;
+   struct pipe_screen *screen;
+   bool interlaced;
+   bool realloc = false;
+   enum pipe_format format;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -593,6 +620,53 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
    surf = handle_table_get(drv->htab, context->target_id);
    context->mpeg4.frame_num++;
 
+   screen = context->decoder->context->screen;
+   interlaced = screen->get_video_param(screen, context->decoder->profile,
+                                        PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
+                                        PIPE_VIDEO_CAP_SUPPORTS_INTERLACED);
+
+   if (surf->buffer->interlaced != interlaced) {
+      surf->templat.interlaced = screen->get_video_param(screen, context->decoder->profile,
+                                                         PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
+                                                         PIPE_VIDEO_CAP_PREFERS_INTERLACED);
+      realloc = true;
+   }
+
+   format = screen->get_video_param(screen, context->decoder->profile,
+                                    PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
+                                    PIPE_VIDEO_CAP_PREFERED_FORMAT);
+
+   if (surf->buffer->buffer_format != format &&
+       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
+      /* only switch surfaces that are still in their original NV12 format */
+      surf->templat.buffer_format = format;
+      realloc = true;
+   }
+
+   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG &&
+       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
+      if (context->mjpeg.sampling_factor == 0x211111 ||
+          context->mjpeg.sampling_factor == 0x221212) {
+         surf->templat.buffer_format = PIPE_FORMAT_YUYV;
+         realloc = true;
+      } else if (context->mjpeg.sampling_factor != 0x221111) {
+         /* Not NV12 either */
+         mtx_unlock(&drv->mutex);
+         return VA_STATUS_ERROR_INVALID_SURFACE;
+      }
+   }
+
+   if (realloc) {
+      surf->buffer->destroy(surf->buffer);
+
+      if (vlVaHandleSurfaceAllocate(ctx, surf, &surf->templat) != VA_STATUS_SUCCESS) {
+         mtx_unlock(&drv->mutex);
+         return VA_STATUS_ERROR_ALLOCATION_FAILED;
+      }
+
+      context->target = surf->buffer;
+   }
+
    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
       coded_buf = context->coded_buf;
       getEncParamPreset(context);
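
The new end-of-picture logic re-allocates the render target whenever the driver's interlacing or preferred-format capabilities disagree with how the surface was created, and for JPEG it derives the surface format from the sampling factors packed earlier: 4:2:0 stays NV12, the two recognized 4:2:2 layouts switch to packed YUYV, and anything else fails with VA_STATUS_ERROR_INVALID_SURFACE. The mapping, restated standalone:

    #include <stdint.h>

    enum sketch_surface_format { SKETCH_NV12, SKETCH_YUYV, SKETCH_UNSUPPORTED };

    /* Constants are the packed h/v sampling factors tested above. */
    static enum sketch_surface_format
    sketch_jpeg_surface_format(uint32_t sampling_factor)
    {
       switch (sampling_factor) {
       case 0x221111:           /* 4:2:0 -> keep the NV12 allocation */
          return SKETCH_NV12;
       case 0x211111:           /* 4:2:2 layouts -> packed YUYV */
       case 0x221212:
          return SKETCH_YUYV;
       default:                 /* not expressible -> invalid surface */
          return SKETCH_UNSUPPORTED;
       }
    }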