[g3dvl] and finally remove pipe_video_context
author    Christian König <deathsimple@vodafone.de>
Fri, 8 Jul 2011 17:22:43 +0000 (19:22 +0200)
committer Christian König <deathsimple@vodafone.de>
Fri, 8 Jul 2011 17:22:43 +0000 (19:22 +0200)
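
For reference, a minimal sketch of the call path after this change (not part of the commit itself). The pipe_context::create_video_decoder entry point and the vl_create_decoder helper are taken from the hunks below; the MPEG-2 profile constant and the wrapper function are illustrative assumptions:

    #include <pipe/p_context.h>
    #include <pipe/p_video_decoder.h>

    /* Driver side: during context creation the auxiliary helper is plugged in,
     * as r300, r600 and softpipe do in the hunks below:
     *    ctx->context.create_video_decoder = vl_create_decoder;
     */

    /* State-tracker side: ask the pipe_context directly instead of going
     * through the removed pipe_video_context. */
    static struct pipe_video_decoder *
    create_mpeg2_decoder(struct pipe_context *pipe, unsigned width, unsigned height)
    {
       return pipe->create_video_decoder(pipe,
                                         PIPE_VIDEO_PROFILE_MPEG2_MAIN,   /* assumed profile */
                                         PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                         PIPE_VIDEO_CHROMA_FORMAT_420,
                                         width, height);
    }
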
31 files changed:
src/gallium/auxiliary/Makefile
src/gallium/auxiliary/vl/vl_compositor.h
src/gallium/auxiliary/vl/vl_context.c [deleted file]
src/gallium/auxiliary/vl/vl_context.h [deleted file]
src/gallium/auxiliary/vl/vl_decoder.c [new file with mode: 0644]
src/gallium/auxiliary/vl/vl_decoder.h [new file with mode: 0644]
src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
src/gallium/auxiliary/vl/vl_video_buffer.h
src/gallium/drivers/r300/r300_context.c
src/gallium/drivers/r300/r300_screen.c
src/gallium/drivers/r600/r600_pipe.c
src/gallium/drivers/softpipe/sp_context.c
src/gallium/drivers/softpipe/sp_screen.c
src/gallium/include/pipe/p_context.h
src/gallium/include/pipe/p_screen.h
src/gallium/include/pipe/p_video_context.h [deleted file]
src/gallium/include/pipe/p_video_decoder.h [new file with mode: 0644]
src/gallium/state_trackers/vdpau/decode.c
src/gallium/state_trackers/vdpau/device.c
src/gallium/state_trackers/vdpau/mixer.c
src/gallium/state_trackers/vdpau/output.c
src/gallium/state_trackers/vdpau/presentation.c
src/gallium/state_trackers/vdpau/surface.c
src/gallium/state_trackers/vdpau/vdpau_private.h
src/gallium/state_trackers/xorg/xvmc/context.c
src/gallium/state_trackers/xorg/xvmc/subpicture.c
src/gallium/state_trackers/xorg/xvmc/surface.c
src/gallium/winsys/g3dvl/dri/dri_winsys.c
src/gallium/winsys/g3dvl/vl_winsys.h
src/gallium/winsys/g3dvl/xlib/xsp_winsys.c

index e37cf21416a9180d1717c83280f28a16ff9617ba..7dae7bc908b8277151e43922fbc337697e88efef 100644 (file)
@@ -148,9 +148,9 @@ C_SOURCES = \
        util/u_resource.c \
        util/u_upload_mgr.c \
        util/u_vbuf_mgr.c \
-       vl/vl_context.c \
        vl/vl_csc.c \
        vl/vl_compositor.c \
+       vl/vl_decoder.c \
        vl/vl_mpeg12_decoder.c \
        vl/vl_mpeg12_bitstream.c \
        vl/vl_zscan.c \
index 97601897b662c606fd4ca597f19bd9f3e4b95402..df662db4d912c65dc76424e1ab25e01bff307e82 100644 (file)
@@ -29,7 +29,7 @@
 #define vl_compositor_h
 
 #include <pipe/p_state.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 #include <pipe/p_video_state.h>
 
 #include "vl_types.h"
diff --git a/src/gallium/auxiliary/vl/vl_context.c b/src/gallium/auxiliary/vl/vl_context.c
deleted file mode 100644 (file)
index fec227d..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <pipe/p_video_context.h>
-
-#include <util/u_memory.h>
-#include <util/u_rect.h>
-#include <util/u_video.h>
-
-#include "vl_context.h"
-#include "vl_compositor.h"
-#include "vl_mpeg12_decoder.h"
-
-static void
-vl_context_destroy(struct pipe_video_context *context)
-{
-   struct vl_context *ctx = (struct vl_context*)context;
-
-   assert(context);
-
-   FREE(ctx);
-}
-
-static struct pipe_video_decoder *
-vl_context_create_decoder(struct pipe_video_context *context,
-                          enum pipe_video_profile profile,
-                          enum pipe_video_entrypoint entrypoint,
-                          enum pipe_video_chroma_format chroma_format,
-                          unsigned width, unsigned height)
-{
-   struct vl_context *ctx = (struct vl_context*)context;
-   unsigned buffer_width, buffer_height;
-   bool pot_buffers;
-
-   assert(context);
-   assert(width > 0 && height > 0);
-   
-   pot_buffers = !ctx->base.screen->get_video_param(ctx->base.screen, profile, PIPE_VIDEO_CAP_NPOT_TEXTURES);
-
-   buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
-   buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
-
-   switch (u_reduce_video_profile(profile)) {
-      case PIPE_VIDEO_CODEC_MPEG12:
-         return vl_create_mpeg12_decoder(context, ctx->pipe, profile, entrypoint,
-                                         chroma_format, buffer_width, buffer_height);
-      default:
-         return NULL;
-   }
-   return NULL;
-}
-
-struct pipe_video_context *
-vl_create_context(struct pipe_context *pipe)
-{
-   struct vl_context *ctx;
-
-   ctx = CALLOC_STRUCT(vl_context);
-
-   if (!ctx)
-      return NULL;
-
-   ctx->base.screen = pipe->screen;
-
-   ctx->base.destroy = vl_context_destroy;
-   ctx->base.create_decoder = vl_context_create_decoder;
-
-   ctx->pipe = pipe;
-
-   return &ctx->base;
-}
diff --git a/src/gallium/auxiliary/vl/vl_context.h b/src/gallium/auxiliary/vl/vl_context.h
deleted file mode 100644 (file)
index 4fbe265..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * Copyright 2011 Christian König.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef vl_context_h
-#define vl_context_h
-
-#include <pipe/p_video_context.h>
-
-struct pipe_screen;
-struct pipe_context;
-
-struct vl_context
-{
-   struct pipe_video_context base;
-   struct pipe_context *pipe;
-};
-
-/* drivers can call this function in their pipe_video_context constructors and pass it
-   an accelerated pipe_context along with suitable buffering modes, etc */
-struct pipe_video_context *
-vl_create_context(struct pipe_context *pipe);
-
-#endif /* vl_context_h */
diff --git a/src/gallium/auxiliary/vl/vl_decoder.c b/src/gallium/auxiliary/vl/vl_decoder.c
new file mode 100644 (file)
index 0000000..2be5c17
--- /dev/null
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <pipe/p_video_decoder.h>
+
+#include <util/u_video.h>
+
+#include "vl_decoder.h"
+#include "vl_mpeg12_decoder.h"
+
+struct pipe_video_decoder *
+vl_create_decoder(struct pipe_context *pipe,
+                  enum pipe_video_profile profile,
+                  enum pipe_video_entrypoint entrypoint,
+                  enum pipe_video_chroma_format chroma_format,
+                  unsigned width, unsigned height)
+{
+   unsigned buffer_width, buffer_height;
+   bool pot_buffers;
+
+   assert(pipe);
+   assert(width > 0 && height > 0);
+   
+   pot_buffers = !pipe->screen->get_video_param
+   (
+      pipe->screen,
+      profile,
+      PIPE_VIDEO_CAP_NPOT_TEXTURES
+   );
+
+   buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
+   buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
+
+   switch (u_reduce_video_profile(profile)) {
+      case PIPE_VIDEO_CODEC_MPEG12:
+         return vl_create_mpeg12_decoder(pipe, profile, entrypoint, chroma_format, buffer_width, buffer_height);
+      default:
+         return NULL;
+   }
+   return NULL;
+}
diff --git a/src/gallium/auxiliary/vl/vl_decoder.h b/src/gallium/auxiliary/vl/vl_decoder.h
new file mode 100644 (file)
index 0000000..440f5ec
--- /dev/null
@@ -0,0 +1,44 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_decoder_h
+#define vl_decoder_h
+
+#include <pipe/p_video_decoder.h>
+
+/**
+ * standard implementation of pipe->create_video_decoder
+ */
+struct pipe_video_decoder *
+vl_create_decoder(struct pipe_context *pipe,
+                  enum pipe_video_profile profile,
+                  enum pipe_video_entrypoint entrypoint,
+                  enum pipe_video_chroma_format chroma_format,
+                  unsigned width, unsigned height);
+
+#endif /* vl_decoder_h */
index c2ddd2cb2cec3935d1da1fb081b31ae681a44bec..b866e0e5aec07677ed44d23de8fb5f42f4a655f9 100644 (file)
@@ -94,7 +94,7 @@ init_zscan_buffer(struct vl_mpeg12_buffer *buffer)
    formats[0] = formats[1] = formats[2] = dec->zscan_source_format;
    buffer->zscan_source = vl_video_buffer_create_ex
    (
-      dec->pipe,
+      dec->base.context,
       dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
       align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
       1, PIPE_VIDEO_CHROMA_FORMAT_444, formats, PIPE_USAGE_STATIC
@@ -277,7 +277,7 @@ vl_mpeg12_buffer_begin_frame(struct pipe_video_decode_buffer *buffer)
    dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
    assert(dec);
 
-   vl_vb_map(&buf->vertex_stream, dec->pipe);
+   vl_vb_map(&buf->vertex_stream, dec->base.context);
 
    sampler_views = buf->zscan_source->get_sampler_view_planes(buf->zscan_source);
 
@@ -293,14 +293,14 @@ vl_mpeg12_buffer_begin_frame(struct pipe_video_decode_buffer *buffer)
          1
       };
 
-      buf->tex_transfer[i] = dec->pipe->get_transfer
+      buf->tex_transfer[i] = dec->base.context->get_transfer
       (
-         dec->pipe, tex,
+         dec->base.context, tex,
          0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
          &rect
       );
 
-      buf->texels[i] = dec->pipe->transfer_map(dec->pipe, buf->tex_transfer[i]);
+      buf->texels[i] = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer[i]);
    }
 
    if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
@@ -407,11 +407,11 @@ vl_mpeg12_buffer_end_frame(struct pipe_video_decode_buffer *buffer)
    dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
    assert(dec);
 
-   vl_vb_unmap(&buf->vertex_stream, dec->pipe);
+   vl_vb_unmap(&buf->vertex_stream, dec->base.context);
 
    for (i = 0; i < VL_MAX_PLANES; ++i) {
-      dec->pipe->transfer_unmap(dec->pipe, buf->tex_transfer[i]);
-      dec->pipe->transfer_destroy(dec->pipe, buf->tex_transfer[i]);
+      dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer[i]);
+      dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer[i]);
    }
 }
 
@@ -423,11 +423,11 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
    assert(decoder);
 
    /* Asserted in softpipe_delete_fs_state() for some reason */
-   dec->pipe->bind_vs_state(dec->pipe, NULL);
-   dec->pipe->bind_fs_state(dec->pipe, NULL);
+   dec->base.context->bind_vs_state(dec->base.context, NULL);
+   dec->base.context->bind_fs_state(dec->base.context, NULL);
 
-   dec->pipe->delete_depth_stencil_alpha_state(dec->pipe, dec->dsa);
-   dec->pipe->delete_sampler_state(dec->pipe, dec->sampler_ycbcr);
+   dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
+   dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
 
    vl_mc_cleanup(&dec->mc_y);
    vl_mc_cleanup(&dec->mc_c);
@@ -442,8 +442,8 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
    vl_zscan_cleanup(&dec->zscan_y);
    vl_zscan_cleanup(&dec->zscan_c);
 
-   dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves_ycbcr);
-   dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves_mv);
+   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
+   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
 
    pipe_resource_reference(&dec->quads.buffer, NULL);
    pipe_resource_reference(&dec->pos.buffer, NULL);
@@ -478,7 +478,7 @@ vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
    buffer->base.decode_bitstream = vl_mpeg12_buffer_decode_bitstream;
    buffer->base.end_frame = vl_mpeg12_buffer_end_frame;
 
-   if (!vl_vb_init(&buffer->vertex_stream, dec->pipe,
+   if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
                    dec->base.width / MACROBLOCK_WIDTH,
                    dec->base.height / MACROBLOCK_HEIGHT))
       goto error_vertex_buffer;
@@ -545,7 +545,7 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
 
    surfaces = dst->get_surfaces(dst);
 
-   dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves_mv);
+   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
    for (i = 0; i < VL_MAX_PLANES; ++i) {
       if (!surfaces[i]) continue;
 
@@ -555,7 +555,7 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
          if (!sv[j]) continue;
 
          vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);;
-         dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
+         dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
 
          vl_mc_render_ref(&buf->mc[i], sv[j][i]);
       }
@@ -563,12 +563,12 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
 
    vb[2] = dec->block_num;
 
-   dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves_ycbcr);
+   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
    for (i = 0; i < VL_MAX_PLANES; ++i) {
       if (!num_ycbcr_blocks[i]) continue;
 
       vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
-      dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
+      dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
 
       vl_zscan_render(&buf->zscan[i] , num_ycbcr_blocks[i]);
 
@@ -585,13 +585,13 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
          if (!num_ycbcr_blocks[i]) continue;
 
          vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
-         dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
+         dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
 
          if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
             vl_idct_prepare_stage2(component == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[component]);
          else {
-            dec->pipe->set_fragment_sampler_views(dec->pipe, 1, &mc_source_sv[component]);
-            dec->pipe->bind_fragment_sampler_states(dec->pipe, 1, &dec->sampler_ycbcr);
+            dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
+            dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
          }
          vl_mc_render_ycbcr(&buf->mc[i], j, num_ycbcr_blocks[component]);
       }
@@ -623,8 +623,8 @@ init_pipe_state(struct vl_mpeg12_decoder *dec)
    dsa.alpha.enabled = 0;
    dsa.alpha.func = PIPE_FUNC_ALWAYS;
    dsa.alpha.ref_value = 0;
-   dec->dsa = dec->pipe->create_depth_stencil_alpha_state(dec->pipe, &dsa);
-   dec->pipe->bind_depth_stencil_alpha_state(dec->pipe, dec->dsa);
+   dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
+   dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
 
    memset(&sampler, 0, sizeof(sampler));
    sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
@@ -636,7 +636,7 @@ init_pipe_state(struct vl_mpeg12_decoder *dec)
    sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
    sampler.compare_func = PIPE_FUNC_ALWAYS;
    sampler.normalized_coords = 1;
-   dec->sampler_ycbcr = dec->pipe->create_sampler_state(dec->pipe, &sampler);
+   dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
    if (!dec->sampler_ycbcr)
       return false;
 
@@ -651,7 +651,7 @@ find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config con
 
    assert(dec);
 
-   screen = dec->pipe->screen;
+   screen = dec->base.context->screen;
 
    for (i = 0; i < num_configs; ++i) {
       if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
@@ -685,17 +685,17 @@ init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_con
    assert(dec);
 
    dec->zscan_source_format = format_config->zscan_source_format;
-   dec->zscan_linear = vl_zscan_layout(dec->pipe, vl_zscan_linear, dec->blocks_per_line);
-   dec->zscan_normal = vl_zscan_layout(dec->pipe, vl_zscan_normal, dec->blocks_per_line);
-   dec->zscan_alternate = vl_zscan_layout(dec->pipe, vl_zscan_alternate, dec->blocks_per_line);
+   dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
+   dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
+   dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
 
    num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
 
-   if (!vl_zscan_init(&dec->zscan_y, dec->pipe, dec->base.width, dec->base.height,
+   if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
                       dec->blocks_per_line, dec->num_blocks, num_channels))
       return false;
 
-   if (!vl_zscan_init(&dec->zscan_c, dec->pipe, dec->chroma_width, dec->chroma_height,
+   if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
                       dec->blocks_per_line, dec->num_blocks, num_channels))
       return false;
 
@@ -710,8 +710,15 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
 
    struct pipe_sampler_view *matrix = NULL;
 
-   nr_of_idct_render_targets = dec->pipe->screen->get_param(dec->pipe->screen, PIPE_CAP_MAX_RENDER_TARGETS);
-   max_inst = dec->pipe->screen->get_shader_param(dec->pipe->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS);
+   nr_of_idct_render_targets = dec->base.context->screen->get_param
+   (
+      dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
+   );
+   
+   max_inst = dec->base.context->screen->get_shader_param
+   (
+      dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
+   );
 
    // Just assume we need 32 inst per render target, not 100% true, but should work in most cases
    if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
@@ -723,7 +730,7 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
    formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
    dec->idct_source = vl_video_buffer_create_ex
    (
-      dec->pipe, dec->base.width / 4, dec->base.height, 1,
+      dec->base.context, dec->base.width / 4, dec->base.height, 1,
       dec->base.chroma_format, formats, PIPE_USAGE_STATIC
    );
 
@@ -733,7 +740,7 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
    formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
    dec->mc_source = vl_video_buffer_create_ex
    (
-      dec->pipe, dec->base.width / nr_of_idct_render_targets,
+      dec->base.context, dec->base.width / nr_of_idct_render_targets,
       dec->base.height / 4, nr_of_idct_render_targets,
       dec->base.chroma_format, formats, PIPE_USAGE_STATIC
    );
@@ -741,14 +748,14 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
    if (!dec->mc_source)
       goto error_mc_source;
 
-   if (!(matrix = vl_idct_upload_matrix(dec->pipe, format_config->idct_scale)))
+   if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
       goto error_matrix;
 
-   if (!vl_idct_init(&dec->idct_y, dec->pipe, dec->base.width, dec->base.height,
+   if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
                      nr_of_idct_render_targets, matrix, matrix))
       goto error_y;
 
-   if(!vl_idct_init(&dec->idct_c, dec->pipe, dec->chroma_width, dec->chroma_height,
+   if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
                     nr_of_idct_render_targets, matrix, matrix))
       goto error_c;
 
@@ -780,7 +787,7 @@ init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_
    formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
    dec->mc_source = vl_video_buffer_create_ex
    (
-      dec->pipe, dec->base.width, dec->base.height, 1,
+      dec->base.context, dec->base.width, dec->base.height, 1,
       dec->base.chroma_format, formats, PIPE_USAGE_STATIC
    );
       
@@ -831,8 +838,7 @@ mc_frag_shader_callback(void *priv, struct vl_mc *mc,
 }
 
 struct pipe_video_decoder *
-vl_create_mpeg12_decoder(struct pipe_video_context *context,
-                         struct pipe_context *pipe,
+vl_create_mpeg12_decoder(struct pipe_context *context,
                          enum pipe_video_profile profile,
                          enum pipe_video_entrypoint entrypoint,
                          enum pipe_video_chroma_format chroma_format,
@@ -860,21 +866,19 @@ vl_create_mpeg12_decoder(struct pipe_video_context *context,
    dec->base.create_buffer = vl_mpeg12_create_buffer;
    dec->base.flush_buffer = vl_mpeg12_decoder_flush_buffer;
 
-   dec->pipe = pipe;
-
    dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
    dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
 
-   dec->quads = vl_vb_upload_quads(dec->pipe);
+   dec->quads = vl_vb_upload_quads(dec->base.context);
    dec->pos = vl_vb_upload_pos(
-      dec->pipe,
+      dec->base.context,
       dec->base.width / MACROBLOCK_WIDTH,
       dec->base.height / MACROBLOCK_HEIGHT
    );
-   dec->block_num = vl_vb_upload_block_num(dec->pipe, dec->num_blocks);
+   dec->block_num = vl_vb_upload_block_num(dec->base.context, dec->num_blocks);
 
-   dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->pipe);
-   dec->ves_mv = vl_vb_get_ves_mv(dec->pipe);
+   dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
+   dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
 
    /* TODO: Implement 422, 444 */
    assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
@@ -922,12 +926,14 @@ vl_create_mpeg12_decoder(struct pipe_video_context *context,
          goto error_sources;
    }
 
-   if (!vl_mc_init(&dec->mc_y, dec->pipe, dec->base.width, dec->base.height, MACROBLOCK_HEIGHT, format_config->mc_scale,
+   if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
+                   MACROBLOCK_HEIGHT, format_config->mc_scale,
                    mc_vert_shader_callback, mc_frag_shader_callback, dec))
       goto error_mc_y;
 
    // TODO
-   if (!vl_mc_init(&dec->mc_c, dec->pipe, dec->base.width, dec->base.height, BLOCK_HEIGHT, format_config->mc_scale,
+   if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
+                   BLOCK_HEIGHT, format_config->mc_scale,
                    mc_vert_shader_callback, mc_frag_shader_callback, dec))
       goto error_mc_c;
 
index 474ae2d5d29014cad545b8e6209d7e1dba799429..01265e368a3c898a881d6b4f2c11bc8a8526a3e9 100644 (file)
@@ -28,7 +28,7 @@
 #ifndef vl_mpeg12_decoder_h
 #define vl_mpeg12_decoder_h
 
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 
 #include "vl_mpeg12_bitstream.h"
 #include "vl_zscan.h"
@@ -44,7 +44,6 @@ struct pipe_context;
 struct vl_mpeg12_decoder
 {
    struct pipe_video_decoder base;
-   struct pipe_context *pipe;
 
    unsigned chroma_width, chroma_height;
 
@@ -93,11 +92,11 @@ struct vl_mpeg12_buffer
    short *texels[VL_MAX_PLANES];
 };
 
-/* drivers can call this function in their pipe_video_context constructors and pass it
-   an accelerated pipe_context along with suitable buffering modes, etc */
+/**
+ * creates a shader based mpeg12 decoder
+ */
 struct pipe_video_decoder *
-vl_create_mpeg12_decoder(struct pipe_video_context *context,
-                         struct pipe_context *pipe,
+vl_create_mpeg12_decoder(struct pipe_context *pipe,
                          enum pipe_video_profile profile,
                          enum pipe_video_entrypoint entrypoint,
                          enum pipe_video_chroma_format chroma_format,
index 172f332712baea5866452c464686d2610e8beb11..78aac3fa0f22ac8518339af072b150e30182cfc1 100644 (file)
@@ -29,7 +29,7 @@
 #define vl_ycbcr_buffer_h
 
 #include <pipe/p_context.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 
 #include "vl_defines.h"
 
index 7d22ffb6a9474809959104986c547daa37efa19c..d94ac74f0e5d512018b293228ce650d3a472d5d6 100644 (file)
@@ -27,6 +27,7 @@
 #include "util/u_simple_list.h"
 #include "util/u_upload_mgr.h"
 #include "os/os_time.h"
+#include "vl/vl_decoder.h"
 #include "vl/vl_video_buffer.h"
 
 #include "r300_cb.h"
@@ -438,6 +439,7 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
     r300_init_state_functions(r300);
     r300_init_resource_functions(r300);
     
+    r300->context.create_video_decoder = vl_create_decoder;
     r300->context.create_video_buffer = vl_video_buffer_create;
 
     r300->vbuf_mgr = u_vbuf_mgr_create(&r300->context, 1024 * 1024, 16,
index 141df11ef91ecb3b868a6b32bdff9f1819e5331d..19b273f4f492b73ad219aa1c937f61da65739232 100644 (file)
@@ -25,7 +25,6 @@
 #include "util/u_format_s3tc.h"
 #include "util/u_memory.h"
 #include "os/os_time.h"
-#include "vl/vl_context.h"
 #include "vl/vl_video_buffer.h"
 
 #include "r300_context.h"
@@ -425,14 +424,6 @@ static boolean r300_is_format_supported(struct pipe_screen* screen,
     return retval == usage;
 }
 
-static struct pipe_video_context *
-r300_video_create(struct pipe_screen *screen, struct pipe_context *pipe)
-{
-   assert(screen);
-
-   return vl_create_context(pipe);
-}
-
 static void r300_destroy_screen(struct pipe_screen* pscreen)
 {
     struct r300_screen* r300screen = r300_screen(pscreen);
@@ -533,7 +524,6 @@ struct pipe_screen* r300_screen_create(struct radeon_winsys *rws)
     r300screen->screen.is_format_supported = r300_is_format_supported;
     r300screen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
     r300screen->screen.context_create = r300_create_context;
-    r300screen->screen.video_context_create = r300_video_create;
     r300screen->screen.fence_reference = r300_fence_reference;
     r300screen->screen.fence_signalled = r300_fence_signalled;
     r300screen->screen.fence_finish = r300_fence_finish;
index d8b51ea4871455f861bb984b8c72905d87bcb3be..76bb1883ede6ce1d68320e8ec9dc5da4bc5198eb 100644 (file)
@@ -38,7 +38,7 @@
 #include <util/u_memory.h>
 #include <util/u_inlines.h>
 #include "util/u_upload_mgr.h"
-#include <vl/vl_context.h>
+#include <vl/vl_decoder.h>
 #include <vl/vl_video_buffer.h>
 #include "os/os_time.h"
 #include <pipebuffer/pb_buffer.h>
@@ -226,6 +226,8 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
        r600_init_context_resource_functions(rctx);
        r600_init_surface_functions(rctx);
        rctx->context.draw_vbo = r600_draw_vbo;
+
+       rctx->context.create_video_decoder = vl_create_decoder;
        rctx->context.create_video_buffer = vl_video_buffer_create;
 
        switch (r600_get_family(rctx->radeon)) {
@@ -302,14 +304,6 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
        return &rctx->context;
 }
 
-static struct pipe_video_context *
-r600_video_create(struct pipe_screen *screen, struct pipe_context *pipe)
-{
-       assert(screen && pipe);
-
-       return vl_create_context(pipe);
-}
-
 /*
  * pipe_screen
  */
@@ -679,7 +673,6 @@ struct pipe_screen *r600_screen_create(struct radeon *radeon)
        rscreen->screen.is_format_supported = r600_is_format_supported;
        rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
        rscreen->screen.context_create = r600_create_context;
-       rscreen->screen.video_context_create = r600_video_create;
        rscreen->screen.fence_reference = r600_fence_reference;
        rscreen->screen.fence_signalled = r600_fence_signalled;
        rscreen->screen.fence_finish = r600_fence_finish;
index 0e623944e59cceadb32c7264d8bee7083a8f1106..2c43602ea1ca5d5ec1bca8212ae3c9b9c2f52128 100644 (file)
@@ -37,6 +37,7 @@
 #include "util/u_memory.h"
 #include "util/u_inlines.h"
 #include "tgsi/tgsi_exec.h"
+#include "vl/vl_decoder.h"
 #include "vl/vl_video_buffer.h"
 #include "sp_clear.h"
 #include "sp_context.h"
@@ -260,6 +261,7 @@ softpipe_create_context( struct pipe_screen *screen,
 
    softpipe->pipe.render_condition = softpipe_render_condition;
    
+   softpipe->pipe.create_video_decoder = vl_create_decoder;
    softpipe->pipe.create_video_buffer = vl_video_buffer_create;
 
    /*
index 98147cfd3c8c638a02b366cec5206b129ac4665d..f952e6046f05263da4bdb2ebf8f0e55f0d1d536b 100644 (file)
@@ -33,7 +33,6 @@
 #include "pipe/p_defines.h"
 #include "pipe/p_screen.h"
 #include "draw/draw_context.h"
-#include "vl/vl_context.h"
 #include "vl/vl_video_buffer.h"
 
 #include "state_tracker/sw_winsys.h"
@@ -300,14 +299,6 @@ softpipe_flush_frontbuffer(struct pipe_screen *_screen,
       winsys->displaytarget_display(winsys, texture->dt, context_private);
 }
 
-static struct pipe_video_context *
-sp_video_create(struct pipe_screen *screen, struct pipe_context *context)
-{
-   assert(screen);
-
-   return vl_create_context(context);
-}
-
 /**
  * Create a new pipe_screen object
  * Note: we're not presently subclassing pipe_screen (no softpipe_screen).
@@ -335,7 +326,6 @@ softpipe_create_screen(struct sw_winsys *winsys)
    screen->base.is_video_format_supported = vl_video_buffer_is_format_supported;
    screen->base.context_create = softpipe_create_context;
    screen->base.flush_frontbuffer = softpipe_flush_frontbuffer;
-   screen->base.video_context_create = sp_video_create;
 
    util_format_s3tc_init();
 
index c02b060e4bc30ac8579e9e68bc0ecf068c3ab503..ac290495a436931e7b709d029c69727e0c9f83a1 100644 (file)
@@ -59,6 +59,8 @@ struct pipe_vertex_buffer;
 struct pipe_vertex_element;
 struct pipe_viewport_state;
 
+enum pipe_video_profile;
+enum pipe_video_entrypoint;
 enum pipe_video_chroma_format;
 enum pipe_format;
 
@@ -399,6 +401,15 @@ struct pipe_context {
     */
    void (*texture_barrier)(struct pipe_context *);
    
+   /**
+    * Creates a video decoder for a specific video codec/profile
+    */
+   struct pipe_video_decoder *(*create_video_decoder)( struct pipe_context *context,
+                                                       enum pipe_video_profile profile,
+                                                       enum pipe_video_entrypoint entrypoint,
+                                                       enum pipe_video_chroma_format chroma_format,
+                                                       unsigned width, unsigned height );
+
    /**
     * Creates a video buffer as decoding target
     */
index 28209346c7871775c0a64e96907dc8e26ad8cc73..b77cf24d542bd993331175f46eebdf7707be5dcd 100644 (file)
@@ -100,9 +100,6 @@ struct pipe_screen {
 
    struct pipe_context * (*context_create)( struct pipe_screen *, void *priv );
 
-   struct pipe_video_context * (*video_context_create)( struct pipe_screen *screen,
-                                                        struct pipe_context *context );
-
    /**
     * Check if the given pipe_format is supported as a texture or
     * drawing surface.
diff --git a/src/gallium/include/pipe/p_video_context.h b/src/gallium/include/pipe/p_video_context.h
deleted file mode 100644 (file)
index 0ac0c4b..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef PIPE_VIDEO_CONTEXT_H
-#define PIPE_VIDEO_CONTEXT_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <pipe/p_video_state.h>
-
-struct pipe_screen;
-struct pipe_surface;
-struct pipe_macroblock;
-struct pipe_picture_desc;
-struct pipe_fence_handle;
-
-/**
- * Gallium video rendering context
- */
-struct pipe_video_context
-{
-   struct pipe_screen *screen;
-
-   /**
-    * destroy context, all objects created from this context
-    * (buffers, decoders, compositors etc...) must be freed before calling this
-    */
-   void (*destroy)(struct pipe_video_context *context);
-
-   /**
-    * create a decoder for a specific video profile
-    */
-   struct pipe_video_decoder *(*create_decoder)(struct pipe_video_context *context,
-                                                enum pipe_video_profile profile,
-                                                enum pipe_video_entrypoint entrypoint,
-                                                enum pipe_video_chroma_format chroma_format,
-                                                unsigned width, unsigned height);
-
-};
-
-/**
- * decoder for a specific video codec
- */
-struct pipe_video_decoder
-{
-   struct pipe_video_context *context;
-
-   enum pipe_video_profile profile;
-   enum pipe_video_entrypoint entrypoint;
-   enum pipe_video_chroma_format chroma_format;
-   unsigned width;
-   unsigned height;
-
-   /**
-    * destroy this video decoder
-    */
-   void (*destroy)(struct pipe_video_decoder *decoder);
-
-   /**
-    * Creates a buffer as decoding input
-    */
-   struct pipe_video_decode_buffer *(*create_buffer)(struct pipe_video_decoder *decoder);
-
-   /**
-    * flush decoder buffer to video hardware
-    */
-   void (*flush_buffer)(struct pipe_video_decode_buffer *decbuf,
-                        unsigned num_ycbcr_blocks[3],
-                        struct pipe_video_buffer *ref_frames[2],
-                        struct pipe_video_buffer *dst);
-};
-
-/**
- * input buffer for a decoder
- */
-struct pipe_video_decode_buffer
-{
-   struct pipe_video_decoder *decoder;
-
-   /**
-    * destroy this decode buffer
-    */
-   void (*destroy)(struct pipe_video_decode_buffer *decbuf);
-
-   /**
-    * map the input buffer into memory before starting decoding
-    */
-   void (*begin_frame)(struct pipe_video_decode_buffer *decbuf);
-
-   /**
-    * set the quantification matrixes
-    */
-   void (*set_quant_matrix)(struct pipe_video_decode_buffer *decbuf,
-                            const uint8_t intra_matrix[64],
-                            const uint8_t non_intra_matrix[64]);
-
-   /**
-    * get the pointer where to put the ycbcr blocks of a component
-    */
-   struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decode_buffer *, int component);
-
-   /**
-    * get the pointer where to put the ycbcr dct block data of a component
-    */
-   short *(*get_ycbcr_buffer)(struct pipe_video_decode_buffer *, int component);
-
-   /**
-    * get the stride of the mv buffer
-    */
-   unsigned (*get_mv_stream_stride)(struct pipe_video_decode_buffer *decbuf);
-
-   /**
-    * get the pointer where to put the motion vectors of a ref frame
-    */
-   struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decode_buffer *decbuf, int ref_frame);
-
-   /**
-    * decode a bitstream
-    */
-   void (*decode_bitstream)(struct pipe_video_decode_buffer *decbuf,
-                            unsigned num_bytes, const void *data,
-                            struct pipe_mpeg12_picture_desc *picture,
-                            unsigned num_ycbcr_blocks[3]);
-
-   /**
-    * unmap decoder buffer before flushing
-    */
-   void (*end_frame)(struct pipe_video_decode_buffer *decbuf);
-};
-
-/**
- * output for decoding / input for displaying
- */
-struct pipe_video_buffer
-{
-   struct pipe_context *context;
-
-   enum pipe_format buffer_format;
-   enum pipe_video_chroma_format chroma_format;
-   unsigned width;
-   unsigned height;
-
-   /**
-    * destroy this video buffer
-    */
-   void (*destroy)(struct pipe_video_buffer *buffer);
-
-   /**
-    * get a individual sampler view for each plane
-    */
-   struct pipe_sampler_view **(*get_sampler_view_planes)(struct pipe_video_buffer *buffer);
-
-   /**
-    * get a individual sampler view for each component
-    */
-   struct pipe_sampler_view **(*get_sampler_view_components)(struct pipe_video_buffer *buffer);
-
-   /**
-    * get a individual surfaces for each plane
-    */
-   struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
-};
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* PIPE_VIDEO_CONTEXT_H */
diff --git a/src/gallium/include/pipe/p_video_decoder.h b/src/gallium/include/pipe/p_video_decoder.h
new file mode 100644 (file)
index 0000000..deda992
--- /dev/null
@@ -0,0 +1,170 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_VIDEO_CONTEXT_H
+#define PIPE_VIDEO_CONTEXT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <pipe/p_video_state.h>
+
+struct pipe_screen;
+struct pipe_surface;
+struct pipe_macroblock;
+struct pipe_picture_desc;
+struct pipe_fence_handle;
+
+/**
+ * Gallium video decoder for a specific codec/profile
+ */
+struct pipe_video_decoder
+{
+   struct pipe_context *context;
+
+   enum pipe_video_profile profile;
+   enum pipe_video_entrypoint entrypoint;
+   enum pipe_video_chroma_format chroma_format;
+   unsigned width;
+   unsigned height;
+
+   /**
+    * destroy this video decoder
+    */
+   void (*destroy)(struct pipe_video_decoder *decoder);
+
+   /**
+    * Creates a buffer as decoding input
+    */
+   struct pipe_video_decode_buffer *(*create_buffer)(struct pipe_video_decoder *decoder);
+
+   /**
+    * flush decoder buffer to video hardware
+    */
+   void (*flush_buffer)(struct pipe_video_decode_buffer *decbuf,
+                        unsigned num_ycbcr_blocks[3],
+                        struct pipe_video_buffer *ref_frames[2],
+                        struct pipe_video_buffer *dst);
+};
+
+/**
+ * input buffer for a decoder
+ */
+struct pipe_video_decode_buffer
+{
+   struct pipe_video_decoder *decoder;
+
+   /**
+    * destroy this decode buffer
+    */
+   void (*destroy)(struct pipe_video_decode_buffer *decbuf);
+
+   /**
+    * map the input buffer into memory before starting decoding
+    */
+   void (*begin_frame)(struct pipe_video_decode_buffer *decbuf);
+
+   /**
+    * set the quantification matrixes
+    */
+   void (*set_quant_matrix)(struct pipe_video_decode_buffer *decbuf,
+                            const uint8_t intra_matrix[64],
+                            const uint8_t non_intra_matrix[64]);
+
+   /**
+    * get the pointer where to put the ycbcr blocks of a component
+    */
+   struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decode_buffer *, int component);
+
+   /**
+    * get the pointer where to put the ycbcr dct block data of a component
+    */
+   short *(*get_ycbcr_buffer)(struct pipe_video_decode_buffer *, int component);
+
+   /**
+    * get the stride of the mv buffer
+    */
+   unsigned (*get_mv_stream_stride)(struct pipe_video_decode_buffer *decbuf);
+
+   /**
+    * get the pointer where to put the motion vectors of a ref frame
+    */
+   struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decode_buffer *decbuf, int ref_frame);
+
+   /**
+    * decode a bitstream
+    */
+   void (*decode_bitstream)(struct pipe_video_decode_buffer *decbuf,
+                            unsigned num_bytes, const void *data,
+                            struct pipe_mpeg12_picture_desc *picture,
+                            unsigned num_ycbcr_blocks[3]);
+
+   /**
+    * unmap decoder buffer before flushing
+    */
+   void (*end_frame)(struct pipe_video_decode_buffer *decbuf);
+};
+
+/**
+ * output for decoding / input for displaying
+ */
+struct pipe_video_buffer
+{
+   struct pipe_context *context;
+
+   enum pipe_format buffer_format;
+   enum pipe_video_chroma_format chroma_format;
+   unsigned width;
+   unsigned height;
+
+   /**
+    * destroy this video buffer
+    */
+   void (*destroy)(struct pipe_video_buffer *buffer);
+
+   /**
+    * get a individual sampler view for each plane
+    */
+   struct pipe_sampler_view **(*get_sampler_view_planes)(struct pipe_video_buffer *buffer);
+
+   /**
+    * get a individual sampler view for each component
+    */
+   struct pipe_sampler_view **(*get_sampler_view_components)(struct pipe_video_buffer *buffer);
+
+   /**
+    * get a individual surfaces for each plane
+    */
+   struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PIPE_VIDEO_CONTEXT_H */
index 8458864cfc17fa222c096806c9a9ada41f48b34c..4d01fe6a68edf962d347fe0031b6a19353ab5da0 100644 (file)
@@ -25,8 +25,6 @@
  *
  **************************************************************************/
 
-#include <pipe/p_video_context.h>
-
 #include <util/u_memory.h>
 #include <util/u_math.h>
 #include <util/u_debug.h>
@@ -41,7 +39,7 @@ vlVdpDecoderCreate(VdpDevice device,
                    VdpDecoder *decoder)
 {
    enum pipe_video_profile p_profile;
-   struct pipe_video_context *vpipe;
+   struct pipe_context *pipe;
    vlVdpDevice *dev;
    vlVdpDecoder *vldecoder;
    VdpStatus ret;
@@ -63,7 +61,7 @@ vlVdpDecoderCreate(VdpDevice device,
    if (!dev)
       return VDP_STATUS_INVALID_HANDLE;
 
-   vpipe = dev->context->vpipe;
+   pipe = dev->context->pipe;
 
    vldecoder = CALLOC(1,sizeof(vlVdpDecoder));
    if (!vldecoder)
@@ -72,9 +70,9 @@ vlVdpDecoderCreate(VdpDevice device,
    vldecoder->device = dev;
 
    // TODO: Define max_references. Used mainly for H264
-   vldecoder->decoder = vpipe->create_decoder
+   vldecoder->decoder = pipe->create_video_decoder
    (
-      vpipe, p_profile,
+      pipe, p_profile,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
       PIPE_VIDEO_CHROMA_FORMAT_420,
       width, height
index 30c6b7aae4babc57946ca24624ac7f709383fbea..41248cde7056469556eb1f61865417c6c03bb6ed 100644 (file)
@@ -26,7 +26,6 @@
  **************************************************************************/
 
 #include <pipe/p_compiler.h>
-#include <pipe/p_video_context.h>
 
 #include <util/u_memory.h>
 #include <util/u_debug.h>
index ea6d50d745780daffc651937e09c03278e777e2a..d5187006bfc1da5b9dd3318426c11d3960fd130e 100644 (file)
@@ -44,7 +44,6 @@ vlVdpVideoMixerCreate(VdpDevice device,
                       VdpVideoMixer *mixer)
 {
    vlVdpVideoMixer *vmixer = NULL;
-   struct pipe_video_context *context;
    VdpStatus ret;
    float csc[16];
 
@@ -54,8 +53,6 @@ vlVdpVideoMixerCreate(VdpDevice device,
    if (!dev)
       return VDP_STATUS_INVALID_HANDLE;
 
-   context = dev->context->vpipe;
-
    vmixer = CALLOC(1, sizeof(vlVdpVideoMixer));
    if (!vmixer)
       return VDP_STATUS_RESOURCES;
index b45f699b83f8a0d7a9ba4d6ff4aabf986eaa6ef0..fc9e02ded47a04f6430c1ec59db40880b7d74eb4 100644 (file)
@@ -41,7 +41,6 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
                          VdpOutputSurface  *surface)
 {
    struct pipe_context *pipe;
-   struct pipe_video_context *context;
    struct pipe_resource res_tmpl, *res;
    struct pipe_sampler_view sv_templ;
    struct pipe_surface surf_templ;
@@ -57,8 +56,7 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
       return VDP_STATUS_INVALID_HANDLE;
 
    pipe = dev->context->pipe;
-   context = dev->context->vpipe;
-   if (!pipe || !context)
+   if (!pipe)
       return VDP_STATUS_INVALID_HANDLE;
 
    vlsurface = CALLOC(1, sizeof(vlVdpOutputSurface));
@@ -76,7 +74,7 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
    res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
    res_tmpl.usage = PIPE_USAGE_STATIC;
 
-   res = context->screen->resource_create(context->screen, &res_tmpl);
+   res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
    if (!res) {
       FREE(dev);
       return VDP_STATUS_ERROR;
index 0f87ca78972c2ef1f8e878547cb81dc38476f6d9..16beb289c4240c7e2cec1a2044c31eabba59a590 100644 (file)
@@ -40,7 +40,6 @@ vlVdpPresentationQueueCreate(VdpDevice device,
                              VdpPresentationQueue *presentation_queue)
 {
    vlVdpPresentationQueue *pq = NULL;
-   struct pipe_video_context *context;
    VdpStatus ret;
 
    VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating PresentationQueue\n");
@@ -59,8 +58,6 @@ vlVdpPresentationQueueCreate(VdpDevice device,
    if (dev != pqt->device)
       return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
 
-   context = dev->context->vpipe;
-
    pq = CALLOC(1, sizeof(vlVdpPresentationQueue));
    if (!pq)
       return VDP_STATUS_RESOURCES;
@@ -175,9 +172,9 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
    vl_compositor_render(&pq->compositor, PIPE_MPEG12_PICTURE_TYPE_FRAME,
                         drawable_surface, NULL, NULL);
 
-   pq->device->context->vpipe->screen->flush_frontbuffer
+   pq->device->context->pipe->screen->flush_frontbuffer
    (
-      pq->device->context->vpipe->screen,
+      pq->device->context->pipe->screen,
       drawable_surface->texture,
       0, 0,
       vl_contextprivate_get(pq->device->context, drawable_surface)
index b8c4d2cd150b34237dea6ea60ddcef6402359079..877d0259c564f96d4160159ef056d1bf70d7328b 100644 (file)
@@ -28,7 +28,6 @@
 
 #include <assert.h>
 
-#include <pipe/p_video_context.h>
 #include <pipe/p_state.h>
 
 #include <util/u_memory.h>
@@ -161,7 +160,6 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
 {
    enum pipe_format pformat = FormatToPipe(source_ycbcr_format);
    struct pipe_context *pipe;
-   struct pipe_video_context *context;
    struct pipe_sampler_view **sampler_views;
    unsigned i;
 
@@ -173,8 +171,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
       return VDP_STATUS_INVALID_HANDLE;
 
    pipe = p_surf->device->context->pipe;
-   context = p_surf->device->context->vpipe;
-   if (!pipe && !context)
+   if (!pipe)
       return VDP_STATUS_INVALID_HANDLE;
 
    if (p_surf->video_buffer == NULL || pformat != p_surf->video_buffer->buffer_format) {
index ada17dfadc98a25e39d4d3ce77961a9091f26b09..8a97c99bda9553928460bab09fc0a7aa2db42e60 100644 (file)
@@ -34,7 +34,7 @@
 #include <vdpau/vdpau_x11.h>
 
 #include <pipe/p_compiler.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 
 #include <util/u_debug.h>
 #include <vl/vl_compositor.h>
index 7b74825b37ef48cf737d65045ab6400cf3727b92..f21ebda76d395fc48e2c6b8da829c1e55b98b8f5 100644 (file)
@@ -31,7 +31,7 @@
 #include <X11/extensions/XvMClib.h>
 
 #include <pipe/p_screen.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 #include <pipe/p_video_state.h>
 #include <pipe/p_state.h>
 
@@ -244,13 +244,14 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
       return BadAlloc;
    }
 
-   context_priv->decoder = vctx->vpipe->create_decoder(vctx->vpipe,
-                                                       ProfileToPipe(mc_type),
-                                                       (mc_type & XVMC_IDCT) ?
-                                                          PIPE_VIDEO_ENTRYPOINT_IDCT :
-                                                          PIPE_VIDEO_ENTRYPOINT_MC,
-                                                       FormatToPipe(chroma_format),
-                                                       width, height);
+   context_priv->decoder = vctx->pipe->create_video_decoder
+   (
+      vctx->pipe,
+      ProfileToPipe(mc_type),
+      (mc_type & XVMC_IDCT) ? PIPE_VIDEO_ENTRYPOINT_IDCT : PIPE_VIDEO_ENTRYPOINT_MC,
+      FormatToPipe(chroma_format),
+      width, height
+   );
 
    if (!context_priv->decoder) {
       XVMC_MSG(XVMC_ERR, "[XvMC] Could not create VL decoder.\n");
index 4ecb0e1f887cab9c81addb778c5cbfcb01147965..7d6ff061eb73b9c18c66410bd46565be77a8ff94 100644 (file)
@@ -32,7 +32,7 @@
 #include <xorg/fourcc.h>
 
 #include <pipe/p_screen.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 #include <pipe/p_state.h>
 
 #include <util/u_memory.h>
@@ -224,7 +224,6 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
    XvMCContextPrivate *context_priv;
    XvMCSubpicturePrivate *subpicture_priv;
    struct pipe_context *pipe;
-   struct pipe_video_context *vpipe;
    struct pipe_resource tex_templ, *tex;
    struct pipe_sampler_view sampler_templ;
    Status ret;
@@ -238,7 +237,6 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
 
    context_priv = context->privData;
    pipe = context_priv->vctx->pipe;
-   vpipe = context_priv->vctx->vpipe;
 
    if (!subpicture)
       return XvMCBadSubpicture;
@@ -259,9 +257,9 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
    tex_templ.target = PIPE_TEXTURE_2D;
    tex_templ.format = XvIDToPipe(xvimage_id);
    tex_templ.last_level = 0;
-   if (vpipe->screen->get_video_param(vpipe->screen,
-                                      PIPE_VIDEO_PROFILE_UNKNOWN,
-                                      PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
+   if (pipe->screen->get_video_param(pipe->screen,
+                                     PIPE_VIDEO_PROFILE_UNKNOWN,
+                                     PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
       tex_templ.width0 = width;
       tex_templ.height0 = height;
    }
@@ -275,7 +273,7 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
    tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
    tex_templ.flags = 0;
 
-   tex = vpipe->screen->resource_create(vpipe->screen, &tex_templ);
+   tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
 
    memset(&sampler_templ, 0, sizeof(sampler_templ));
    u_sampler_view_default_template(&sampler_templ, tex, tex->format);
@@ -305,7 +303,7 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
       tex_templ.height0 = 1;
       tex_templ.usage = PIPE_USAGE_STATIC;
 
-      tex = vpipe->screen->resource_create(vpipe->screen, &tex_templ);
+      tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
 
       memset(&sampler_templ, 0, sizeof(sampler_templ));
       u_sampler_view_default_template(&sampler_templ, tex, tex->format);
index 0370a6e858f037824e86dbfa045c1fd5d56a3fd6..e8ca8152e7ae8abf5c66e10e070349ca31cbc9b0 100644 (file)
@@ -30,7 +30,7 @@
 
 #include <X11/Xlibint.h>
 
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
 #include <pipe/p_video_state.h>
 #include <pipe/p_state.h>
 
@@ -304,7 +304,6 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
 
    XvMCContextPrivate *context_priv;
    struct pipe_context *pipe;
-   struct pipe_video_context *vpipe;
    XvMCSurfacePrivate *surface_priv;
 
    XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
@@ -318,7 +317,6 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
 
    context_priv = context->privData;
    pipe = context_priv->vctx->pipe;
-   vpipe = context_priv->vctx->vpipe;
 
    surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
    if (!surface_priv)
@@ -357,10 +355,8 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
                          XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
 )
 {
-   struct pipe_video_context *vpipe;
    struct pipe_video_decode_buffer *t_buffer;
 
-   XvMCContextPrivate *context_priv;
    XvMCSurfacePrivate *target_surface_priv;
    XvMCSurfacePrivate *past_surface_priv;
    XvMCSurfacePrivate *future_surface_priv;
@@ -406,9 +402,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
    assert(!past_surface || past_surface_priv->context == context);
    assert(!future_surface || future_surface_priv->context == context);
 
-   context_priv = context->privData;
-   vpipe = context_priv->vctx->vpipe;
-
    t_buffer = target_surface_priv->decode_buffer;
 
    // ensure that all reference frames are flushed
@@ -496,7 +489,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
 {
    static int dump_window = -1;
 
-   struct pipe_video_context *vpipe;
+   struct pipe_context *pipe;
    struct vl_compositor *compositor;
 
    XvMCSurfacePrivate *surface_priv;
@@ -522,7 +515,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
    assert(srcy + srch - 1 < surface->height);
 
    subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
-   vpipe = context_priv->vctx->vpipe;
+   pipe = context_priv->vctx->pipe;
    compositor = &context_priv->compositor;
 
    if (!context_priv->drawable_surface ||
@@ -571,15 +564,15 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
    }
 
    // Workaround for r600g, there seems to be a bug in the fence refcounting code
-   vpipe->screen->fence_reference(vpipe->screen, &surface_priv->fence, NULL);
+   pipe->screen->fence_reference(pipe->screen, &surface_priv->fence, NULL);
 
    vl_compositor_render(compositor, PictureToPipe(flags), context_priv->drawable_surface, &dst_rect, &surface_priv->fence);
 
    XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
 
-   vpipe->screen->flush_frontbuffer
+   pipe->screen->flush_frontbuffer
    (
-      vpipe->screen,
+      pipe->screen,
       context_priv->drawable_surface->texture,
       0, 0,
       vl_contextprivate_get(context_priv->vctx, context_priv->drawable_surface)
@@ -606,7 +599,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
 PUBLIC
 Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
 {
-   struct pipe_video_context *vpipe;
+   struct pipe_context *pipe;
    XvMCSurfacePrivate *surface_priv;
    XvMCContextPrivate *context_priv;
 
@@ -619,12 +612,12 @@ Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
 
    surface_priv = surface->privData;
    context_priv = surface_priv->context->privData;
-   vpipe = context_priv->vctx->vpipe;
+   pipe = context_priv->vctx->pipe;
 
    *status = 0;
 
    if (surface_priv->fence)
-      if (!vpipe->screen->fence_signalled(vpipe->screen, surface_priv->fence))
+      if (!pipe->screen->fence_signalled(pipe->screen, surface_priv->fence))
          *status |= XVMC_RENDERING;
 
    return Success;
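
The same substitution covers fences: they are released and polled through pipe->screen instead of vpipe->screen. A small sketch of the status check from XvMCGetSurfaceStatus; the helper name surface_is_busy is hypothetical.

#include <pipe/p_context.h>
#include <pipe/p_screen.h>

/* Returns non-zero while the GPU has not yet passed the surface's fence. */
static int
surface_is_busy(struct pipe_context *pipe, struct pipe_fence_handle *fence)
{
   if (!fence)
      return 0;

   return !pipe->screen->fence_signalled(pipe->screen, fence);
}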
index 1d066f826db5feffeebd0e6f30c33d03bd77d2c8..f854e924fa4d7b198fbec384e41e501eef0ddefe 100644 (file)
 
 #include <vl_winsys.h>
 #include <driclient.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_context.h>
 #include <pipe/p_state.h>
 #include <util/u_memory.h>
 #include <util/u_hash.h>
 #include <util/u_hash_table.h>
+#include <util/u_inlines.h>
 #include <state_tracker/drm_driver.h>
 #include <X11/Xlibint.h>
 
@@ -237,13 +239,6 @@ vl_video_create(struct vl_screen *vscreen)
    struct vl_dri_screen *vl_dri_scrn = (struct vl_dri_screen*)vscreen;
    struct vl_dri_context *vl_dri_ctx;
 
-   if (!vscreen->pscreen->video_context_create) {
-      debug_printf("[G3DVL] No video support found on %s/%s.\n",
-                   vscreen->pscreen->get_vendor(vscreen->pscreen),
-                   vscreen->pscreen->get_name(vscreen->pscreen));
-      goto no_vpipe;
-   }
-
    vl_dri_ctx = CALLOC_STRUCT(vl_dri_context);
    if (!vl_dri_ctx)
       goto no_struct;
@@ -256,11 +251,6 @@ vl_video_create(struct vl_screen *vscreen)
       goto no_pipe;
    }
 
-   vl_dri_ctx->base.vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, vl_dri_ctx->base.pipe);
-
-   if (!vl_dri_ctx->base.vpipe)
-      goto no_pipe;
-
    vl_dri_ctx->base.vscreen = vscreen;
    vl_dri_ctx->fd = vl_dri_scrn->dri_screen->fd;
 
@@ -270,7 +260,6 @@ no_pipe:
    FREE(vl_dri_ctx);
 
 no_struct:
-no_vpipe:
    return NULL;
 }
 
@@ -281,6 +270,5 @@ void vl_video_destroy(struct vl_context *vctx)
    assert(vctx);
 
    vl_dri_ctx->base.pipe->destroy(vl_dri_ctx->base.pipe);
-   vl_dri_ctx->base.vpipe->destroy(vl_dri_ctx->base.vpipe);
    FREE(vl_dri_ctx);
 }
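
The DRI winsys used to refuse to create a vl_context when the screen lacked video_context_create. With that hook removed, a caller that wants to probe for video support would query the screen instead; the sketch below assumes PIPE_VIDEO_CAP_SUPPORTED is the relevant capability, which is not shown in this hunk.

#include <pipe/p_screen.h>

/* Hypothetical probe: non-zero if the screen advertises decode support for
 * the given profile.  PIPE_VIDEO_CAP_SUPPORTED is assumed here. */
static int
screen_has_video(struct pipe_screen *screen, enum pipe_video_profile profile)
{
   return screen->get_video_param(screen, profile, PIPE_VIDEO_CAP_SUPPORTED);
}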
index 2d80c1d9b3279fe1b69614fc9c9ee917d2f9977b..384a8ba5f17ce95ffe2b98bde5dff7589642e420 100644 (file)
@@ -33,7 +33,6 @@
 #include <pipe/p_format.h>
 
 struct pipe_screen;
-struct pipe_video_context;
 struct pipe_surface;
 
 struct vl_screen
@@ -45,7 +44,6 @@ struct vl_context
 {
    struct vl_screen *vscreen;
    struct pipe_context *pipe;
-   struct pipe_video_context *vpipe;
 };
 
 struct vl_screen*
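
After this struct change, vl_context carries only the vl_screen and the pipe_context; anything that previously went through vctx->vpipe->screen now goes through vctx->pipe->screen, as the hunks above show. A trivial sketch of that access path, with a hypothetical accessor name:

#include <vl_winsys.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>

/* The screen is reached through the regular context now. */
static struct pipe_screen *
vl_context_screen(struct vl_context *vctx)
{
   return vctx->pipe->screen;
}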
index 0487bd9c560ba5a7c2f6f2bb6b830ea3b1bf927d..92f0bd6da907a40af2afc29e6dafcc6ec5fd613b 100644 (file)
@@ -28,7 +28,6 @@
 #include <X11/Xlibint.h>
 
 #include <pipe/p_state.h>
-#include <pipe/p_video_context.h>
 
 #include <util/u_memory.h>
 #include <util/u_format.h>
@@ -173,30 +172,21 @@ struct vl_context*
 vl_video_create(struct vl_screen *vscreen)
 {
    struct pipe_context *pipe;
-   struct pipe_video_context *vpipe;
    struct vl_context *vctx;
 
    assert(vscreen);
-   assert(vscreen->pscreen->video_context_create);
 
    pipe = vscreen->pscreen->context_create(vscreen->pscreen, NULL);
    if (!pipe)
       return NULL;
 
-   vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, pipe);
-   if (!vpipe) {
-      pipe->destroy(pipe);
-      return NULL;
-   }
-
    vctx = CALLOC_STRUCT(vl_context);
    if (!vctx) {
       pipe->destroy(pipe);
-      vpipe->destroy(vpipe);
       return NULL;
    }
 
-   vctx->vpipe = vpipe;
+   vctx->pipe = pipe;
    vctx->vscreen = vscreen;
 
    return vctx;
@@ -207,6 +197,5 @@ void vl_video_destroy(struct vl_context *vctx)
    assert(vctx);
 
    vctx->pipe->destroy(vctx->pipe);
-   vctx->vpipe->destroy(vctx->vpipe);
    FREE(vctx);
 }
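
Putting the pieces together: after this change a state tracker gets a plain pipe_context from vl_video_create() and asks it for a decoder directly. A sketch of that flow; the MPEG2 profile, 4:2:0 chroma format and 720x576 size are example values only, and real code would keep the vl_context alongside the decoder instead of dropping it on failure.

#include <vl_winsys.h>
#include <pipe/p_context.h>
#include <pipe/p_video_decoder.h>

/* Hypothetical caller of the trimmed-down winsys interface. */
static struct pipe_video_decoder *
example_create_decoder(struct vl_screen *vscreen)
{
   struct vl_context *vctx = vl_video_create(vscreen);
   struct pipe_video_decoder *dec;

   if (!vctx)
      return NULL;

   dec = vctx->pipe->create_video_decoder(vctx->pipe,
                                          PIPE_VIDEO_PROFILE_MPEG2_MAIN,
                                          PIPE_VIDEO_ENTRYPOINT_IDCT,
                                          PIPE_VIDEO_CHROMA_FORMAT_420,
                                          720, 576);
   if (!dec)
      vl_video_destroy(vctx);

   return dec;
}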