util/u_resource.c \
util/u_upload_mgr.c \
util/u_vbuf_mgr.c \
- vl/vl_context.c \
vl/vl_csc.c \
vl/vl_compositor.c \
+ vl/vl_decoder.c \
vl/vl_mpeg12_decoder.c \
vl/vl_mpeg12_bitstream.c \
vl/vl_zscan.c \
#define vl_compositor_h
#include <pipe/p_state.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include <pipe/p_video_state.h>
#include "vl_types.h"
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <pipe/p_video_context.h>
-
-#include <util/u_memory.h>
-#include <util/u_rect.h>
-#include <util/u_video.h>
-
-#include "vl_context.h"
-#include "vl_compositor.h"
-#include "vl_mpeg12_decoder.h"
-
-static void
-vl_context_destroy(struct pipe_video_context *context)
-{
- struct vl_context *ctx = (struct vl_context*)context;
-
- assert(context);
-
- FREE(ctx);
-}
-
-static struct pipe_video_decoder *
-vl_context_create_decoder(struct pipe_video_context *context,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height)
-{
- struct vl_context *ctx = (struct vl_context*)context;
- unsigned buffer_width, buffer_height;
- bool pot_buffers;
-
- assert(context);
- assert(width > 0 && height > 0);
-
- pot_buffers = !ctx->base.screen->get_video_param(ctx->base.screen, profile, PIPE_VIDEO_CAP_NPOT_TEXTURES);
-
- buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
- buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
-
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return vl_create_mpeg12_decoder(context, ctx->pipe, profile, entrypoint,
- chroma_format, buffer_width, buffer_height);
- default:
- return NULL;
- }
- return NULL;
-}
-
-struct pipe_video_context *
-vl_create_context(struct pipe_context *pipe)
-{
- struct vl_context *ctx;
-
- ctx = CALLOC_STRUCT(vl_context);
-
- if (!ctx)
- return NULL;
-
- ctx->base.screen = pipe->screen;
-
- ctx->base.destroy = vl_context_destroy;
- ctx->base.create_decoder = vl_context_create_decoder;
-
- ctx->pipe = pipe;
-
- return &ctx->base;
-}
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * Copyright 2011 Christian König.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef vl_context_h
-#define vl_context_h
-
-#include <pipe/p_video_context.h>
-
-struct pipe_screen;
-struct pipe_context;
-
-struct vl_context
-{
- struct pipe_video_context base;
- struct pipe_context *pipe;
-};
-
-/* drivers can call this function in their pipe_video_context constructors and pass it
- an accelerated pipe_context along with suitable buffering modes, etc */
-struct pipe_video_context *
-vl_create_context(struct pipe_context *pipe);
-
-#endif /* vl_context_h */
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <pipe/p_video_decoder.h>
+
+#include <util/u_video.h>
+
+#include "vl_decoder.h"
+#include "vl_mpeg12_decoder.h"
+
+struct pipe_video_decoder *
+vl_create_decoder(struct pipe_context *pipe,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height)
+{
+ unsigned buffer_width, buffer_height;
+ bool pot_buffers;
+
+ assert(pipe);
+ assert(width > 0 && height > 0);
+
+ pot_buffers = !pipe->screen->get_video_param
+ (
+ pipe->screen,
+ profile,
+ PIPE_VIDEO_CAP_NPOT_TEXTURES
+ );
+
+ buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
+ buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
+
+ switch (u_reduce_video_profile(profile)) {
+ case PIPE_VIDEO_CODEC_MPEG12:
+ return vl_create_mpeg12_decoder(pipe, profile, entrypoint, chroma_format, buffer_width, buffer_height);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_decoder_h
+#define vl_decoder_h
+
+#include <pipe/p_video_decoder.h>
+
+/**
+ * standard implementation of pipe->create_video_decoder
+ */
+struct pipe_video_decoder *
+vl_create_decoder(struct pipe_context *pipe,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height);
+
+#endif /* vl_decoder_h */
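For illustration only (editor's sketch, not part of this change): with vl_decoder.h in place, drivers point the new pipe_context::create_video_decoder hook at vl_create_decoder (as the r300, r600 and softpipe hunks below do), and a state tracker then requests a decoder through that hook. The helper name, the NULL check on the hook and the MPEG-2 profile are illustrative assumptions, not code from this patch.

#include <pipe/p_context.h>
#include <pipe/p_video_decoder.h>

/* editor's sketch: ask the driver for an MPEG-2 bitstream decoder */
static struct pipe_video_decoder *
create_mpeg2_decoder(struct pipe_context *pipe, unsigned width, unsigned height)
{
   /* assumed: drivers without video support simply leave the hook NULL */
   if (!pipe->create_video_decoder)
      return NULL;

   return pipe->create_video_decoder(pipe, PIPE_VIDEO_PROFILE_MPEG2_MAIN,
                                     PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                     PIPE_VIDEO_CHROMA_FORMAT_420,
                                     width, height);
}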
formats[0] = formats[1] = formats[2] = dec->zscan_source_format;
buffer->zscan_source = vl_video_buffer_create_ex
(
- dec->pipe,
+ dec->base.context,
dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
1, PIPE_VIDEO_CHROMA_FORMAT_444, formats, PIPE_USAGE_STATIC
dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
assert(dec);
- vl_vb_map(&buf->vertex_stream, dec->pipe);
+ vl_vb_map(&buf->vertex_stream, dec->base.context);
sampler_views = buf->zscan_source->get_sampler_view_planes(buf->zscan_source);
1
};
- buf->tex_transfer[i] = dec->pipe->get_transfer
+ buf->tex_transfer[i] = dec->base.context->get_transfer
(
- dec->pipe, tex,
+ dec->base.context, tex,
0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
&rect
);
- buf->texels[i] = dec->pipe->transfer_map(dec->pipe, buf->tex_transfer[i]);
+ buf->texels[i] = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer[i]);
}
if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
assert(dec);
- vl_vb_unmap(&buf->vertex_stream, dec->pipe);
+ vl_vb_unmap(&buf->vertex_stream, dec->base.context);
for (i = 0; i < VL_MAX_PLANES; ++i) {
- dec->pipe->transfer_unmap(dec->pipe, buf->tex_transfer[i]);
- dec->pipe->transfer_destroy(dec->pipe, buf->tex_transfer[i]);
+ dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer[i]);
+ dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer[i]);
}
}
assert(decoder);
/* Asserted in softpipe_delete_fs_state() for some reason */
- dec->pipe->bind_vs_state(dec->pipe, NULL);
- dec->pipe->bind_fs_state(dec->pipe, NULL);
+ dec->base.context->bind_vs_state(dec->base.context, NULL);
+ dec->base.context->bind_fs_state(dec->base.context, NULL);
- dec->pipe->delete_depth_stencil_alpha_state(dec->pipe, dec->dsa);
- dec->pipe->delete_sampler_state(dec->pipe, dec->sampler_ycbcr);
+ dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
+ dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
vl_mc_cleanup(&dec->mc_y);
vl_mc_cleanup(&dec->mc_c);
vl_zscan_cleanup(&dec->zscan_y);
vl_zscan_cleanup(&dec->zscan_c);
- dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves_ycbcr);
- dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves_mv);
+ dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
+ dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
pipe_resource_reference(&dec->quads.buffer, NULL);
pipe_resource_reference(&dec->pos.buffer, NULL);
buffer->base.decode_bitstream = vl_mpeg12_buffer_decode_bitstream;
buffer->base.end_frame = vl_mpeg12_buffer_end_frame;
- if (!vl_vb_init(&buffer->vertex_stream, dec->pipe,
+ if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
dec->base.width / MACROBLOCK_WIDTH,
dec->base.height / MACROBLOCK_HEIGHT))
goto error_vertex_buffer;
surfaces = dst->get_surfaces(dst);
- dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves_mv);
+ dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
for (i = 0; i < VL_MAX_PLANES; ++i) {
if (!surfaces[i]) continue;
if (!sv[j]) continue;
vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
- dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
+ dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
vl_mc_render_ref(&buf->mc[i], sv[j][i]);
}
vb[2] = dec->block_num;
- dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves_ycbcr);
+ dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
for (i = 0; i < VL_MAX_PLANES; ++i) {
if (!num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
- dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
+ dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
vl_zscan_render(&buf->zscan[i], num_ycbcr_blocks[i]);
if (!num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
- dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
+ dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
vl_idct_prepare_stage2(component == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[component]);
else {
- dec->pipe->set_fragment_sampler_views(dec->pipe, 1, &mc_source_sv[component]);
- dec->pipe->bind_fragment_sampler_states(dec->pipe, 1, &dec->sampler_ycbcr);
+ dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
+ dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
}
vl_mc_render_ycbcr(&buf->mc[i], j, num_ycbcr_blocks[component]);
}
dsa.alpha.enabled = 0;
dsa.alpha.func = PIPE_FUNC_ALWAYS;
dsa.alpha.ref_value = 0;
- dec->dsa = dec->pipe->create_depth_stencil_alpha_state(dec->pipe, &dsa);
- dec->pipe->bind_depth_stencil_alpha_state(dec->pipe, dec->dsa);
+ dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
+ dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
memset(&sampler, 0, sizeof(sampler));
sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
sampler.compare_func = PIPE_FUNC_ALWAYS;
sampler.normalized_coords = 1;
- dec->sampler_ycbcr = dec->pipe->create_sampler_state(dec->pipe, &sampler);
+ dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
if (!dec->sampler_ycbcr)
return false;
assert(dec);
- screen = dec->pipe->screen;
+ screen = dec->base.context->screen;
for (i = 0; i < num_configs; ++i) {
if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
assert(dec);
dec->zscan_source_format = format_config->zscan_source_format;
- dec->zscan_linear = vl_zscan_layout(dec->pipe, vl_zscan_linear, dec->blocks_per_line);
- dec->zscan_normal = vl_zscan_layout(dec->pipe, vl_zscan_normal, dec->blocks_per_line);
- dec->zscan_alternate = vl_zscan_layout(dec->pipe, vl_zscan_alternate, dec->blocks_per_line);
+ dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
+ dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
+ dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
- if (!vl_zscan_init(&dec->zscan_y, dec->pipe, dec->base.width, dec->base.height,
+ if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
dec->blocks_per_line, dec->num_blocks, num_channels))
return false;
- if (!vl_zscan_init(&dec->zscan_c, dec->pipe, dec->chroma_width, dec->chroma_height,
+ if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
dec->blocks_per_line, dec->num_blocks, num_channels))
return false;
struct pipe_sampler_view *matrix = NULL;
- nr_of_idct_render_targets = dec->pipe->screen->get_param(dec->pipe->screen, PIPE_CAP_MAX_RENDER_TARGETS);
- max_inst = dec->pipe->screen->get_shader_param(dec->pipe->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS);
+ nr_of_idct_render_targets = dec->base.context->screen->get_param
+ (
+ dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
+ );
+
+ max_inst = dec->base.context->screen->get_shader_param
+ (
+ dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
+ );
// Just assume we need 32 inst per render target, not 100% true, but should work in most cases
if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
dec->idct_source = vl_video_buffer_create_ex
(
- dec->pipe, dec->base.width / 4, dec->base.height, 1,
+ dec->base.context, dec->base.width / 4, dec->base.height, 1,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
dec->mc_source = vl_video_buffer_create_ex
(
- dec->pipe, dec->base.width / nr_of_idct_render_targets,
+ dec->base.context, dec->base.width / nr_of_idct_render_targets,
dec->base.height / 4, nr_of_idct_render_targets,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
if (!dec->mc_source)
goto error_mc_source;
- if (!(matrix = vl_idct_upload_matrix(dec->pipe, format_config->idct_scale)))
+ if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
goto error_matrix;
- if (!vl_idct_init(&dec->idct_y, dec->pipe, dec->base.width, dec->base.height,
+ if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
nr_of_idct_render_targets, matrix, matrix))
goto error_y;
- if(!vl_idct_init(&dec->idct_c, dec->pipe, dec->chroma_width, dec->chroma_height,
+ if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
nr_of_idct_render_targets, matrix, matrix))
goto error_c;
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
dec->mc_source = vl_video_buffer_create_ex
(
- dec->pipe, dec->base.width, dec->base.height, 1,
+ dec->base.context, dec->base.width, dec->base.height, 1,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
}
struct pipe_video_decoder *
-vl_create_mpeg12_decoder(struct pipe_video_context *context,
- struct pipe_context *pipe,
+vl_create_mpeg12_decoder(struct pipe_context *context,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
dec->base.create_buffer = vl_mpeg12_create_buffer;
dec->base.flush_buffer = vl_mpeg12_decoder_flush_buffer;
- dec->pipe = pipe;
-
dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
- dec->quads = vl_vb_upload_quads(dec->pipe);
+ dec->quads = vl_vb_upload_quads(dec->base.context);
dec->pos = vl_vb_upload_pos(
- dec->pipe,
+ dec->base.context,
dec->base.width / MACROBLOCK_WIDTH,
dec->base.height / MACROBLOCK_HEIGHT
);
- dec->block_num = vl_vb_upload_block_num(dec->pipe, dec->num_blocks);
+ dec->block_num = vl_vb_upload_block_num(dec->base.context, dec->num_blocks);
- dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->pipe);
- dec->ves_mv = vl_vb_get_ves_mv(dec->pipe);
+ dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
+ dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
/* TODO: Implement 422, 444 */
assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
goto error_sources;
}
- if (!vl_mc_init(&dec->mc_y, dec->pipe, dec->base.width, dec->base.height, MACROBLOCK_HEIGHT, format_config->mc_scale,
+ if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
+ MACROBLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_y;
// TODO
- if (!vl_mc_init(&dec->mc_c, dec->pipe, dec->base.width, dec->base.height, BLOCK_HEIGHT, format_config->mc_scale,
+ if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
+ BLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_c;
#ifndef vl_mpeg12_decoder_h
#define vl_mpeg12_decoder_h
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include "vl_mpeg12_bitstream.h"
#include "vl_zscan.h"
struct vl_mpeg12_decoder
{
struct pipe_video_decoder base;
- struct pipe_context *pipe;
unsigned chroma_width, chroma_height;
short *texels[VL_MAX_PLANES];
};
-/* drivers can call this function in their pipe_video_context constructors and pass it
- an accelerated pipe_context along with suitable buffering modes, etc */
+/**
+ * creates a shader based mpeg12 decoder
+ */
struct pipe_video_decoder *
-vl_create_mpeg12_decoder(struct pipe_video_context *context,
- struct pipe_context *pipe,
+vl_create_mpeg12_decoder(struct pipe_context *pipe,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
#define vl_ycbcr_buffer_h
#include <pipe/p_context.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include "vl_defines.h"
#include "util/u_simple_list.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
+#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "r300_cb.h"
r300_init_state_functions(r300);
r300_init_resource_functions(r300);
+ r300->context.create_video_decoder = vl_create_decoder;
r300->context.create_video_buffer = vl_video_buffer_create;
r300->vbuf_mgr = u_vbuf_mgr_create(&r300->context, 1024 * 1024, 16,
#include "util/u_format_s3tc.h"
#include "util/u_memory.h"
#include "os/os_time.h"
-#include "vl/vl_context.h"
#include "vl/vl_video_buffer.h"
#include "r300_context.h"
return retval == usage;
}
-static struct pipe_video_context *
-r300_video_create(struct pipe_screen *screen, struct pipe_context *pipe)
-{
- assert(screen);
-
- return vl_create_context(pipe);
-}
-
static void r300_destroy_screen(struct pipe_screen* pscreen)
{
struct r300_screen* r300screen = r300_screen(pscreen);
r300screen->screen.is_format_supported = r300_is_format_supported;
r300screen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
r300screen->screen.context_create = r300_create_context;
- r300screen->screen.video_context_create = r300_video_create;
r300screen->screen.fence_reference = r300_fence_reference;
r300screen->screen.fence_signalled = r300_fence_signalled;
r300screen->screen.fence_finish = r300_fence_finish;
#include <util/u_memory.h>
#include <util/u_inlines.h>
#include "util/u_upload_mgr.h"
-#include <vl/vl_context.h>
+#include <vl/vl_decoder.h>
#include <vl/vl_video_buffer.h>
#include "os/os_time.h"
#include <pipebuffer/pb_buffer.h>
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
rctx->context.draw_vbo = r600_draw_vbo;
+
+ rctx->context.create_video_decoder = vl_create_decoder;
rctx->context.create_video_buffer = vl_video_buffer_create;
switch (r600_get_family(rctx->radeon)) {
return &rctx->context;
}
-static struct pipe_video_context *
-r600_video_create(struct pipe_screen *screen, struct pipe_context *pipe)
-{
- assert(screen && pipe);
-
- return vl_create_context(pipe);
-}
-
/*
* pipe_screen
*/
rscreen->screen.is_format_supported = r600_is_format_supported;
rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
rscreen->screen.context_create = r600_create_context;
- rscreen->screen.video_context_create = r600_video_create;
rscreen->screen.fence_reference = r600_fence_reference;
rscreen->screen.fence_signalled = r600_fence_signalled;
rscreen->screen.fence_finish = r600_fence_finish;
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "tgsi/tgsi_exec.h"
+#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "sp_clear.h"
#include "sp_context.h"
softpipe->pipe.render_condition = softpipe_render_condition;
+ softpipe->pipe.create_video_decoder = vl_create_decoder;
softpipe->pipe.create_video_buffer = vl_video_buffer_create;
/*
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "draw/draw_context.h"
-#include "vl/vl_context.h"
#include "vl/vl_video_buffer.h"
#include "state_tracker/sw_winsys.h"
winsys->displaytarget_display(winsys, texture->dt, context_private);
}
-static struct pipe_video_context *
-sp_video_create(struct pipe_screen *screen, struct pipe_context *context)
-{
- assert(screen);
-
- return vl_create_context(context);
-}
-
/**
* Create a new pipe_screen object
* Note: we're not presently subclassing pipe_screen (no softpipe_screen).
screen->base.is_video_format_supported = vl_video_buffer_is_format_supported;
screen->base.context_create = softpipe_create_context;
screen->base.flush_frontbuffer = softpipe_flush_frontbuffer;
- screen->base.video_context_create = sp_video_create;
util_format_s3tc_init();
struct pipe_vertex_element;
struct pipe_viewport_state;
+enum pipe_video_profile;
+enum pipe_video_entrypoint;
enum pipe_video_chroma_format;
enum pipe_format;
*/
void (*texture_barrier)(struct pipe_context *);
+ /**
+ * Creates a video decoder for a specific video codec/profile
+ */
+ struct pipe_video_decoder *(*create_video_decoder)( struct pipe_context *context,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height );
+
/**
* Creates a video buffer as decoding target
*/
struct pipe_context * (*context_create)( struct pipe_screen *, void *priv );
- struct pipe_video_context * (*video_context_create)( struct pipe_screen *screen,
- struct pipe_context *context );
-
/**
* Check if the given pipe_format is supported as a texture or
* drawing surface.
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef PIPE_VIDEO_CONTEXT_H
-#define PIPE_VIDEO_CONTEXT_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <pipe/p_video_state.h>
-
-struct pipe_screen;
-struct pipe_surface;
-struct pipe_macroblock;
-struct pipe_picture_desc;
-struct pipe_fence_handle;
-
-/**
- * Gallium video rendering context
- */
-struct pipe_video_context
-{
- struct pipe_screen *screen;
-
- /**
- * destroy context, all objects created from this context
- * (buffers, decoders, compositors etc...) must be freed before calling this
- */
- void (*destroy)(struct pipe_video_context *context);
-
- /**
- * create a decoder for a specific video profile
- */
- struct pipe_video_decoder *(*create_decoder)(struct pipe_video_context *context,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height);
-
-};
-
-/**
- * decoder for a specific video codec
- */
-struct pipe_video_decoder
-{
- struct pipe_video_context *context;
-
- enum pipe_video_profile profile;
- enum pipe_video_entrypoint entrypoint;
- enum pipe_video_chroma_format chroma_format;
- unsigned width;
- unsigned height;
-
- /**
- * destroy this video decoder
- */
- void (*destroy)(struct pipe_video_decoder *decoder);
-
- /**
- * Creates a buffer as decoding input
- */
- struct pipe_video_decode_buffer *(*create_buffer)(struct pipe_video_decoder *decoder);
-
- /**
- * flush decoder buffer to video hardware
- */
- void (*flush_buffer)(struct pipe_video_decode_buffer *decbuf,
- unsigned num_ycbcr_blocks[3],
- struct pipe_video_buffer *ref_frames[2],
- struct pipe_video_buffer *dst);
-};
-
-/**
- * input buffer for a decoder
- */
-struct pipe_video_decode_buffer
-{
- struct pipe_video_decoder *decoder;
-
- /**
- * destroy this decode buffer
- */
- void (*destroy)(struct pipe_video_decode_buffer *decbuf);
-
- /**
- * map the input buffer into memory before starting decoding
- */
- void (*begin_frame)(struct pipe_video_decode_buffer *decbuf);
-
- /**
- * set the quantification matrixes
- */
- void (*set_quant_matrix)(struct pipe_video_decode_buffer *decbuf,
- const uint8_t intra_matrix[64],
- const uint8_t non_intra_matrix[64]);
-
- /**
- * get the pointer where to put the ycbcr blocks of a component
- */
- struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decode_buffer *, int component);
-
- /**
- * get the pointer where to put the ycbcr dct block data of a component
- */
- short *(*get_ycbcr_buffer)(struct pipe_video_decode_buffer *, int component);
-
- /**
- * get the stride of the mv buffer
- */
- unsigned (*get_mv_stream_stride)(struct pipe_video_decode_buffer *decbuf);
-
- /**
- * get the pointer where to put the motion vectors of a ref frame
- */
- struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decode_buffer *decbuf, int ref_frame);
-
- /**
- * decode a bitstream
- */
- void (*decode_bitstream)(struct pipe_video_decode_buffer *decbuf,
- unsigned num_bytes, const void *data,
- struct pipe_mpeg12_picture_desc *picture,
- unsigned num_ycbcr_blocks[3]);
-
- /**
- * unmap decoder buffer before flushing
- */
- void (*end_frame)(struct pipe_video_decode_buffer *decbuf);
-};
-
-/**
- * output for decoding / input for displaying
- */
-struct pipe_video_buffer
-{
- struct pipe_context *context;
-
- enum pipe_format buffer_format;
- enum pipe_video_chroma_format chroma_format;
- unsigned width;
- unsigned height;
-
- /**
- * destroy this video buffer
- */
- void (*destroy)(struct pipe_video_buffer *buffer);
-
- /**
- * get a individual sampler view for each plane
- */
- struct pipe_sampler_view **(*get_sampler_view_planes)(struct pipe_video_buffer *buffer);
-
- /**
- * get a individual sampler view for each component
- */
- struct pipe_sampler_view **(*get_sampler_view_components)(struct pipe_video_buffer *buffer);
-
- /**
- * get a individual surfaces for each plane
- */
- struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
-};
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* PIPE_VIDEO_CONTEXT_H */
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_VIDEO_DECODER_H
+#define PIPE_VIDEO_DECODER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <pipe/p_video_state.h>
+
+struct pipe_screen;
+struct pipe_surface;
+struct pipe_macroblock;
+struct pipe_picture_desc;
+struct pipe_fence_handle;
+
+/**
+ * Gallium video decoder for a specific codec/profile
+ */
+struct pipe_video_decoder
+{
+ struct pipe_context *context;
+
+ enum pipe_video_profile profile;
+ enum pipe_video_entrypoint entrypoint;
+ enum pipe_video_chroma_format chroma_format;
+ unsigned width;
+ unsigned height;
+
+ /**
+ * destroy this video decoder
+ */
+ void (*destroy)(struct pipe_video_decoder *decoder);
+
+ /**
+ * Creates a buffer as decoding input
+ */
+ struct pipe_video_decode_buffer *(*create_buffer)(struct pipe_video_decoder *decoder);
+
+ /**
+ * flush decoder buffer to video hardware
+ */
+ void (*flush_buffer)(struct pipe_video_decode_buffer *decbuf,
+ unsigned num_ycbcr_blocks[3],
+ struct pipe_video_buffer *ref_frames[2],
+ struct pipe_video_buffer *dst);
+};
+
+/**
+ * input buffer for a decoder
+ */
+struct pipe_video_decode_buffer
+{
+ struct pipe_video_decoder *decoder;
+
+ /**
+ * destroy this decode buffer
+ */
+ void (*destroy)(struct pipe_video_decode_buffer *decbuf);
+
+ /**
+ * map the input buffer into memory before starting decoding
+ */
+ void (*begin_frame)(struct pipe_video_decode_buffer *decbuf);
+
+ /**
+ * set the quantization matrices
+ */
+ void (*set_quant_matrix)(struct pipe_video_decode_buffer *decbuf,
+ const uint8_t intra_matrix[64],
+ const uint8_t non_intra_matrix[64]);
+
+ /**
+ * get the pointer where to put the ycbcr blocks of a component
+ */
+ struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decode_buffer *, int component);
+
+ /**
+ * get the pointer where to put the ycbcr dct block data of a component
+ */
+ short *(*get_ycbcr_buffer)(struct pipe_video_decode_buffer *, int component);
+
+ /**
+ * get the stride of the mv buffer
+ */
+ unsigned (*get_mv_stream_stride)(struct pipe_video_decode_buffer *decbuf);
+
+ /**
+ * get the pointer where to put the motion vectors of a ref frame
+ */
+ struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decode_buffer *decbuf, int ref_frame);
+
+ /**
+ * decode a bitstream
+ */
+ void (*decode_bitstream)(struct pipe_video_decode_buffer *decbuf,
+ unsigned num_bytes, const void *data,
+ struct pipe_mpeg12_picture_desc *picture,
+ unsigned num_ycbcr_blocks[3]);
+
+ /**
+ * unmap decoder buffer before flushing
+ */
+ void (*end_frame)(struct pipe_video_decode_buffer *decbuf);
+};
+
+/**
+ * output for decoding / input for displaying
+ */
+struct pipe_video_buffer
+{
+ struct pipe_context *context;
+
+ enum pipe_format buffer_format;
+ enum pipe_video_chroma_format chroma_format;
+ unsigned width;
+ unsigned height;
+
+ /**
+ * destroy this video buffer
+ */
+ void (*destroy)(struct pipe_video_buffer *buffer);
+
+ /**
+ * get an individual sampler view for each plane
+ */
+ struct pipe_sampler_view **(*get_sampler_view_planes)(struct pipe_video_buffer *buffer);
+
+ /**
+ * get an individual sampler view for each component
+ */
+ struct pipe_sampler_view **(*get_sampler_view_components)(struct pipe_video_buffer *buffer);
+
+ /**
+ * get an individual surface for each plane
+ */
+ struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PIPE_VIDEO_DECODER_H */
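For illustration only (editor's sketch, not part of this change): the per-frame flow implied by the decoder and decode-buffer interfaces above, assuming a bitstream-entrypoint MPEG-2 decoder and caller-provided picture description, reference frames and destination buffer. The function name and the create/destroy of a decode buffer per frame are assumptions kept for brevity; a state tracker would normally keep one decode buffer per surface, as the XvMC code further below does.

#include <pipe/p_video_decoder.h>

/* editor's sketch of one frame going through the interfaces declared above */
static void
decode_one_frame(struct pipe_video_decoder *dec,
                 const void *data, unsigned num_bytes,
                 const uint8_t intra_matrix[64],
                 const uint8_t non_intra_matrix[64],
                 struct pipe_mpeg12_picture_desc *picture,
                 struct pipe_video_buffer *ref_frames[2],
                 struct pipe_video_buffer *dst)
{
   struct pipe_video_decode_buffer *buf = dec->create_buffer(dec);
   unsigned num_ycbcr_blocks[3] = { 0, 0, 0 };

   buf->begin_frame(buf);                               /* map the input buffer */
   buf->set_quant_matrix(buf, intra_matrix, non_intra_matrix);
   buf->decode_bitstream(buf, num_bytes, data, picture, num_ycbcr_blocks);
   buf->end_frame(buf);                                 /* unmap before flushing */

   dec->flush_buffer(buf, num_ycbcr_blocks, ref_frames, dst);
   buf->destroy(buf);
}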
*
**************************************************************************/
-#include <pipe/p_video_context.h>
-
#include <util/u_memory.h>
#include <util/u_math.h>
#include <util/u_debug.h>
VdpDecoder *decoder)
{
enum pipe_video_profile p_profile;
- struct pipe_video_context *vpipe;
+ struct pipe_context *pipe;
vlVdpDevice *dev;
vlVdpDecoder *vldecoder;
VdpStatus ret;
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
- vpipe = dev->context->vpipe;
+ pipe = dev->context->pipe;
vldecoder = CALLOC(1,sizeof(vlVdpDecoder));
if (!vldecoder)
vldecoder->device = dev;
// TODO: Define max_references. Used mainly for H264
- vldecoder->decoder = vpipe->create_decoder
+ vldecoder->decoder = pipe->create_video_decoder
(
- vpipe, p_profile,
+ pipe, p_profile,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CHROMA_FORMAT_420,
width, height
**************************************************************************/
#include <pipe/p_compiler.h>
-#include <pipe/p_video_context.h>
#include <util/u_memory.h>
#include <util/u_debug.h>
VdpVideoMixer *mixer)
{
vlVdpVideoMixer *vmixer = NULL;
- struct pipe_video_context *context;
VdpStatus ret;
float csc[16];
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
- context = dev->context->vpipe;
-
vmixer = CALLOC(1, sizeof(vlVdpVideoMixer));
if (!vmixer)
return VDP_STATUS_RESOURCES;
VdpOutputSurface *surface)
{
struct pipe_context *pipe;
- struct pipe_video_context *context;
struct pipe_resource res_tmpl, *res;
struct pipe_sampler_view sv_templ;
struct pipe_surface surf_templ;
return VDP_STATUS_INVALID_HANDLE;
pipe = dev->context->pipe;
- context = dev->context->vpipe;
- if (!pipe || !context)
+ if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
vlsurface = CALLOC(1, sizeof(vlVdpOutputSurface));
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
res_tmpl.usage = PIPE_USAGE_STATIC;
- res = context->screen->resource_create(context->screen, &res_tmpl);
+ res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
if (!res) {
FREE(dev);
return VDP_STATUS_ERROR;
VdpPresentationQueue *presentation_queue)
{
vlVdpPresentationQueue *pq = NULL;
- struct pipe_video_context *context;
VdpStatus ret;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating PresentationQueue\n");
if (dev != pqt->device)
return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
- context = dev->context->vpipe;
-
pq = CALLOC(1, sizeof(vlVdpPresentationQueue));
if (!pq)
return VDP_STATUS_RESOURCES;
vl_compositor_render(&pq->compositor, PIPE_MPEG12_PICTURE_TYPE_FRAME,
drawable_surface, NULL, NULL);
- pq->device->context->vpipe->screen->flush_frontbuffer
+ pq->device->context->pipe->screen->flush_frontbuffer
(
- pq->device->context->vpipe->screen,
+ pq->device->context->pipe->screen,
drawable_surface->texture,
0, 0,
vl_contextprivate_get(pq->device->context, drawable_surface)
#include <assert.h>
-#include <pipe/p_video_context.h>
#include <pipe/p_state.h>
#include <util/u_memory.h>
{
enum pipe_format pformat = FormatToPipe(source_ycbcr_format);
struct pipe_context *pipe;
- struct pipe_video_context *context;
struct pipe_sampler_view **sampler_views;
unsigned i;
return VDP_STATUS_INVALID_HANDLE;
pipe = p_surf->device->context->pipe;
- context = p_surf->device->context->vpipe;
- if (!pipe && !context)
+ if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
if (p_surf->video_buffer == NULL || pformat != p_surf->video_buffer->buffer_format) {
#include <vdpau/vdpau_x11.h>
#include <pipe/p_compiler.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include <util/u_debug.h>
#include <vl/vl_compositor.h>
#include <X11/extensions/XvMClib.h>
#include <pipe/p_screen.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include <pipe/p_video_state.h>
#include <pipe/p_state.h>
return BadAlloc;
}
- context_priv->decoder = vctx->vpipe->create_decoder(vctx->vpipe,
- ProfileToPipe(mc_type),
- (mc_type & XVMC_IDCT) ?
- PIPE_VIDEO_ENTRYPOINT_IDCT :
- PIPE_VIDEO_ENTRYPOINT_MC,
- FormatToPipe(chroma_format),
- width, height);
+ context_priv->decoder = vctx->pipe->create_video_decoder
+ (
+ vctx->pipe,
+ ProfileToPipe(mc_type),
+ (mc_type & XVMC_IDCT) ? PIPE_VIDEO_ENTRYPOINT_IDCT : PIPE_VIDEO_ENTRYPOINT_MC,
+ FormatToPipe(chroma_format),
+ width, height
+ );
if (!context_priv->decoder) {
XVMC_MSG(XVMC_ERR, "[XvMC] Could not create VL decoder.\n");
#include <xorg/fourcc.h>
#include <pipe/p_screen.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include <pipe/p_state.h>
#include <util/u_memory.h>
XvMCContextPrivate *context_priv;
XvMCSubpicturePrivate *subpicture_priv;
struct pipe_context *pipe;
- struct pipe_video_context *vpipe;
struct pipe_resource tex_templ, *tex;
struct pipe_sampler_view sampler_templ;
Status ret;
context_priv = context->privData;
pipe = context_priv->vctx->pipe;
- vpipe = context_priv->vctx->vpipe;
if (!subpicture)
return XvMCBadSubpicture;
tex_templ.target = PIPE_TEXTURE_2D;
tex_templ.format = XvIDToPipe(xvimage_id);
tex_templ.last_level = 0;
- if (vpipe->screen->get_video_param(vpipe->screen,
- PIPE_VIDEO_PROFILE_UNKNOWN,
- PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
+ if (pipe->screen->get_video_param(pipe->screen,
+ PIPE_VIDEO_PROFILE_UNKNOWN,
+ PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
tex_templ.width0 = width;
tex_templ.height0 = height;
}
tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
tex_templ.flags = 0;
- tex = vpipe->screen->resource_create(vpipe->screen, &tex_templ);
+ tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
memset(&sampler_templ, 0, sizeof(sampler_templ));
u_sampler_view_default_template(&sampler_templ, tex, tex->format);
tex_templ.height0 = 1;
tex_templ.usage = PIPE_USAGE_STATIC;
- tex = vpipe->screen->resource_create(vpipe->screen, &tex_templ);
+ tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
memset(&sampler_templ, 0, sizeof(sampler_templ));
u_sampler_view_default_template(&sampler_templ, tex, tex->format);
#include <X11/Xlibint.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_video_decoder.h>
#include <pipe/p_video_state.h>
#include <pipe/p_state.h>
XvMCContextPrivate *context_priv;
struct pipe_context *pipe;
- struct pipe_video_context *vpipe;
XvMCSurfacePrivate *surface_priv;
XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
context_priv = context->privData;
pipe = context_priv->vctx->pipe;
- vpipe = context_priv->vctx->vpipe;
surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
if (!surface_priv)
XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
)
{
- struct pipe_video_context *vpipe;
struct pipe_video_decode_buffer *t_buffer;
- XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
XvMCSurfacePrivate *past_surface_priv;
XvMCSurfacePrivate *future_surface_priv;
assert(!past_surface || past_surface_priv->context == context);
assert(!future_surface || future_surface_priv->context == context);
- context_priv = context->privData;
- vpipe = context_priv->vctx->vpipe;
-
t_buffer = target_surface_priv->decode_buffer;
// ensure that all reference frames are flushed
{
static int dump_window = -1;
- struct pipe_video_context *vpipe;
+ struct pipe_context *pipe;
struct vl_compositor *compositor;
XvMCSurfacePrivate *surface_priv;
assert(srcy + srch - 1 < surface->height);
subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
- vpipe = context_priv->vctx->vpipe;
+ pipe = context_priv->vctx->pipe;
compositor = &context_priv->compositor;
if (!context_priv->drawable_surface ||
}
// Workaround for r600g, there seems to be a bug in the fence refcounting code
- vpipe->screen->fence_reference(vpipe->screen, &surface_priv->fence, NULL);
+ pipe->screen->fence_reference(pipe->screen, &surface_priv->fence, NULL);
vl_compositor_render(compositor, PictureToPipe(flags), context_priv->drawable_surface, &dst_rect, &surface_priv->fence);
XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
- vpipe->screen->flush_frontbuffer
+ pipe->screen->flush_frontbuffer
(
- vpipe->screen,
+ pipe->screen,
context_priv->drawable_surface->texture,
0, 0,
vl_contextprivate_get(context_priv->vctx, context_priv->drawable_surface)
PUBLIC
Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
{
- struct pipe_video_context *vpipe;
+ struct pipe_context *pipe;
XvMCSurfacePrivate *surface_priv;
XvMCContextPrivate *context_priv;
surface_priv = surface->privData;
context_priv = surface_priv->context->privData;
- vpipe = context_priv->vctx->vpipe;
+ pipe = context_priv->vctx->pipe;
*status = 0;
if (surface_priv->fence)
- if (!vpipe->screen->fence_signalled(vpipe->screen, surface_priv->fence))
+ if (!pipe->screen->fence_signalled(pipe->screen, surface_priv->fence))
*status |= XVMC_RENDERING;
return Success;
#include <vl_winsys.h>
#include <driclient.h>
-#include <pipe/p_video_context.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_context.h>
#include <pipe/p_state.h>
#include <util/u_memory.h>
#include <util/u_hash.h>
#include <util/u_hash_table.h>
+#include <util/u_inlines.h>
#include <state_tracker/drm_driver.h>
#include <X11/Xlibint.h>
struct vl_dri_screen *vl_dri_scrn = (struct vl_dri_screen*)vscreen;
struct vl_dri_context *vl_dri_ctx;
- if (!vscreen->pscreen->video_context_create) {
- debug_printf("[G3DVL] No video support found on %s/%s.\n",
- vscreen->pscreen->get_vendor(vscreen->pscreen),
- vscreen->pscreen->get_name(vscreen->pscreen));
- goto no_vpipe;
- }
-
vl_dri_ctx = CALLOC_STRUCT(vl_dri_context);
if (!vl_dri_ctx)
goto no_struct;
goto no_pipe;
}
- vl_dri_ctx->base.vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, vl_dri_ctx->base.pipe);
-
- if (!vl_dri_ctx->base.vpipe)
- goto no_pipe;
-
vl_dri_ctx->base.vscreen = vscreen;
vl_dri_ctx->fd = vl_dri_scrn->dri_screen->fd;
FREE(vl_dri_ctx);
no_struct:
-no_vpipe:
return NULL;
}
assert(vctx);
vl_dri_ctx->base.pipe->destroy(vl_dri_ctx->base.pipe);
- vl_dri_ctx->base.vpipe->destroy(vl_dri_ctx->base.vpipe);
FREE(vl_dri_ctx);
}
#include <pipe/p_format.h>
struct pipe_screen;
-struct pipe_video_context;
struct pipe_surface;
struct vl_screen
{
struct vl_screen *vscreen;
struct pipe_context *pipe;
- struct pipe_video_context *vpipe;
};
struct vl_screen*
#include <X11/Xlibint.h>
#include <pipe/p_state.h>
-#include <pipe/p_video_context.h>
#include <util/u_memory.h>
#include <util/u_format.h>
vl_video_create(struct vl_screen *vscreen)
{
struct pipe_context *pipe;
- struct pipe_video_context *vpipe;
struct vl_context *vctx;
assert(vscreen);
- assert(vscreen->pscreen->video_context_create);
pipe = vscreen->pscreen->context_create(vscreen->pscreen, NULL);
if (!pipe)
return NULL;
- vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, pipe);
- if (!vpipe) {
- pipe->destroy(pipe);
- return NULL;
- }
-
vctx = CALLOC_STRUCT(vl_context);
if (!vctx) {
pipe->destroy(pipe);
- vpipe->destroy(vpipe);
return NULL;
}
- vctx->vpipe = vpipe;
+ vctx->pipe = pipe;
vctx->vscreen = vscreen;
return vctx;
assert(vctx);
vctx->pipe->destroy(vctx->pipe);
- vctx->vpipe->destroy(vctx->vpipe);
FREE(vctx);
}