return NULL;
}
-static struct pipe_video_buffer *
-vl_context_create_buffer(struct pipe_video_context *context,
- enum pipe_format buffer_format,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height)
-{
- struct vl_context *ctx = (struct vl_context*)context;
- const enum pipe_format *resource_formats;
- struct pipe_video_buffer *result;
- unsigned buffer_width, buffer_height;
- bool pot_buffers;
-
- assert(context);
- assert(width > 0 && height > 0);
-
- pot_buffers = !ctx->base.screen->get_video_param
- (
- ctx->base.screen,
- PIPE_VIDEO_PROFILE_UNKNOWN,
- PIPE_VIDEO_CAP_NPOT_TEXTURES
- );
-
- resource_formats = vl_video_buffer_formats(ctx->pipe->screen, buffer_format);
- if (!resource_formats)
- return NULL;
-
- buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
- buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
-
- result = vl_video_buffer_init(context, ctx->pipe,
- buffer_width, buffer_height, 1,
- chroma_format,
- resource_formats,
- PIPE_USAGE_STATIC);
- if (result) // TODO move format handling into vl_video_buffer
- result->buffer_format = buffer_format;
-
- return result;
-}
-
struct pipe_video_context *
vl_create_context(struct pipe_context *pipe)
{
ctx->base.destroy = vl_context_destroy;
ctx->base.create_decoder = vl_context_create_decoder;
- ctx->base.create_buffer = vl_context_create_buffer;
ctx->pipe = pipe;
dec = (struct vl_mpeg12_decoder*)buffer->base.decoder;
formats[0] = formats[1] = formats[2] = dec->zscan_source_format;
- buffer->zscan_source = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
- align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
- 1, PIPE_VIDEO_CHROMA_FORMAT_444,
- formats, PIPE_USAGE_STATIC);
+ buffer->zscan_source = vl_video_buffer_create_ex
+ (
+ dec->pipe,
+ dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
+ align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
+ 1, PIPE_VIDEO_CHROMA_FORMAT_444, formats, PIPE_USAGE_STATIC
+ );
+
if (!buffer->zscan_source)
goto error_source;
nr_of_idct_render_targets = 1;
formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
- dec->idct_source = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->base.width / 4, dec->base.height, 1,
- dec->base.chroma_format,
- formats, PIPE_USAGE_STATIC);
+ dec->idct_source = vl_video_buffer_create_ex
+ (
+ dec->pipe, dec->base.width / 4, dec->base.height, 1,
+ dec->base.chroma_format, formats, PIPE_USAGE_STATIC
+ );
+
if (!dec->idct_source)
goto error_idct_source;
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
- dec->mc_source = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->base.width / nr_of_idct_render_targets,
- dec->base.height / 4, nr_of_idct_render_targets,
- dec->base.chroma_format,
- formats, PIPE_USAGE_STATIC);
+ dec->mc_source = vl_video_buffer_create_ex
+ (
+ dec->pipe, dec->base.width / nr_of_idct_render_targets,
+ dec->base.height / 4, nr_of_idct_render_targets,
+ dec->base.chroma_format, formats, PIPE_USAGE_STATIC
+ );
if (!dec->mc_source)
goto error_mc_source;
enum pipe_format formats[3];
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
- dec->mc_source = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->base.width, dec->base.height, 1,
- dec->base.chroma_format,
- formats, PIPE_USAGE_STATIC);
-
+ dec->mc_source = vl_video_buffer_create_ex
+ (
+ dec->pipe, dec->base.width, dec->base.height, 1,
+ dec->base.chroma_format, formats, PIPE_USAGE_STATIC
+ );
+
return dec->mc_source != NULL;
}
assert(buf);
- pipe = buf->pipe;
+ pipe = buf->base.context;
for (i = 0; i < buf->num_planes; ++i ) {
if (!buf->sampler_view_planes[i]) {
assert(buf);
- pipe = buf->pipe;
+ pipe = buf->base.context;
for (component = 0, i = 0; i < buf->num_planes; ++i ) {
unsigned nr_components = util_format_get_nr_components(buf->resources[i]->format);
assert(buf);
- pipe = buf->pipe;
+ pipe = buf->base.context;
for (i = 0; i < buf->num_planes; ++i ) {
if (!buf->surfaces[i]) {
}
struct pipe_video_buffer *
-vl_video_buffer_init(struct pipe_video_context *context,
- struct pipe_context *pipe,
- unsigned width, unsigned height, unsigned depth,
- enum pipe_video_chroma_format chroma_format,
- const enum pipe_format resource_formats[VL_MAX_PLANES],
- unsigned usage)
+vl_video_buffer_create(struct pipe_context *pipe,
+ enum pipe_format buffer_format,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height)
+{
+ const enum pipe_format *resource_formats;
+ struct pipe_video_buffer *result;
+ unsigned buffer_width, buffer_height;
+ bool pot_buffers;
+
+ assert(pipe);
+ assert(width > 0 && height > 0);
+
+ pot_buffers = !pipe->screen->get_video_param
+ (
+ pipe->screen,
+ PIPE_VIDEO_PROFILE_UNKNOWN,
+ PIPE_VIDEO_CAP_NPOT_TEXTURES
+ );
+
+ resource_formats = vl_video_buffer_formats(pipe->screen, buffer_format);
+ if (!resource_formats)
+ return NULL;
+
+ buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
+ buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
+
+ result = vl_video_buffer_create_ex
+ (
+ pipe, buffer_width, buffer_height, 1,
+ chroma_format, resource_formats, PIPE_USAGE_STATIC
+ );
+ if (result)
+ result->buffer_format = buffer_format;
+
+ return result;
+}
+
+struct pipe_video_buffer *
+vl_video_buffer_create_ex(struct pipe_context *pipe,
+ unsigned width, unsigned height, unsigned depth,
+ enum pipe_video_chroma_format chroma_format,
+ const enum pipe_format resource_formats[VL_MAX_PLANES],
+ unsigned usage)
{
struct vl_video_buffer *buffer;
struct pipe_resource templ;
unsigned i;
- assert(context && pipe);
+ assert(pipe);
buffer = CALLOC_STRUCT(vl_video_buffer);
+ buffer->base.context = pipe;
buffer->base.destroy = vl_video_buffer_destroy;
buffer->base.get_sampler_view_planes = vl_video_buffer_sampler_view_planes;
buffer->base.get_sampler_view_components = vl_video_buffer_sampler_view_components;
buffer->base.chroma_format = chroma_format;
buffer->base.width = width;
buffer->base.height = height;
- buffer->pipe = pipe;
buffer->num_planes = 1;
memset(&templ, 0, sizeof(templ));
struct vl_video_buffer
{
struct pipe_video_buffer base;
- struct pipe_context *pipe;
unsigned num_planes;
struct pipe_resource *resources[VL_MAX_PLANES];
struct pipe_sampler_view *sampler_view_planes[VL_MAX_PLANES];
vl_video_buffer_is_format_supported(struct pipe_screen *screen,
enum pipe_format format,
enum pipe_video_profile profile);
+
+/**
+ * creates a video buffer, can be used as a standard implementation for pipe->create_video_buffer
+ */
+struct pipe_video_buffer *
+vl_video_buffer_create(struct pipe_context *pipe,
+ enum pipe_format buffer_format,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height);
/**
- * initialize a buffer, creating its resources
+ * extended create function, gets depth, usage and formats for each plane separately
*/
struct pipe_video_buffer *
-vl_video_buffer_init(struct pipe_video_context *context,
- struct pipe_context *pipe,
- unsigned width, unsigned height, unsigned depth,
- enum pipe_video_chroma_format chroma_format,
- const enum pipe_format resource_formats[VL_MAX_PLANES],
- unsigned usage);
+vl_video_buffer_create_ex(struct pipe_context *pipe,
+ unsigned width, unsigned height, unsigned depth,
+ enum pipe_video_chroma_format chroma_format,
+ const enum pipe_format resource_formats[VL_MAX_PLANES],
+ unsigned usage);
+
#endif /* vl_ycbcr_buffer_h */
#include "util/u_simple_list.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
+#include "vl/vl_video_buffer.h"
#include "r300_cb.h"
#include "r300_context.h"
r300_init_query_functions(r300);
r300_init_state_functions(r300);
r300_init_resource_functions(r300);
+
+ r300->context.create_video_buffer = vl_video_buffer_create;
r300->vbuf_mgr = u_vbuf_mgr_create(&r300->context, 1024 * 1024, 16,
PIPE_BIND_VERTEX_BUFFER |
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
rctx->context.draw_vbo = r600_draw_vbo;
+ rctx->context.create_video_buffer = vl_video_buffer_create;
switch (r600_get_family(rctx->radeon)) {
case CHIP_R600:
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "tgsi/tgsi_exec.h"
+#include "vl/vl_video_buffer.h"
#include "sp_clear.h"
#include "sp_context.h"
#include "sp_flush.h"
softpipe->pipe.flush = softpipe_flush_wrapped;
softpipe->pipe.render_condition = softpipe_render_condition;
+
+ softpipe->pipe.create_video_buffer = vl_video_buffer_create;
/*
* Alloc caches for accessing drawing surfaces and textures.
struct pipe_vertex_element;
struct pipe_viewport_state;
+enum pipe_video_chroma_format;
+enum pipe_format;
+
/**
* Gallium rendering context. Basically:
* - state setting functions
* Flush any pending framebuffer writes and invalidate texture caches.
*/
void (*texture_barrier)(struct pipe_context *);
+
+ /**
+ * Creates a video buffer as decoding target
+ */
+ struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context,
+ enum pipe_format buffer_format,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height );
};
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height);
- /**
- * Creates a buffer as decoding target
- */
- struct pipe_video_buffer *(*create_buffer)(struct pipe_video_context *context,
- enum pipe_format buffer_format,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height);
};
/**
*/
struct pipe_video_buffer
{
- struct pipe_video_context *context;
+ struct pipe_context *context;
enum pipe_format buffer_format;
enum pipe_video_chroma_format chroma_format;
}
p_surf->device = dev;
- p_surf->video_buffer = dev->context->vpipe->create_buffer
+ p_surf->video_buffer = dev->context->pipe->create_video_buffer
(
- dev->context->vpipe,
+ dev->context->pipe,
PIPE_FORMAT_YV12, // most common used
ChromaToPipe(chroma_type),
width, height
};
XvMCContextPrivate *context_priv;
+ struct pipe_context *pipe;
struct pipe_video_context *vpipe;
XvMCSurfacePrivate *surface_priv;
return XvMCBadSurface;
context_priv = context->privData;
+ pipe = context_priv->vctx->pipe;
vpipe = context_priv->vctx->vpipe;
surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
surface_priv->decode_buffer->set_quant_matrix(surface_priv->decode_buffer, dummy_quant, dummy_quant);
surface_priv->mv_stride = surface_priv->decode_buffer->get_mv_stream_stride(surface_priv->decode_buffer);
- surface_priv->video_buffer = vpipe->create_buffer(vpipe, PIPE_FORMAT_NV12,
- context_priv->decoder->chroma_format,
- context_priv->decoder->width,
- context_priv->decoder->height);
+ surface_priv->video_buffer = pipe->create_video_buffer
+ (
+ pipe, PIPE_FORMAT_NV12, context_priv->decoder->chroma_format,
+ context_priv->decoder->width, context_priv->decoder->height
+ );
+
surface_priv->context = context;
surface->surface_id = XAllocID(dpy);