--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_compositor.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_keymap.h>
+#include <util/u_sampler.h>
+#include <tgsi/tgsi_ureg.h>
+#include "vl_csc.h"
+
+struct vertex_shader_consts
+{
+ struct vertex4f dst_scale;
+ struct vertex4f dst_trans;
+ struct vertex4f src_scale;
+ struct vertex4f src_trans;
+};
+
+struct fragment_shader_consts
+{
+ float matrix[16];
+};
+
+static bool
+u_video_rects_equal(struct pipe_video_rect *a, struct pipe_video_rect *b)
+{
+ assert(a && b);
+
+ if (a->x != b->x)
+ return false;
+ if (a->y != b->y)
+ return false;
+ if (a->w != b->w)
+ return false;
+ if (a->h != b->h)
+ return false;
+
+ return true;
+}
+
+static bool
+create_vert_shader(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex;
+ struct ureg_dst o_vpos, o_vtex;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ vtex = ureg_DECL_vs_input(shader, 1);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex = vtex
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ ureg_MOV(shader, o_vtex, vtex);
+
+ ureg_END(shader);
+
+ c->vertex_shader = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->vertex_shader)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader_ycbcr_2_rgb(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src csc[4];
+ struct ureg_src sampler;
+ struct ureg_dst texel;
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ for (i = 0; i < 4; ++i)
+ csc[i] = ureg_DECL_constant(shader, i);
+ sampler = ureg_DECL_sampler(shader, 0);
+ texel = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel = tex(tc, sampler)
+ * fragment = csc * texel
+ */
+ ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
+ for (i = 0; i < 4; ++i)
+ ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
+
+ ureg_release_temporary(shader, texel);
+ ureg_END(shader);
+
+ c->fragment_shader.ycbcr_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader.ycbcr_2_rgb)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader_rgb_2_rgb(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src sampler;
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ sampler = ureg_DECL_sampler(shader, 0);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * fragment = tex(tc, sampler)
+ */
+ ureg_TEX(shader, fragment, TGSI_TEXTURE_2D, tc, sampler);
+ ureg_END(shader);
+
+ c->fragment_shader.rgb_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader.rgb_2_rgb)
+ return false;
+
+ return true;
+}
+
+static bool
+init_pipe_state(struct vl_compositor *c)
+{
+ struct pipe_sampler_state sampler;
+
+ assert(c);
+
+ c->fb_state.nr_cbufs = 1;
+ c->fb_state.zsbuf = NULL;
+
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.lod_bias = ;*/
+ /*sampler.min_lod = ;*/
+ /*sampler.max_lod = ;*/
+ /*sampler.border_color[i] = ;*/
+ /*sampler.max_anisotropy = ;*/
+ c->sampler = c->pipe->create_sampler_state(c->pipe, &sampler);
+
+ return true;
+}
+
+static void cleanup_pipe_state(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_sampler_state(c->pipe, c->sampler);
+}
+
+static bool
+init_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ if (!create_vert_shader(c)) {
+ debug_printf("Unable to create vertex shader.\n");
+ return false;
+ }
+ if (!create_frag_shader_ycbcr_2_rgb(c)) {
+ debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
+ return false;
+ }
+ if (!create_frag_shader_rgb_2_rgb(c)) {
+ debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void cleanup_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vs_state(c->pipe, c->vertex_shader);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader.ycbcr_2_rgb);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader.rgb_2_rgb);
+}
+
+static bool
+init_buffers(struct vl_compositor *c)
+{
+ struct fragment_shader_consts fsc;
+ struct pipe_vertex_element vertex_elems[2];
+
+ assert(c);
+
+ /*
+ * Create our vertex buffer and vertex buffer elements
+ */
+ c->vertex_buf.stride = sizeof(struct vertex4f);
+ c->vertex_buf.buffer_offset = 0;
+ /* XXX: Create with DYNAMIC or STREAM */
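+   /* Six vertices (two triangles) per quad: one quad for the background, one for the video surface, plus one per overlay layer */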
+ c->vertex_buf.buffer = pipe_buffer_create
+ (
+ c->pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STATIC,
+ sizeof(struct vertex4f) * (VL_COMPOSITOR_MAX_LAYERS + 2) * 6
+ );
+
+ vertex_elems[0].src_offset = 0;
+ vertex_elems[0].instance_divisor = 0;
+ vertex_elems[0].vertex_buffer_index = 0;
+ vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ vertex_elems[1].src_offset = sizeof(struct vertex2f);
+ vertex_elems[1].instance_divisor = 0;
+ vertex_elems[1].vertex_buffer_index = 0;
+ vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);
+
+ /*
+ * Create our fragment shader's constant buffer
+ * Const buffer contains the color conversion matrix and bias vectors
+ */
+ /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
+ c->fs_const_buf = pipe_buffer_create
+ (
+ c->pipe->screen,
+ PIPE_BIND_CONSTANT_BUFFER,
+ PIPE_USAGE_STATIC,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix);
+
+ vl_compositor_set_csc_matrix(c, fsc.matrix);
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
+ pipe_resource_reference(&c->vertex_buf.buffer, NULL);
+ pipe_resource_reference(&c->fs_const_buf, NULL);
+}
+
+static void
+texview_map_delete(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ pipe_sampler_view_reference(&sv, NULL);
+}
+
+bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe)
+{
+ unsigned i;
+
+ assert(compositor);
+
+ memset(compositor, 0, sizeof(struct vl_compositor));
+
+ compositor->pipe = pipe;
+
+ compositor->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
+ texview_map_delete);
+ if (!compositor->texview_map)
+ return false;
+
+ if (!init_pipe_state(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ return false;
+ }
+ if (!init_shaders(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+ if (!init_buffers(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+
+ compositor->fb_state.width = 0;
+ compositor->fb_state.height = 0;
+ compositor->bg = NULL;
+ compositor->dirty_bg = false;
+ for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ compositor->layers[i] = NULL;
+ compositor->dirty_layers = 0;
+
+ return true;
+}
+
+void vl_compositor_cleanup(struct vl_compositor *compositor)
+{
+ assert(compositor);
+
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_buffers(compositor);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+}
+
+void vl_compositor_set_background(struct vl_compositor *compositor,
+ struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect)
+{
+ assert(compositor);
+ assert((bg && bg_src_rect) || (!bg && !bg_src_rect));
+
+ if (compositor->bg != bg ||
+ !u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect)) {
+ pipe_surface_reference(&compositor->bg, bg);
+ /*if (!u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect))*/
+ compositor->bg_src_rect = *bg_src_rect;
+ compositor->dirty_bg = true;
+ }
+}
+
+void vl_compositor_set_layers(struct vl_compositor *compositor,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers)
+{
+ unsigned i;
+
+ assert(compositor);
+ assert(num_layers <= VL_COMPOSITOR_MAX_LAYERS);
+
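+   /*
+    * Flag a layer dirty when its surface or rects change; any layer that is
+    * still bound is re-flagged so it gets drawn again on the next render.
+    */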
+ for (i = 0; i < num_layers; ++i)
+ {
+ assert((layers[i] && src_rects[i] && dst_rects[i]) ||
+ (!layers[i] && !src_rects[i] && !dst_rects[i]));
+
+ if (compositor->layers[i] != layers[i] ||
+ !u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]) ||
+ !u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))
+ {
+ pipe_surface_reference(&compositor->layers[i], layers[i]);
+ /*if (!u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]))*/
+ compositor->layer_src_rects[i] = *src_rects[i];
+ /*if (!u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))*/
+ compositor->layer_dst_rects[i] = *dst_rects[i];
+ compositor->dirty_layers |= 1 << i;
+ }
+
+ if (layers[i])
+ compositor->dirty_layers |= 1 << i;
+ }
+
+ for (; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ pipe_surface_reference(&compositor->layers[i], NULL);
+}
+
+static void gen_rect_verts(unsigned pos,
+ struct pipe_video_rect *src_rect,
+ struct vertex2f *src_inv_size,
+ struct pipe_video_rect *dst_rect,
+ struct vertex2f *dst_inv_size,
+ struct vertex4f *vb)
+{
+ assert(pos < VL_COMPOSITOR_MAX_LAYERS + 2);
+ assert(src_rect);
+ assert(src_inv_size);
+ assert((dst_rect && dst_inv_size) /*|| (!dst_rect && !dst_inv_size)*/);
+ assert(vb);
+
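+   /*
+    * Emit one quad as two triangles (six vertices); each vertex packs the
+    * scaled destination position in .xy and the source texcoord in .zw.
+    */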
+ vb[pos * 6 + 0].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 0].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 0].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 0].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 1].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 1].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 1].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 1].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 2].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 2].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 2].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 2].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 3].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 3].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 3].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 3].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 4].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 4].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 4].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 4].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 5].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 5].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 5].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 5].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+}
+
+static unsigned gen_data(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect,
+ struct pipe_surface **textures,
+ void **frag_shaders)
+{
+ void *vb;
+ struct pipe_transfer *buf_transfer;
+ unsigned num_rects = 0;
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+ assert(textures);
+
+ vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer);
+
+ if (!vb)
+ return 0;
+
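+   /*
+    * Quads are appended in draw order: the background (if dirty), then the
+    * video surface, then every dirty overlay layer. textures[] and
+    * frag_shaders[] are filled as parallel arrays, one entry per quad.
+    */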
+ if (c->dirty_bg) {
+ struct vertex2f bg_inv_size = {1.0f / c->bg->width, 1.0f / c->bg->height};
+ gen_rect_verts(num_rects, &c->bg_src_rect, &bg_inv_size, NULL, NULL, vb);
+ textures[num_rects] = c->bg;
+ /* XXX: Hack */
+ frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
+ ++num_rects;
+ c->dirty_bg = false;
+ }
+
+ {
+ struct vertex2f src_inv_size = { 1.0f / src_surface->width, 1.0f / src_surface->height};
+ gen_rect_verts(num_rects, src_rect, &src_inv_size, dst_rect, &c->fb_inv_size, vb);
+ textures[num_rects] = src_surface;
+ /* XXX: Hack, sort of */
+ frag_shaders[num_rects] = c->fragment_shader.ycbcr_2_rgb;
+ ++num_rects;
+ }
+
+ for (i = 0; c->dirty_layers > 0; i++) {
+ assert(i < VL_COMPOSITOR_MAX_LAYERS);
+
+ if (c->dirty_layers & (1 << i)) {
+ struct vertex2f layer_inv_size = {1.0f / c->layers[i]->width, 1.0f / c->layers[i]->height};
+ gen_rect_verts(num_rects, &c->layer_src_rects[i], &layer_inv_size,
+ &c->layer_dst_rects[i], &c->fb_inv_size, vb);
+ textures[num_rects] = c->layers[i];
+ /* XXX: Hack */
+ frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
+ ++num_rects;
+ c->dirty_layers &= ~(1 << i);
+ }
+ }
+
+ pipe_buffer_unmap(c->pipe, buf_transfer);
+
+ return num_rects;
+}
+
+static void draw_layers(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect)
+{
+ unsigned num_rects;
+ struct pipe_surface *src_surfaces[VL_COMPOSITOR_MAX_LAYERS + 2];
+ void *frag_shaders[VL_COMPOSITOR_MAX_LAYERS + 2];
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+
+ num_rects = gen_data(c, src_surface, src_rect, dst_rect, src_surfaces, frag_shaders);
+
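+   /*
+    * Sampler views are cached per source surface in texview_map; a freshly
+    * created view that cannot be inserted into the cache is released after the draw.
+    */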
+ for (i = 0; i < num_rects; ++i) {
+ boolean delete_view = FALSE;
+ struct pipe_sampler_view *surface_view = (struct pipe_sampler_view*)util_keymap_lookup(c->texview_map,
+ &src_surfaces[i]);
+ if (!surface_view) {
+ struct pipe_sampler_view templat;
+ u_sampler_view_default_template(&templat, src_surfaces[i]->texture,
+ src_surfaces[i]->texture->format);
+ surface_view = c->pipe->create_sampler_view(c->pipe, src_surfaces[i]->texture,
+ &templat);
+ if (!surface_view)
+ return;
+
+ delete_view = !util_keymap_insert(c->texview_map, &src_surfaces[i],
+ surface_view, c->pipe);
+ }
+
+ c->pipe->bind_fs_state(c->pipe, frag_shaders[i]);
+ c->pipe->set_fragment_sampler_views(c->pipe, 1, &surface_view);
+
+ util_draw_arrays(c->pipe, PIPE_PRIM_TRIANGLES, i * 6, 6);
+
+ if (delete_view) {
+ pipe_sampler_view_reference(&surface_view, NULL);
+ }
+ }
+}
+
+void vl_compositor_render(struct vl_compositor *compositor,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence)
+{
+ assert(compositor);
+ assert(src_surface);
+ assert(src_area);
+ assert(dst_surface);
+ assert(dst_area);
+ assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME);
+
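+   /* Recompute the reciprocal framebuffer size only when the destination size changes */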
+ if (compositor->fb_state.width != dst_surface->width) {
+ compositor->fb_inv_size.x = 1.0f / dst_surface->width;
+ compositor->fb_state.width = dst_surface->width;
+ }
+ if (compositor->fb_state.height != dst_surface->height) {
+ compositor->fb_inv_size.y = 1.0f / dst_surface->height;
+ compositor->fb_state.height = dst_surface->height;
+ }
+
+ compositor->fb_state.cbufs[0] = dst_surface;
+
+ compositor->viewport.scale[0] = compositor->fb_state.width;
+ compositor->viewport.scale[1] = compositor->fb_state.height;
+ compositor->viewport.scale[2] = 1;
+ compositor->viewport.scale[3] = 1;
+ compositor->viewport.translate[0] = 0;
+ compositor->viewport.translate[1] = 0;
+ compositor->viewport.translate[2] = 0;
+ compositor->viewport.translate[3] = 0;
+
+ compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state);
+ compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport);
+ compositor->pipe->bind_fragment_sampler_states(compositor->pipe, 1, &compositor->sampler);
+ compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader);
+ compositor->pipe->set_vertex_buffers(compositor->pipe, 1, &compositor->vertex_buf);
+ compositor->pipe->bind_vertex_elements_state(compositor->pipe, compositor->vertex_elems_state);
+ compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, compositor->fs_const_buf);
+
+ draw_layers(compositor, src_surface, src_area, dst_area);
+
+ assert(!compositor->dirty_bg && !compositor->dirty_layers);
- compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence);
++ compositor->pipe->flush(compositor->pipe, fence);
+}
+
+void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat)
+{
+ struct pipe_transfer *buf_transfer;
+
+ assert(compositor);
+
+ memcpy
+ (
+ pipe_buffer_map(compositor->pipe, compositor->fs_const_buf,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer),
+ mat,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ pipe_buffer_unmap(compositor->pipe, buf_transfer);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
+#include "vl_mpeg12_context.h"
+#include "vl_defines.h"
+#include <pipe/p_shader_tokens.h>
+#include <util/u_keymap.h>
+#include <util/u_rect.h>
+#include <util/u_video.h>
+#include <util/u_surface.h>
+
+#define NUM_BUFFERS 2
+
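+/*
+ * Coded block pattern masks for 4:2:0, indexed [plane][y][x]:
+ * bits 5..2 select the four luma blocks, bit 1 Cb, bit 0 Cr.
+ */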
+static const unsigned const_empty_block_mask_420[3][2][2] = {
+ { { 0x20, 0x10 }, { 0x08, 0x04 } },
+ { { 0x02, 0x02 }, { 0x02, 0x02 } },
+ { { 0x01, 0x01 }, { 0x01, 0x01 } }
+};
+
+static void
+flush_buffer(struct vl_mpeg12_context *ctx)
+{
+ unsigned ne_start, ne_num, e_start, e_num;
+ assert(ctx);
+
+ if(ctx->cur_buffer != NULL) {
+
+ vl_vb_unmap(&ctx->cur_buffer->vertex_stream, ctx->pipe);
+ vl_idct_unmap_buffers(&ctx->idct_y, &ctx->cur_buffer->idct_y);
+ vl_idct_unmap_buffers(&ctx->idct_cr, &ctx->cur_buffer->idct_cr);
+ vl_idct_unmap_buffers(&ctx->idct_cb, &ctx->cur_buffer->idct_cb);
+ vl_vb_restart(&ctx->cur_buffer->vertex_stream,
+ &ne_start, &ne_num, &e_start, &e_num);
+
+ ctx->pipe->set_vertex_buffers(ctx->pipe, 2, ctx->cur_buffer->vertex_bufs.all);
+ ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->vertex_elems_state);
+ vl_idct_flush(&ctx->idct_y, &ctx->cur_buffer->idct_y, ne_num);
+ vl_idct_flush(&ctx->idct_cr, &ctx->cur_buffer->idct_cr, ne_num);
+ vl_idct_flush(&ctx->idct_cb, &ctx->cur_buffer->idct_cb, ne_num);
+ vl_mpeg12_mc_renderer_flush(&ctx->mc_renderer, &ctx->cur_buffer->mc,
+ ne_start, ne_num, e_start, e_num);
+
+ ctx->cur_buffer = NULL;
+ }
+}
+
+static void
+rotate_buffer(struct vl_mpeg12_context *ctx)
+{
+ struct pipe_resource *y, *cr, *cb;
+ static unsigned key = 0;
+ struct vl_mpeg12_buffer *buffer;
+
+ assert(ctx);
+
+ flush_buffer(ctx);
+
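+   /*
+    * Round-robin over NUM_BUFFERS decode buffers, keyed by a static counter;
+    * a buffer is created lazily on first use and cached in buffer_map.
+    */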
+ buffer = (struct vl_mpeg12_buffer*)util_keymap_lookup(ctx->buffer_map, &key);
+ if (!buffer) {
+ boolean added_to_map;
+
+ buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
+ if (buffer == NULL)
+ return;
+
+ buffer->vertex_bufs.individual.quad.stride = ctx->quads.stride;
+ buffer->vertex_bufs.individual.quad.buffer_offset = ctx->quads.buffer_offset;
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, ctx->quads.buffer);
+
+ buffer->vertex_bufs.individual.stream = vl_vb_init(&buffer->vertex_stream, ctx->pipe,
+ ctx->vertex_buffer_size);
+ if (!(y = vl_idct_init_buffer(&ctx->idct_y, &buffer->idct_y))) {
+ FREE(buffer);
+ return;
+ }
+
+ if (!(cr = vl_idct_init_buffer(&ctx->idct_cr, &buffer->idct_cr))) {
+ FREE(buffer);
+ return;
+ }
+
+ if (!(cb = vl_idct_init_buffer(&ctx->idct_cb, &buffer->idct_cb))) {
+ FREE(buffer);
+ return;
+ }
+
+ if(!vl_mpeg12_mc_init_buffer(&ctx->mc_renderer, &buffer->mc, y, cr, cb)) {
+ FREE(buffer);
+ return;
+ }
+
+ added_to_map = util_keymap_insert(ctx->buffer_map, &key, buffer, ctx);
+ assert(added_to_map);
+ }
+ ++key;
+ key %= NUM_BUFFERS;
+ ctx->cur_buffer = buffer;
+
+ vl_vb_map(&ctx->cur_buffer->vertex_stream, ctx->pipe);
+ vl_idct_map_buffers(&ctx->idct_y, &ctx->cur_buffer->idct_y);
+ vl_idct_map_buffers(&ctx->idct_cr, &ctx->cur_buffer->idct_cr);
+ vl_idct_map_buffers(&ctx->idct_cb, &ctx->cur_buffer->idct_cb);
+}
+
+static void
+delete_buffer(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)user;
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ vl_vb_cleanup(&buf->vertex_stream);
+ vl_idct_cleanup_buffer(&ctx->idct_y, &buf->idct_y);
+ vl_idct_cleanup_buffer(&ctx->idct_cb, &buf->idct_cb);
+ vl_idct_cleanup_buffer(&ctx->idct_cr, &buf->idct_cr);
+ vl_mpeg12_mc_cleanup_buffer(&ctx->mc_renderer, &buf->mc);
+}
+
+static void
+upload_buffer(struct vl_mpeg12_context *ctx,
+ struct vl_mpeg12_buffer *buffer,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ short *blocks;
+ unsigned tb, x, y;
+
+ assert(ctx);
+ assert(buffer);
+ assert(mb);
+
+ blocks = mb->blocks;
+
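+   /*
+    * mb->blocks packs only the coded 8x8 blocks back to back: the luma
+    * blocks flagged in cbp first, then Cb, then Cr (4:2:0).
+    */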
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x, ++tb) {
+ if (mb->cbp & (*ctx->empty_block_mask)[0][y][x]) {
+ vl_idct_add_block(&buffer->idct_y, mb->mbx * 2 + x, mb->mby * 2 + y, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+ for (tb = 1; tb < 3; ++tb) {
+ if (mb->cbp & (*ctx->empty_block_mask)[tb][0][0]) {
+ if(tb == 1)
+ vl_idct_add_block(&buffer->idct_cb, mb->mbx, mb->mby, blocks);
+ else
+ vl_idct_add_block(&buffer->idct_cr, mb->mbx, mb->mby, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+}
+
+static void
+vl_mpeg12_destroy(struct pipe_video_context *vpipe)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+
+ flush_buffer(ctx);
+
+ /* Asserted in softpipe_delete_fs_state() for some reason */
+ ctx->pipe->bind_vs_state(ctx->pipe, NULL);
+ ctx->pipe->bind_fs_state(ctx->pipe, NULL);
+
+ ctx->pipe->delete_blend_state(ctx->pipe, ctx->blend);
+ ctx->pipe->delete_rasterizer_state(ctx->pipe, ctx->rast);
+ ctx->pipe->delete_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
+
+ pipe_surface_reference(&ctx->decode_target, NULL);
+ vl_compositor_cleanup(&ctx->compositor);
+ util_delete_keymap(ctx->buffer_map, ctx);
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ vl_idct_cleanup(&ctx->idct_y);
+ vl_idct_cleanup(&ctx->idct_cr);
+ vl_idct_cleanup(&ctx->idct_cb);
+ ctx->pipe->delete_vertex_elements_state(ctx->pipe, ctx->vertex_elems_state);
+ pipe_resource_reference(&ctx->quads.buffer, NULL);
+ ctx->pipe->destroy(ctx->pipe);
+
+ FREE(ctx);
+}
+
+static int
+vl_mpeg12_get_param(struct pipe_video_context *vpipe, int param)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+
+ switch (param) {
+ case PIPE_CAP_NPOT_TEXTURES:
+ /* XXX: Temporary; not all paths are NPOT-tested */
+#if 0
+ return ctx->pipe->screen->get_param(ctx->pipe->screen, param);
+#endif
+ return FALSE;
+ case PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT:
+ return ctx->decode_format;
+ default:
+ {
+ debug_printf("vl_mpeg12_context: Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+ }
+}
+
+static struct pipe_surface *
+vl_mpeg12_create_surface(struct pipe_video_context *vpipe,
+ struct pipe_resource *resource,
+ const struct pipe_surface *templat)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+
+ return ctx->pipe->create_surface(ctx->pipe, resource, templat);
+}
+
+static boolean
+vl_mpeg12_is_format_supported(struct pipe_video_context *vpipe,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned geom)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+
- /* XXX: Temporary; not all paths are NPOT-tested */
- if (geom & PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO)
-    return FALSE;
-
- return ctx->pipe->screen->is_format_supported(ctx->pipe->screen, format, PIPE_TEXTURE_2D,
-                                               0, usage, geom);
++ return ctx->pipe->screen->is_format_supported(ctx->pipe->screen, format,
++                                               PIPE_TEXTURE_2D,
++                                               0, usage);
+}
+
+static void
+vl_mpeg12_decode_macroblocks(struct pipe_video_context *vpipe,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_macroblock *macroblocks,
+ struct pipe_fence_handle **fence)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+ struct pipe_mpeg12_macroblock *mpeg12_macroblocks = (struct pipe_mpeg12_macroblock*)macroblocks;
+ unsigned i;
+
+ assert(vpipe);
+ assert(num_macroblocks);
+ assert(macroblocks);
+ assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
+ assert(ctx->decode_target);
+ assert(ctx->cur_buffer);
+
+ for ( i = 0; i < num_macroblocks; ++i ) {
+ vl_vb_add_block(&ctx->cur_buffer->vertex_stream, &mpeg12_macroblocks[i],
+ ctx->empty_block_mask);
+ upload_buffer(ctx, ctx->cur_buffer, &mpeg12_macroblocks[i]);
+ }
+
+ vl_mpeg12_mc_set_surfaces(&ctx->mc_renderer, &ctx->cur_buffer->mc,
+ ctx->decode_target, past, future, fence);
+}
+
+static void
+vl_mpeg12_clear_render_target(struct pipe_video_context *vpipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ const float *rgba,
+ unsigned width, unsigned height)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(dst);
+
+ if (ctx->pipe->clear_render_target)
+ ctx->pipe->clear_render_target(ctx->pipe, dst, rgba, dstx, dsty, width, height);
+ else
+ util_clear_render_target(ctx->pipe, dst, rgba, dstx, dsty, width, height);
+}
+
+static void
+vl_mpeg12_resource_copy_region(struct pipe_video_context *vpipe,
+ struct pipe_resource *dst,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned srcx, unsigned srcy, unsigned srcz,
+ unsigned width, unsigned height)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(dst);
+
+ struct pipe_box box;
+ box.x = srcx;
+ box.y = srcy;
+ box.z = srcz;
+ box.width = width;
+ box.height = height;
+
+ if (ctx->pipe->resource_copy_region)
+ ctx->pipe->resource_copy_region(ctx->pipe, dst, 0,
+ dstx, dsty, dstz,
+ src, 0, &box);
+ else
+ util_resource_copy_region(ctx->pipe, dst, 0,
+ dstx, dsty, dstz,
+ src, 0, &box);
+}
+
+static struct pipe_transfer*
+vl_mpeg12_get_transfer(struct pipe_video_context *vpipe,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ const struct pipe_box *box)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(resource);
+ assert(box);
+
+ return ctx->pipe->get_transfer(ctx->pipe, resource, level, usage, box);
+}
+
+static void
+vl_mpeg12_transfer_destroy(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(transfer);
+
+ ctx->pipe->transfer_destroy(ctx->pipe, transfer);
+}
+
+static void*
+vl_mpeg12_transfer_map(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(transfer);
+
+ return ctx->pipe->transfer_map(ctx->pipe, transfer);
+}
+
+static void
+vl_mpeg12_transfer_flush_region(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(transfer);
+ assert(box);
+
+ ctx->pipe->transfer_flush_region(ctx->pipe, transfer, box);
+}
+
+static void
+vl_mpeg12_transfer_unmap(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(transfer);
+
+ ctx->pipe->transfer_unmap(ctx->pipe, transfer);
+}
+
+static void
+vl_mpeg12_transfer_inline_write(struct pipe_video_context *vpipe,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned slice_stride)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(resource);
+ assert(box);
+ assert(data);
+ assert(ctx->pipe->transfer_inline_write);
+
+ ctx->pipe->transfer_inline_write(ctx->pipe, resource, level, usage,
+ box, data, stride, slice_stride);
+}
+
+static void
+vl_mpeg12_render_picture(struct pipe_video_context *vpipe,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(src_surface);
+ assert(src_area);
+ assert(dst_surface);
+ assert(dst_area);
+
+ flush_buffer(ctx);
+
+ vl_compositor_render(&ctx->compositor, src_surface,
+ picture_type, src_area, dst_surface, dst_area, fence);
+}
+
+static void
+vl_mpeg12_set_picture_background(struct pipe_video_context *vpipe,
+ struct pipe_surface *bg,
+ struct pipe_video_rect *bg_src_rect)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(bg);
+ assert(bg_src_rect);
+
+ vl_compositor_set_background(&ctx->compositor, bg, bg_src_rect);
+}
+
+static void
+vl_mpeg12_set_picture_layers(struct pipe_video_context *vpipe,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert((layers && src_rects && dst_rects) ||
+ (!layers && !src_rects && !dst_rects));
+
+ vl_compositor_set_layers(&ctx->compositor, layers, src_rects, dst_rects, num_layers);
+}
+
+static void
+vl_mpeg12_set_decode_target(struct pipe_video_context *vpipe,
+ struct pipe_surface *dt)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+ assert(dt);
+
+ if (ctx->decode_target != dt || ctx->cur_buffer == NULL) {
+ rotate_buffer(ctx);
+
+ pipe_surface_reference(&ctx->decode_target, dt);
+ }
+}
+
+static void
+vl_mpeg12_set_csc_matrix(struct pipe_video_context *vpipe, const float *mat)
+{
+ struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
+
+ assert(vpipe);
+
+ vl_compositor_set_csc_matrix(&ctx->compositor, mat);
+}
+
+static bool
+init_pipe_state(struct vl_mpeg12_context *ctx)
+{
+ struct pipe_rasterizer_state rast;
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state dsa;
+ unsigned i;
+
+ assert(ctx);
+
+ memset(&rast, 0, sizeof rast);
+ rast.flatshade = 1;
+ rast.flatshade_first = 0;
+ rast.light_twoside = 0;
+ rast.front_ccw = 1;
+ rast.cull_face = PIPE_FACE_NONE;
+ rast.fill_back = PIPE_POLYGON_MODE_FILL;
+ rast.fill_front = PIPE_POLYGON_MODE_FILL;
+ rast.offset_point = 0;
+ rast.offset_line = 0;
+ rast.scissor = 0;
+ rast.poly_smooth = 0;
+ rast.poly_stipple_enable = 0;
+ rast.sprite_coord_enable = 0;
+ rast.point_size_per_vertex = 0;
+ rast.multisample = 0;
+ rast.line_smooth = 0;
+ rast.line_stipple_enable = 0;
+ rast.line_stipple_factor = 0;
+ rast.line_stipple_pattern = 0;
+ rast.line_last_pixel = 0;
+ rast.line_width = 1;
+ rast.point_smooth = 0;
+ rast.point_quad_rasterization = 0;
+ rast.point_size_per_vertex = 1;
+ rast.offset_units = 1;
+ rast.offset_scale = 1;
+ rast.gl_rasterization_rules = 1;
+
+ ctx->rast = ctx->pipe->create_rasterizer_state(ctx->pipe, &rast);
+ ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rast);
+
+ memset(&blend, 0, sizeof blend);
+
+ blend.independent_blend_enable = 0;
+ blend.rt[0].blend_enable = 0;
+ blend.rt[0].rgb_func = PIPE_BLEND_ADD;
+ blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rt[0].alpha_func = PIPE_BLEND_ADD;
+ blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.logicop_enable = 0;
+ blend.logicop_func = PIPE_LOGICOP_CLEAR;
+ /* Needed to allow color writes to FB, even if blending disabled */
+ blend.rt[0].colormask = PIPE_MASK_RGBA;
+ blend.dither = 0;
+ ctx->blend = ctx->pipe->create_blend_state(ctx->pipe, &blend);
+ ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend);
+
+ memset(&dsa, 0, sizeof dsa);
+ dsa.depth.enabled = 0;
+ dsa.depth.writemask = 0;
+ dsa.depth.func = PIPE_FUNC_ALWAYS;
+ for (i = 0; i < 2; ++i) {
+ dsa.stencil[i].enabled = 0;
+ dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].valuemask = 0;
+ dsa.stencil[i].writemask = 0;
+ }
+ dsa.alpha.enabled = 0;
+ dsa.alpha.func = PIPE_FUNC_ALWAYS;
+ dsa.alpha.ref_value = 0;
+ ctx->dsa = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, &dsa);
+ ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
+
+ return true;
+}
+
+struct pipe_video_context *
+vl_create_mpeg12_context(struct pipe_context *pipe,
+ enum pipe_video_profile profile,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height,
+ bool pot_buffers,
+ enum pipe_format decode_format)
+{
+ struct pipe_resource *idct_matrix;
+ unsigned buffer_width, buffer_height;
+ unsigned chroma_width, chroma_height, chroma_blocks_x, chroma_blocks_y;
+ struct vl_mpeg12_context *ctx;
+
+ assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
+
+ ctx = CALLOC_STRUCT(vl_mpeg12_context);
+
+ if (!ctx)
+ return NULL;
+
+ /* TODO: Non-pot buffers untested, probably doesn't work without changes to texcoord generation, vert shader, etc */
+ assert(pot_buffers);
+
+ buffer_width = pot_buffers ? util_next_power_of_two(width) : width;
+ buffer_height = pot_buffers ? util_next_power_of_two(height) : height;
+
+ ctx->base.profile = profile;
+ ctx->base.chroma_format = chroma_format;
+ ctx->base.width = width;
+ ctx->base.height = height;
+
+ ctx->base.screen = pipe->screen;
+
+ ctx->base.destroy = vl_mpeg12_destroy;
+ ctx->base.get_param = vl_mpeg12_get_param;
+ ctx->base.is_format_supported = vl_mpeg12_is_format_supported;
+ ctx->base.create_surface = vl_mpeg12_create_surface;
+ ctx->base.decode_macroblocks = vl_mpeg12_decode_macroblocks;
+ ctx->base.render_picture = vl_mpeg12_render_picture;
+ ctx->base.clear_render_target = vl_mpeg12_clear_render_target;
+ ctx->base.resource_copy_region = vl_mpeg12_resource_copy_region;
+ ctx->base.get_transfer = vl_mpeg12_get_transfer;
+ ctx->base.transfer_destroy = vl_mpeg12_transfer_destroy;
+ ctx->base.transfer_map = vl_mpeg12_transfer_map;
+ ctx->base.transfer_flush_region = vl_mpeg12_transfer_flush_region;
+ ctx->base.transfer_unmap = vl_mpeg12_transfer_unmap;
+ if (pipe->transfer_inline_write)
+ ctx->base.transfer_inline_write = vl_mpeg12_transfer_inline_write;
+ ctx->base.set_picture_background = vl_mpeg12_set_picture_background;
+ ctx->base.set_picture_layers = vl_mpeg12_set_picture_layers;
+ ctx->base.set_decode_target = vl_mpeg12_set_decode_target;
+ ctx->base.set_csc_matrix = vl_mpeg12_set_csc_matrix;
+
+ ctx->pipe = pipe;
+ ctx->decode_format = decode_format;
+
+ ctx->quads = vl_vb_upload_quads(ctx->pipe, 2, 2);
+ ctx->vertex_buffer_size = width / MACROBLOCK_WIDTH * height / MACROBLOCK_HEIGHT;
+ ctx->vertex_elems_state = vl_vb_get_elems_state(ctx->pipe, true);
+
+ if (ctx->vertex_elems_state == NULL) {
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
+
+ /* TODO: Implement 422, 444 */
+ assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+ ctx->empty_block_mask = &const_empty_block_mask_420;
+
+ if (!(idct_matrix = vl_idct_upload_matrix(ctx->pipe)))
+ return false;
+
+ if (!vl_idct_init(&ctx->idct_y, ctx->pipe, buffer_width, buffer_height,
+ 2, 2, TGSI_SWIZZLE_X, idct_matrix))
+ return false;
+
+ if (chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ chroma_width = buffer_width / 2;
+ chroma_height = buffer_height / 2;
+ chroma_blocks_x = 1;
+ chroma_blocks_y = 1;
+ } else if (chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
+ chroma_width = buffer_width;
+ chroma_height = buffer_height / 2;
+ chroma_blocks_x = 2;
+ chroma_blocks_y = 1;
+ } else {
+ chroma_width = buffer_width;
+ chroma_height = buffer_height;
+ chroma_blocks_x = 2;
+ chroma_blocks_y = 2;
+ }
+
+ if(!vl_idct_init(&ctx->idct_cr, ctx->pipe, chroma_width, chroma_height,
+ chroma_blocks_x, chroma_blocks_y, TGSI_SWIZZLE_Z, idct_matrix))
+ return false;
+
+ if(!vl_idct_init(&ctx->idct_cb, ctx->pipe, chroma_width, chroma_height,
+ chroma_blocks_x, chroma_blocks_y, TGSI_SWIZZLE_Y, idct_matrix))
+ return false;
+
+ if (!vl_mpeg12_mc_renderer_init(&ctx->mc_renderer, ctx->pipe,
+ buffer_width, buffer_height, chroma_format)) {
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
+
+ ctx->buffer_map = util_new_keymap(sizeof(unsigned), -1, delete_buffer);
+ if (!ctx->buffer_map) {
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
+
+ if (!vl_compositor_init(&ctx->compositor, ctx->pipe)) {
+ util_delete_keymap(ctx->buffer_map, ctx);
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
+
+ if (!init_pipe_state(ctx)) {
+ vl_compositor_cleanup(&ctx->compositor);
+ util_delete_keymap(ctx->buffer_map, ctx);
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
+
+ return &ctx->base;
+}
--- /dev/null
- renderer->pipe->flush(renderer->pipe, PIPE_FLUSH_RENDER_CACHE, buffer->fence);
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_mpeg12_mc_renderer.h"
+#include "vl_vertex_buffers.h"
+#include "vl_defines.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_format.h>
+#include <util/u_math.h>
+#include <util/u_memory.h>
+#include <util/u_keymap.h>
+#include <util/u_sampler.h>
+#include <tgsi/tgsi_ureg.h>
+
+enum VS_OUTPUT
+{
+ VS_O_VPOS,
+ VS_O_LINE,
+ VS_O_TEX0,
+ VS_O_TEX1,
+ VS_O_TEX2,
+ VS_O_EB_0,
+ VS_O_EB_1,
+ VS_O_INFO,
+ VS_O_MV0,
+ VS_O_MV1,
+ VS_O_MV2,
+ VS_O_MV3
+};
+
+static void *
+create_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src block_scale, mv_scale;
+ struct ureg_src vrect, vpos, eb[2][2], vmv[4];
+ struct ureg_dst t_vpos, t_vtex, t_vmv;
+ struct ureg_dst o_vpos, o_line, o_vtex[3], o_eb[2], o_vmv[4], o_info;
+ unsigned i, label;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return NULL;
+
+ t_vpos = ureg_DECL_temporary(shader);
+ t_vtex = ureg_DECL_temporary(shader);
+ t_vmv = ureg_DECL_temporary(shader);
+
+ vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
+ vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
+ eb[0][0] = ureg_DECL_vs_input(shader, VS_I_EB_0_0);
+ eb[1][0] = ureg_DECL_vs_input(shader, VS_I_EB_1_0);
+ eb[0][1] = ureg_DECL_vs_input(shader, VS_I_EB_0_1);
+ eb[1][1] = ureg_DECL_vs_input(shader, VS_I_EB_1_1);
+
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
+ o_line = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE);
+ o_vtex[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0);
+ o_vtex[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1);
+ o_vtex[2] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2);
+ o_eb[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0);
+ o_eb[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1);
+ o_info = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO);
+
+ for (i = 0; i < 4; ++i) {
+ vmv[i] = ureg_DECL_vs_input(shader, VS_I_MV0 + i);
+ o_vmv[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i);
+ }
+
+ /*
+ * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
+ * mv_scale = 0.5 / (dst.width, dst.height);
+ *
+ * t_vpos = (vpos + vrect) * block_scale
+ * o_vpos.xy = t_vpos
+ * o_vpos.zw = vpos
+ *
+ * o_eb[0..1] = vrect.x ? eb[0..1][1] : eb[0..1][0]
+ *
+ * o_frame_pred = frame_pred
+ * o_info.x = ref_frames
+ * o_info.y = ref_frames > 0
+ * o_info.z = bkwd_pred
+ *
+ * // Apply motion vectors
+ * o_vmv[0..count] = t_vpos + vmv[0..count] * mv_scale
+ *
+ * o_line.xy = vrect * 8
+ * o_line.z = interlaced
+ *
+ * if(eb[0][0].w) { //interlaced
+ * t_vtex.x = vrect.x
+ * t_vtex.y = vrect.y * 0.5
+ * t_vtex += vpos
+ *
+ * o_vtex[0].xy = t_vtex * block_scale
+ *
+ * t_vtex.y += 0.5
+ * o_vtex[1].xy = t_vtex * block_scale
+ * } else {
+ * o_vtex[0..1].xy = t_vpos
+ * }
+ * o_vtex[2].xy = t_vpos
+ *
+ */
+ block_scale = ureg_imm2f(shader,
+ (float)MACROBLOCK_WIDTH / r->buffer_width,
+ (float)MACROBLOCK_HEIGHT / r->buffer_height);
+
+ mv_scale = ureg_imm2f(shader,
+ 0.5f / r->buffer_width,
+ 0.5f / r->buffer_height);
+
+ ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
+ ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
+
+ ureg_CMP(shader, ureg_writemask(o_eb[0], TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
+ eb[0][1], eb[0][0]);
+ ureg_CMP(shader, ureg_writemask(o_eb[1], TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
+ eb[1][1], eb[1][0]);
+
+ ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_X),
+ ureg_scalar(eb[1][1], TGSI_SWIZZLE_W));
+ ureg_SGE(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Y),
+ ureg_scalar(eb[1][1], TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.0f));
+ ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Z),
+ ureg_scalar(eb[1][0], TGSI_SWIZZLE_W));
+
+ ureg_MAD(shader, ureg_writemask(o_vmv[0], TGSI_WRITEMASK_XY), mv_scale, vmv[0], ureg_src(t_vpos));
+ ureg_MAD(shader, ureg_writemask(o_vmv[2], TGSI_WRITEMASK_XY), mv_scale, vmv[2], ureg_src(t_vpos));
+
+ ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
+ vmv[0], vmv[1]);
+ ureg_MAD(shader, ureg_writemask(o_vmv[1], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
+
+ ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
+ vmv[2], vmv[3]);
+ ureg_MAD(shader, ureg_writemask(o_vmv[3], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
+
+ ureg_MOV(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vtex[2], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+
+ ureg_MOV(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X), ureg_scalar(vrect, TGSI_SWIZZLE_Y));
+ ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_Y),
+ vrect, ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
+
+ ureg_IF(shader, ureg_scalar(eb[0][0], TGSI_SWIZZLE_W), &label);
+
+ ureg_MOV(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_X), vrect);
+ ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), vrect, ureg_imm1f(shader, 0.5f));
+ ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY), vpos, ureg_src(t_vtex));
+ ureg_MUL(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
+ ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), ureg_src(t_vtex), ureg_imm1f(shader, 0.5f));
+ ureg_MUL(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
+
+ ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X),
+ ureg_scalar(vrect, TGSI_SWIZZLE_Y),
+ ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
+
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ ureg_release_temporary(shader, t_vtex);
+ ureg_release_temporary(shader, t_vpos);
+ ureg_release_temporary(shader, t_vmv);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, r->pipe);
+}
+
+static struct ureg_dst
+calc_field(struct ureg_program *shader)
+{
+ struct ureg_dst tmp;
+ struct ureg_src line;
+
+ tmp = ureg_DECL_temporary(shader);
+
+ line = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE, TGSI_INTERPOLATE_LINEAR);
+
+ /*
+ * line.x going from 0 to 1 if not interlaced
+ * line.x going from 0 to 8 in steps of 0.5 if interlaced
+ * line.y going from 0 to 8 in steps of 0.5
+ *
+ * tmp.xy = fraction(line)
+ * tmp.xy = tmp.xy >= 0.5 ? 1 : 0
+ */
+ ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), line);
+ ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), ureg_src(tmp), ureg_imm1f(shader, 0.5f));
+
+ return tmp;
+}
+
+static struct ureg_dst
+fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field)
+{
+ struct ureg_src tc[3], sampler[3], eb[2];
+ struct ureg_dst texel, t_tc, t_eb_info;
+ unsigned i, label;
+
+ texel = ureg_DECL_temporary(shader);
+ t_tc = ureg_DECL_temporary(shader);
+ t_eb_info = ureg_DECL_temporary(shader);
+
+ tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0, TGSI_INTERPOLATE_LINEAR);
+ tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1, TGSI_INTERPOLATE_LINEAR);
+ tc[2] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2, TGSI_INTERPOLATE_LINEAR);
+
+ eb[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0, TGSI_INTERPOLATE_CONSTANT);
+ eb[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1, TGSI_INTERPOLATE_CONSTANT);
+
+ for (i = 0; i < 3; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i);
+ }
+
+ /*
+ * texel.y = tex(field.y ? tc[1] : tc[0], sampler[0])
+ * texel.cb = tex(tc[2], sampler[1])
+ * texel.cr = tex(tc[2], sampler[2])
+ */
+
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
+ tc[1], tc[0]);
+
+ ureg_CMP(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
+ eb[1], eb[0]);
+
+   /* r600g ignores TGSI_INTERPOLATE_CONSTANT; work around that here */
+ ureg_SLT(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ), ureg_src(t_eb_info), ureg_imm1f(shader, 0.5f));
+
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.0f));
+ for (i = 0; i < 3; ++i) {
+ ureg_IF(shader, ureg_scalar(ureg_src(t_eb_info), TGSI_SWIZZLE_X + i), &label);
+
+ /* Nouveau can't writemask tex dst regs (yet?), so this won't work anymore on nvidia hardware */
+ if(i==0 || r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444) {
+ ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, ureg_src(t_tc), sampler[i]);
+ } else {
+ ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, tc[2], sampler[i]);
+ }
+
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+ }
+
+ ureg_release_temporary(shader, t_tc);
+ ureg_release_temporary(shader, t_eb_info);
+
+ return texel;
+}
+
+static struct ureg_dst
+fetch_ref(struct ureg_program *shader, struct ureg_dst field)
+{
+ struct ureg_src info;
+ struct ureg_src tc[4], sampler[2];
+ struct ureg_dst ref[2], result;
+ unsigned i, intra_label, bi_label, label;
+
+ info = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO, TGSI_INTERPOLATE_CONSTANT);
+
+ for (i = 0; i < 4; ++i)
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
+
+ for (i = 0; i < 2; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i + 3);
+ ref[i] = ureg_DECL_temporary(shader);
+ }
+
+ result = ureg_DECL_temporary(shader);
+
+ ureg_MOV(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f));
+
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Y), &intra_label);
+ ureg_CMP(shader, ureg_writemask(ref[0], TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[1], tc[0]);
+
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_X), &bi_label);
+
+ /*
+ * result = tex(field.z ? tc[1] : tc[0], sampler[bkwd_pred ? 1 : 0])
+ */
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Z), &label);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[1]);
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ELSE(shader, &label);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
+ ureg_ELSE(shader, &bi_label);
+
+ /*
+ * if (field.z)
+ * ref[0..1] = tex(tc[0..1], sampler[0..1])
+ * else
+ * ref[0..1] = tex(tc[2..3], sampler[0..1])
+ */
+ ureg_CMP(shader, ureg_writemask(ref[1], TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[3], tc[2]);
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
+ ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, ureg_src(ref[1]), sampler[1]);
+
+ ureg_LRP(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f),
+ ureg_src(ref[0]), ureg_src(ref[1]));
+
+ ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+ ureg_fixup_label(shader, intra_label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ for (i = 0; i < 2; ++i)
+ ureg_release_temporary(shader, ref[i]);
+
+ return result;
+}
+
+static void *
+create_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_dst result;
+ struct ureg_dst field, texel;
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
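+   /*
+    * color = IDCT texel + motion compensated prediction
+    * (fetch_ref yields a constant 0.5 bias when no reference frame is used)
+    */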
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ field = calc_field(shader);
+ texel = fetch_ycbcr(r, shader, field);
+
+ result = fetch_ref(shader, field);
+
+ ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(texel), ureg_src(result));
+
+ ureg_release_temporary(shader, field);
+ ureg_release_temporary(shader, texel);
+ ureg_release_temporary(shader, result);
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, r->pipe);
+}
+
+static bool
+init_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ struct pipe_sampler_state sampler;
+ struct pipe_rasterizer_state rs_state;
+ unsigned filters[5];
+ unsigned i;
+
+ assert(r);
+
+ r->viewport.scale[0] = r->buffer_width;
+ r->viewport.scale[1] = r->buffer_height;
+ r->viewport.scale[2] = 1;
+ r->viewport.scale[3] = 1;
+ r->viewport.translate[0] = 0;
+ r->viewport.translate[1] = 0;
+ r->viewport.translate[2] = 0;
+ r->viewport.translate[3] = 0;
+
+ r->fb_state.width = r->buffer_width;
+ r->fb_state.height = r->buffer_height;
+ r->fb_state.nr_cbufs = 1;
+ r->fb_state.zsbuf = NULL;
+
+ /* Luma filter */
+ filters[0] = PIPE_TEX_FILTER_NEAREST;
+ /* Chroma filters */
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 || true) { //TODO
+ filters[1] = PIPE_TEX_FILTER_NEAREST;
+ filters[2] = PIPE_TEX_FILTER_NEAREST;
+ }
+ else {
+ filters[1] = PIPE_TEX_FILTER_LINEAR;
+ filters[2] = PIPE_TEX_FILTER_LINEAR;
+ }
+ /* Fwd, bkwd ref filters */
+ filters[3] = PIPE_TEX_FILTER_LINEAR;
+ filters[4] = PIPE_TEX_FILTER_LINEAR;
+
+ for (i = 0; i < 5; ++i) {
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ sampler.min_img_filter = filters[i];
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = filters[i];
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.shadow_ambient = ; */
+ /*sampler.lod_bias = ; */
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ; */
+ sampler.border_color[0] = 0.0f;
+ sampler.border_color[1] = 0.0f;
+ sampler.border_color[2] = 0.0f;
+ sampler.border_color[3] = 0.0f;
+ /*sampler.max_anisotropy = ; */
+ r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler);
+ }
+
+ memset(&rs_state, 0, sizeof(rs_state));
+ /*rs_state.sprite_coord_enable */
+ rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
+ rs_state.point_quad_rasterization = true;
+ rs_state.point_size = BLOCK_WIDTH;
+ rs_state.gl_rasterization_rules = true;
+ r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
+
+ return true;
+}
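+
+/*
+ * init_pipe_state() leaves the renderer with: a viewport and framebuffer
+ * sized to buffer_width x buffer_height, five samplers (luma, two chroma,
+ * forward/backward reference), all clamped to edge with chroma filtering
+ * forced to NEAREST for now (see the TODO above), and a rasterizer with
+ * point-quad rasterization enabled and point_size = BLOCK_WIDTH.
+ */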
+
+static void
+cleanup_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
+ for (i = 0; i < 5; ++i)
+ r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]);
+
+ r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
+}
+
+static struct pipe_sampler_view
+*find_or_create_sampler_view(struct vl_mpeg12_mc_renderer *r, struct pipe_surface *surface)
+{
+ struct pipe_sampler_view *sampler_view;
+ assert(r);
+ assert(surface);
+
+ sampler_view = (struct pipe_sampler_view*)util_keymap_lookup(r->texview_map, &surface);
+ if (!sampler_view) {
+ struct pipe_sampler_view templat;
+ boolean added_to_map;
+
+ u_sampler_view_default_template(&templat, surface->texture,
+ surface->texture->format);
+ sampler_view = r->pipe->create_sampler_view(r->pipe, surface->texture,
+ &templat);
+ if (!sampler_view)
+ return NULL;
+
+ added_to_map = util_keymap_insert(r->texview_map, &surface,
+ sampler_view, r->pipe);
+ assert(added_to_map);
+ }
+
+ return sampler_view;
+}
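+
+/*
+ * Sampler views for reference surfaces are created lazily and cached in
+ * texview_map, keyed by the pipe_surface pointer; texview_map_delete()
+ * below drops the reference when an entry (or the whole map) is destroyed.
+ */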
+
+static void
+texview_map_delete(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ pipe_sampler_view_reference(&sv, NULL);
+}
+
+bool
+vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
+ struct pipe_context *pipe,
+ unsigned buffer_width,
+ unsigned buffer_height,
+ enum pipe_video_chroma_format chroma_format)
+{
+ assert(renderer);
+ assert(pipe);
+
+ memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer));
+
+ renderer->pipe = pipe;
+ renderer->buffer_width = buffer_width;
+ renderer->buffer_height = buffer_height;
+ renderer->chroma_format = chroma_format;
+
+ renderer->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
+ texview_map_delete);
+ if (!renderer->texview_map)
+ return false;
+
+ if (!init_pipe_state(renderer))
+ goto error_pipe_state;
+
+ renderer->vs = create_vert_shader(renderer);
+ renderer->fs = create_frag_shader(renderer);
+
+ if (renderer->vs == NULL || renderer->fs == NULL)
+ goto error_shaders;
+
+ return true;
+
+error_shaders:
+ cleanup_pipe_state(renderer);
+
+error_pipe_state:
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ return false;
+}
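+
+/*
+ * Rough lifetime sketch for this renderer (illustrative only; macroblock
+ * vertex data is uploaded elsewhere by the caller and is not shown):
+ *
+ *    struct vl_mpeg12_mc_renderer mc;
+ *    struct vl_mpeg12_mc_buffer buf;
+ *    struct pipe_fence_handle *fence = NULL;
+ *
+ *    vl_mpeg12_mc_renderer_init(&mc, pipe, width, height, chroma_format);
+ *    vl_mpeg12_mc_init_buffer(&mc, &buf, y_tex, cr_tex, cb_tex);
+ *    vl_mpeg12_mc_set_surfaces(&mc, &buf, dst, past, future, &fence);
+ *    vl_mpeg12_mc_renderer_flush(&mc, &buf, 0, num_filled, num_filled, num_empty);
+ *    vl_mpeg12_mc_cleanup_buffer(&mc, &buf);
+ *    vl_mpeg12_mc_renderer_cleanup(&mc);
+ */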
+
+void
+vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer)
+{
+ assert(renderer);
+
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ cleanup_pipe_state(renderer);
+
+ renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs);
+ renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs);
+}
+
+bool
+vl_mpeg12_mc_init_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_resource *y, struct pipe_resource *cr, struct pipe_resource *cb)
+{
+ struct pipe_sampler_view sampler_view;
+
+ unsigned i;
+
+ assert(renderer && buffer);
+
+ buffer->surface = NULL;
+ buffer->past = NULL;
+ buffer->future = NULL;
+
+ pipe_resource_reference(&buffer->textures.individual.y, y);
+ pipe_resource_reference(&buffer->textures.individual.cr, cr);
+ pipe_resource_reference(&buffer->textures.individual.cb, cb);
+
+ for (i = 0; i < 3; ++i) {
+ u_sampler_view_default_template(&sampler_view,
+ buffer->textures.all[i],
+ buffer->textures.all[i]->format);
+ sampler_view.swizzle_r = i == 0 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_g = i == 1 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_b = i == 2 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_a = PIPE_SWIZZLE_ONE;
+ buffer->sampler_views.all[i] = renderer->pipe->create_sampler_view(
+ renderer->pipe, buffer->textures.all[i], &sampler_view);
+ }
+
+ return true;
+}
+
+void
+vl_mpeg12_mc_cleanup_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ unsigned i;
+
+ assert(renderer && buffer);
+
+ for (i = 0; i < 3; ++i) {
+ pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
+ pipe_resource_reference(&buffer->textures.all[i], NULL);
+ }
+
+ pipe_surface_reference(&buffer->surface, NULL);
+ pipe_surface_reference(&buffer->past, NULL);
+ pipe_surface_reference(&buffer->future, NULL);
+}
+
+void
+vl_mpeg12_mc_set_surfaces(struct vl_mpeg12_mc_renderer *renderer,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_surface *surface,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ struct pipe_fence_handle **fence)
+{
+ assert(renderer && buffer);
+ assert(surface);
+
+ if (surface != buffer->surface) {
+ pipe_surface_reference(&buffer->surface, surface);
+ pipe_surface_reference(&buffer->past, past);
+ pipe_surface_reference(&buffer->future, future);
+ buffer->fence = fence;
+ } else {
+ /* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
+ assert(buffer->past == past);
+ assert(buffer->future == future);
+ }
+}
+
+void
+vl_mpeg12_mc_renderer_flush(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer,
+ unsigned not_empty_start_instance, unsigned not_empty_num_instances,
+ unsigned empty_start_instance, unsigned empty_num_instances)
+{
+ assert(renderer && buffer);
+
+ if (not_empty_num_instances == 0 && empty_num_instances == 0)
+ return;
+
+ renderer->fb_state.cbufs[0] = buffer->surface;
+ renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);
+ renderer->pipe->set_framebuffer_state(renderer->pipe, &renderer->fb_state);
+ renderer->pipe->set_viewport_state(renderer->pipe, &renderer->viewport);
+
+ if (buffer->past) {
+ buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->past);
+ } else {
+ buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->surface);
+ }
+
+ if (buffer->future) {
+ buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->future);
+ } else {
+ buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->surface);
+ }
+
+ renderer->pipe->set_fragment_sampler_views(renderer->pipe, 5, buffer->sampler_views.all);
+ renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 5, renderer->samplers.all);
+
+ renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs);
+ renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs);
+
+ if (not_empty_num_instances > 0)
+ util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4,
+ not_empty_start_instance, not_empty_num_instances);
+
+ if (empty_num_instances > 0)
+ util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4,
+ empty_start_instance, empty_num_instances);
+
++ renderer->pipe->flush(renderer->pipe, buffer->fence);
+
+ /* Next time we get this surface it may have new ref frames */
+ pipe_surface_reference(&buffer->surface, NULL);
+ pipe_surface_reference(&buffer->past, NULL);
+ pipe_surface_reference(&buffer->future, NULL);
+}
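+
+/*
+ * vl_mpeg12_mc_renderer_flush() binds the MC pipeline state, picks sampler
+ * views for the past/future references (falling back to the destination
+ * surface itself when a reference is missing), issues one instanced quad
+ * draw per caller-prepared instance range, flushes with the caller's fence
+ * and finally drops the surface references so that the next frame can
+ * attach new reference surfaces.
+ */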
return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
}
+static void r600_bc_remove_alu(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
+{
+ if (alu->last && alu->list.prev != &cf->alu) {
+ PREV_ALU(alu)->last = 1;
+ }
+ LIST_DEL(&alu->list);
+ free(alu);
+ cf->ndw -= 2;
+}
+
+ static unsigned r600_bc_num_tex_and_vtx_instructions(const struct r600_bc *bc)
+ {
+ switch (bc->chiprev) {
+ case CHIPREV_R600:
+ return 8;
+
+ case CHIPREV_R700:
+ return 16;
+
+ case CHIPREV_EVERGREEN:
+ return 64;
+
+ default:
+ R600_ERR("Unknown chiprev %d.\n", bc->chiprev);
+ return 8;
+ }
+ }
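+
+ /*
+  * Number of TEX/VTX fetch instructions that fit in a single clause on
+  * each generation (8 on R600, 16 on R700, 64 on Evergreen); presumably
+  * used to split fetch clauses that grow past the hardware limit.
+  */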
+
int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
{
struct r600_bc_vtx *nvtx = r600_bc_vtx();
return 0;
}
-/* common for r600/r700 - eg in eg_asm.c */
-static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
+ static void r600_bc_cf_vtx_build(uint32_t *bytecode, const struct r600_bc_cf *cf)
+ {
+ *bytecode++ = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
+ *bytecode++ = S_SQ_CF_WORD1_CF_INST(cf->inst) |
+ S_SQ_CF_WORD1_BARRIER(1) |
+ S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1);
+ }
+
+enum cf_class
{
- unsigned id = cf->id;
+ CF_CLASS_ALU,
+ CF_CLASS_TEXTURE,
+ CF_CLASS_VERTEX,
+ CF_CLASS_EXPORT,
+ CF_CLASS_OTHER
+};
+static enum cf_class r600_bc_cf_class(struct r600_bc_cf *cf)
+{
switch (cf->inst) {
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
- case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
+ case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
+ return CF_CLASS_ALU;
+
+ case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
+ return CF_CLASS_TEXTURE;
+
+ case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
+ case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
+ return CF_CLASS_VERTEX;
+
+ case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
+ case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
+ case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
+ case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
+ return CF_CLASS_EXPORT;
+
+ case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
+ case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
+ case V_SQ_CF_WORD1_SQ_CF_INST_POP:
+ case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
+ case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
+ case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
+ case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
+ case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
+ case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
+ case V_SQ_CF_WORD1_SQ_CF_INST_NOP:
+ return CF_CLASS_OTHER;
+
+ default:
+ R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
+ return -EINVAL;
+ }
+}
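+
+/*
+ * Bucketing CF opcodes into coarse classes lets the bytecode emitters
+ * below (r600_bc_cf_build() and the r700/evergreen variants) switch on
+ * the class instead of enumerating every raw instruction, keeping the
+ * per-generation differences in one place.
+ */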
+
+/* common for r600/r700 - eg in eg_asm.c */
+static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
+{
+ unsigned id = cf->id;
+ unsigned end_of_program = bc->cf.prev == &cf->list;
+
+ switch (r600_bc_cf_class(cf)) {
+ case CF_CLASS_ALU:
+ assert(!end_of_program);
bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
- S_SQ_CF_ALU_WORD1_BARRIER(1) |
- S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
- S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
+ S_SQ_CF_ALU_WORD1_BARRIER(cf->barrier) |
+ S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
+ S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
break;
- case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
- case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
- case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
+ case CF_CLASS_TEXTURE:
+ case CF_CLASS_VERTEX:
- bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
- bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
- S_SQ_CF_WORD1_BARRIER(cf->barrier) |
- S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1) |
- S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
+ if (bc->chiprev == CHIPREV_R700)
+ r700_bc_cf_vtx_build(&bc->bytecode[id], cf);
+ else
+ r600_bc_cf_vtx_build(&bc->bytecode[id], cf);
break;
- case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
- case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
+ case CF_CLASS_EXPORT:
bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
res->format,
res->target,
res->nr_samples,
- PIPE_BIND_SAMPLER_VIEW, 0))
+ PIPE_BIND_SAMPLER_VIEW))
return FALSE;
- return TRUE;
+ switch (res->usage) {
+ case PIPE_USAGE_STREAM:
+ case PIPE_USAGE_STAGING:
+ case PIPE_USAGE_STATIC:
+ case PIPE_USAGE_IMMUTABLE:
+ return FALSE;
+
+ default:
+ return TRUE;
+ }
}
static boolean r600_texture_get_handle(struct pipe_screen* screen,
PIPE_SHADER_CAP_SUBROUTINES, /* BGNSUB, ENDSUB, CAL, RET */
};
- /**
- * Referenced query flags.
- */
-
- #define PIPE_UNREFERENCED 0
- #define PIPE_REFERENCED_FOR_READ (1 << 0)
- #define PIPE_REFERENCED_FOR_WRITE (1 << 1)
+enum pipe_video_codec
+{
+ PIPE_VIDEO_CODEC_UNKNOWN = 0,
+ PIPE_VIDEO_CODEC_MPEG12, /**< MPEG1, MPEG2 */
+ PIPE_VIDEO_CODEC_MPEG4, /**< DIVX, XVID */
+ PIPE_VIDEO_CODEC_VC1, /**< WMV */
+ PIPE_VIDEO_CODEC_MPEG4_AVC /**< H.264 */
+};
+
+enum pipe_video_profile
+{
+ PIPE_VIDEO_PROFILE_UNKNOWN,
+ PIPE_VIDEO_PROFILE_MPEG1,
+ PIPE_VIDEO_PROFILE_MPEG2_SIMPLE,
+ PIPE_VIDEO_PROFILE_MPEG2_MAIN,
+ PIPE_VIDEO_PROFILE_MPEG4_SIMPLE,
+ PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE,
+ PIPE_VIDEO_PROFILE_VC1_SIMPLE,
+ PIPE_VIDEO_PROFILE_VC1_MAIN,
+ PIPE_VIDEO_PROFILE_VC1_ADVANCED,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH
+};
+
+
/**
* Composite query types
*/
PIPE_FORMAT_R8G8B8X8_UNORM = 134,
PIPE_FORMAT_B4G4R4X4_UNORM = 135,
+ PIPE_FORMAT_YV12 = 136,
+ PIPE_FORMAT_YV16 = 137,
+ PIPE_FORMAT_IYUV = 138, /**< aka I420 */
+ PIPE_FORMAT_NV12 = 139,
+ PIPE_FORMAT_NV21 = 140,
+ PIPE_FORMAT_AYUV = PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_VUYA = PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_FORMAT_XYUV = PIPE_FORMAT_X8R8G8B8_UNORM,
+ PIPE_FORMAT_VUYX = PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_FORMAT_IA44 = 141,
+ PIPE_FORMAT_AI44 = 142,
+
/* some stencil samplers formats */
- PIPE_FORMAT_X24S8_USCALED = 136,
- PIPE_FORMAT_S8X24_USCALED = 137,
- PIPE_FORMAT_X32_S8X24_USCALED = 138,
+ PIPE_FORMAT_X24S8_USCALED = 143,
+ PIPE_FORMAT_S8X24_USCALED = 144,
+ PIPE_FORMAT_X32_S8X24_USCALED = 145,
- PIPE_FORMAT_B2G3R3_UNORM = 139,
- PIPE_FORMAT_L16A16_UNORM = 140,
- PIPE_FORMAT_A16_UNORM = 141,
- PIPE_FORMAT_I16_UNORM = 142,
+ PIPE_FORMAT_B2G3R3_UNORM = 146,
+ PIPE_FORMAT_L16A16_UNORM = 147,
+ PIPE_FORMAT_A16_UNORM = 148,
+ PIPE_FORMAT_I16_UNORM = 149,
+ PIPE_FORMAT_LATC1_UNORM = 143,
+ PIPE_FORMAT_LATC1_SNORM = 144,
+ PIPE_FORMAT_LATC2_UNORM = 145,
+ PIPE_FORMAT_LATC2_SNORM = 146,
+
PIPE_FORMAT_COUNT
};
--- /dev/null
- unsigned usage,
- unsigned geom);
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_VIDEO_CONTEXT_H
+#define PIPE_VIDEO_CONTEXT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <pipe/p_video_state.h>
+
+/* XXX: Move to an appropriate place */
+#define PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT 256
+
+struct pipe_screen;
+struct pipe_buffer;
+struct pipe_surface;
+struct pipe_macroblock;
+struct pipe_picture_desc;
+struct pipe_fence_handle;
+
+/**
+ * Gallium video rendering context
+ */
+struct pipe_video_context
+{
+ struct pipe_screen *screen;
+ enum pipe_video_profile profile;
+ enum pipe_video_chroma_format chroma_format;
+ unsigned width;
+ unsigned height;
+
+ void *priv; /**< context private data (for DRI for example) */
+
+ /**
+ * Query an integer-valued capability/parameter/limit
+ * \param param one of PIPE_CAP_x
+ */
+ int (*get_param)(struct pipe_video_context *vpipe, int param);
+
+ /**
+ * Check if the given pipe_format is supported as a texture or
+ * drawing surface.
+ */
+ boolean (*is_format_supported)(struct pipe_video_context *vpipe,
+ enum pipe_format format,
++ unsigned usage);
+
+ void (*destroy)(struct pipe_video_context *vpipe);
+
+ struct pipe_surface *(*create_surface)(struct pipe_video_context *vpipe,
+ struct pipe_resource *resource,
+ const struct pipe_surface *templat);
+
+ /**
+ * Picture decoding and displaying
+ */
+ /*@{*/
+ void (*decode_bitstream)(struct pipe_video_context *vpipe,
+ unsigned num_bufs,
+ struct pipe_buffer **bitstream_buf);
+
+ void (*decode_macroblocks)(struct pipe_video_context *vpipe,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_macroblock *macroblocks,
+ struct pipe_fence_handle **fence);
+
+ void (*render_picture)(struct pipe_video_context *vpipe,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence);
+
+ void (*clear_render_target)(struct pipe_video_context *vpipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ const float *rgba,
+ unsigned width, unsigned height);
+
+ void (*resource_copy_region)(struct pipe_video_context *vpipe,
+ struct pipe_resource *dst,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned srcx, unsigned srcy, unsigned srcz,
+ unsigned width, unsigned height);
+
+ struct pipe_transfer *(*get_transfer)(struct pipe_video_context *vpipe,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ const struct pipe_box *box);
+
+ void (*transfer_destroy)(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer);
+
+ void* (*transfer_map)(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer);
+
+ void (*transfer_flush_region)(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box);
+
+ void (*transfer_unmap)(struct pipe_video_context *vpipe,
+ struct pipe_transfer *transfer);
+
+ void (*transfer_inline_write)(struct pipe_video_context *vpipe,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned slice_stride);
+
+ /*@}*/
+
+ /**
+ * Parameter-like states (or properties)
+ */
+ /*@{*/
+ void (*set_picture_background)(struct pipe_video_context *vpipe,
+ struct pipe_surface *bg,
+ struct pipe_video_rect *bg_src_rect);
+
+ void (*set_picture_layers)(struct pipe_video_context *vpipe,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers);
+
+ void (*set_picture_desc)(struct pipe_video_context *vpipe,
+ const struct pipe_picture_desc *desc);
+
+ void (*set_decode_target)(struct pipe_video_context *vpipe,
+ struct pipe_surface *dt);
+
+ void (*set_csc_matrix)(struct pipe_video_context *vpipe, const float *mat);
+
+ /* TODO: Interface for scaling modes, post-processing, etc. */
+ /*@}*/
+};
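+
+/*
+ * Typical decode/display sequence through this interface (illustrative
+ * only; the surfaces, macroblock array and rects are assumed to have been
+ * set up by the state tracker):
+ *
+ *    vpipe->set_picture_desc(vpipe, desc);
+ *    vpipe->set_decode_target(vpipe, dst_surface);
+ *    vpipe->decode_macroblocks(vpipe, past, future, num_mbs, mbs, &fence);
+ *    vpipe->render_picture(vpipe, dst_surface, picture_type, &src_rect,
+ *                          drawable_surface, &dst_rect, &fence);
+ */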
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PIPE_VIDEO_CONTEXT_H */
--- /dev/null
-
- if (vctx->is_format_supported(vctx, tmplt.format,
- PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
- PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO)) {
- tmplt.width0 = vlsurf->width;
- tmplt.height0 = vlsurf->height;
- } else {
- assert(vctx->is_format_supported(vctx, tmplt.format,
- PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
- PIPE_TEXTURE_GEOM_NON_SQUARE));
- tmplt.width0 = util_next_power_of_two(vlsurf->width);
- tmplt.height0 = util_next_power_of_two(vlsurf->height);
- }
-
+/**************************************************************************
+ *
+ * Copyright 2010 Thomas Balling Sørensen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vdpau_private.h"
+#include "mpeg2_bitstream_parser.h"
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include <pipe/p_video_context.h>
+#include <util/u_debug.h>
+
+VdpStatus
+vlVdpDecoderCreate(VdpDevice device,
+ VdpDecoderProfile profile,
+ uint32_t width, uint32_t height,
+ uint32_t max_references,
+ VdpDecoder *decoder)
+{
+ enum pipe_video_profile p_profile = PIPE_VIDEO_PROFILE_UNKNOWN;
+ VdpStatus ret = VDP_STATUS_OK;
+ vlVdpDecoder *vldecoder = NULL;
+
+ debug_printf("[VDPAU] Creating decoder\n");
+
+ if (!decoder)
+ return VDP_STATUS_INVALID_POINTER;
+
+ if (!(width && height))
+ return VDP_STATUS_INVALID_VALUE;
+
+ vlVdpDevice *dev = vlGetDataHTAB(device);
+ if (!dev) {
+ ret = VDP_STATUS_INVALID_HANDLE;
+ goto inv_device;
+ }
+
+ vldecoder = CALLOC(1,sizeof(vlVdpDecoder));
+ if (!vldecoder) {
+ ret = VDP_STATUS_RESOURCES;
+ goto no_decoder;
+ }
+
+ p_profile = ProfileToPipe(profile);
+ if (p_profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
+ ret = VDP_STATUS_INVALID_DECODER_PROFILE;
+ goto inv_profile;
+ }
+
+ // TODO: Define max_references. Used mainly for H264
+
+ vldecoder->profile = p_profile;
+ vldecoder->height = height;
+ vldecoder->width = width;
+ vldecoder->device = dev;
+ vldecoder->vctx = NULL;
+
+ *decoder = vlAddDataHTAB(vldecoder);
+ if (*decoder == 0) {
+ ret = VDP_STATUS_ERROR;
+ goto no_handle;
+ }
+ debug_printf("[VDPAU] Decoder created succesfully\n");
+
+ return VDP_STATUS_OK;
+
+no_handle:
+ FREE(vldecoder);
+ inv_profile:
+no_screen:
+no_decoder:
+inv_device:
+ return ret;
+}
+
+VdpStatus
+vlVdpDecoderDestroy(VdpDecoder decoder)
+{
+ debug_printf("[VDPAU] Destroying decoder\n");
+ vlVdpDecoder *vldecoder;
+
+ vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder);
+ if (!vldecoder) {
+ return VDP_STATUS_INVALID_HANDLE;
+ }
+
+ if (vldecoder->vctx) {
+ if (vldecoder->vctx->vscreen)
+ vl_screen_destroy(vldecoder->vctx->vscreen);
+ }
+
+ if (vldecoder->vctx)
+ vl_video_destroy(vldecoder->vctx);
+
+ FREE(vldecoder);
+
+ return VDP_STATUS_OK;
+}
+
+VdpStatus
+vlVdpCreateSurfaceTarget(vlVdpDecoder *vldecoder, vlVdpSurface *vlsurf)
+{
+ struct pipe_surface surf_template;
+ struct pipe_resource tmplt;
+ struct pipe_resource *surf_tex;
+ struct pipe_video_context *vctx;
+
+ debug_printf("[VDPAU] Creating surface\n");
+
+ if(!(vldecoder && vlsurf))
+ return VDP_STATUS_INVALID_POINTER;
+
+ vctx = vldecoder->vctx->vpipe;
+
+ memset(&tmplt, 0, sizeof(struct pipe_resource));
+ tmplt.target = PIPE_TEXTURE_2D;
+ tmplt.format = vctx->get_param(vctx,PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT);
++
++ /* Check the preferred format only after it has actually been queried */
++ if (!vctx->is_format_supported(vctx, tmplt.format, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
++ return VDP_STATUS_RESOURCES;
++
+ tmplt.last_level = 0;
++ tmplt.width0 = vlsurf->width;
++ tmplt.height0 = vlsurf->height;
+ tmplt.depth0 = 1;
+ tmplt.usage = PIPE_USAGE_DEFAULT;
+ tmplt.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ tmplt.flags = 0;
+
+ surf_tex = vctx->screen->resource_create(vctx->screen, &tmplt);
+
+ memset(&surf_template, 0, sizeof(surf_template));
+ surf_template.format = surf_tex->format;
+ surf_template.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ vlsurf->psurface = vctx->create_surface(vctx->screen, surf_tex, &surf_template);
+
+ pipe_resource_reference(&surf_tex, NULL);
+
+ if (!vlsurf->psurface)
+ return VDP_STATUS_RESOURCES;
+ debug_printf("[VDPAU] Done creating surface\n");
+
+ return VDP_STATUS_OK;
+}
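+
+/*
+ * vlVdpCreateSurfaceTarget() asks the video context for its preferred
+ * decode target format (PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT), checks
+ * that it can be used both as a sampler view and as a render target, then
+ * creates the backing resource and wraps it in the pipe_surface stored in
+ * the vlVdpSurface.
+ */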
+
+VdpStatus
+vlVdpDecoderRenderMpeg2(vlVdpDecoder *vldecoder,
+ vlVdpSurface *vlsurf,
+ VdpPictureInfoMPEG1Or2 *picture_info,
+ uint32_t bitstream_buffer_count,
+ VdpBitstreamBuffer const *bitstream_buffers)
+{
+ struct pipe_video_context *vpipe;
+ vlVdpSurface *t_vdp_surf;
+ vlVdpSurface *p_vdp_surf;
+ vlVdpSurface *f_vdp_surf;
+ struct pipe_surface *t_surf;
+ struct pipe_surface *p_surf;
+ struct pipe_surface *f_surf;
+ uint32_t num_macroblocks;
+ struct pipe_mpeg12_macroblock *pipe_macroblocks;
+ VdpStatus ret;
+
+ debug_printf("[VDPAU] Decoding MPEG2\n");
+
+ t_vdp_surf = vlsurf;
+
+ /* If a reference surface handle is VDP_INVALID_HANDLE, that reference is not used */
+ if (picture_info->backward_reference == VDP_INVALID_HANDLE)
+ p_vdp_surf = NULL;
+ else {
+ p_vdp_surf = (vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference);
+ if (!p_vdp_surf)
+ return VDP_STATUS_INVALID_HANDLE;
+ }
+
+ if (picture_info->forward_reference == VDP_INVALID_HANDLE)
+ f_vdp_surf = NULL;
+ else {
+ f_vdp_surf = (vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference);
+ if (!f_vdp_surf)
+ return VDP_STATUS_INVALID_HANDLE;
+ }
+
+ if (f_vdp_surf == VDP_INVALID_HANDLE) f_vdp_surf = NULL;
+
+ ret = vlVdpCreateSurfaceTarget(vldecoder,t_vdp_surf);
+
+ vpipe = vldecoder->vctx->vpipe;
+
+ if (vlVdpMPEG2BitstreamToMacroblock(vpipe->screen, bitstream_buffers, bitstream_buffer_count,
+ &num_macroblocks, &pipe_macroblocks))
+ {
+ debug_printf("[VDPAU] Error in frame-header. Skipping.\n");
+
+ ret = VDP_STATUS_OK;
+ goto skip_frame;
+ }
+
+ t_surf = t_vdp_surf->psurface;
+ p_surf = p_vdp_surf ? p_vdp_surf->psurface : NULL;
+ f_surf = f_vdp_surf ? f_vdp_surf->psurface : NULL;
+
+ vpipe->set_decode_target(vpipe,t_surf);
+ vpipe->decode_macroblocks(vpipe, p_surf, f_surf, num_macroblocks,
+ (struct pipe_macroblock *)pipe_macroblocks, NULL);
+
+ skip_frame:
+ return ret;
+}
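+
+/*
+ * vlVdpDecoderRenderMpeg2() first converts the bitstream into an array of
+ * pipe_mpeg12_macroblock via vlVdpMPEG2BitstreamToMacroblock() and then
+ * hands it to the video context through set_decode_target() and
+ * decode_macroblocks(), with the reference surfaces taken from the
+ * picture_info handles looked up above.
+ */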
+
+VdpStatus
+vlVdpDecoderRender(VdpDecoder decoder,
+ VdpVideoSurface target,
+ VdpPictureInfo const *picture_info,
+ uint32_t bitstream_buffer_count,
+ VdpBitstreamBuffer const *bitstream_buffers)
+{
+ vlVdpDecoder *vldecoder;
+ vlVdpSurface *vlsurf;
+ struct vl_screen *vscreen;
+ VdpStatus ret;
+
+ debug_printf("[VDPAU] Decoding\n");
+
+ if (!(picture_info && bitstream_buffers))
+ return VDP_STATUS_INVALID_POINTER;
+
+ vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder);
+ if (!vldecoder)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ vlsurf = (vlVdpSurface *)vlGetDataHTAB(target);
+ if (!vlsurf)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ if (vlsurf->device != vldecoder->device)
+ return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
+
+ /* Test doesn't make sense */
+ /*if (vlsurf->chroma_format != vldecoder->chroma_format)
+ return VDP_STATUS_INVALID_CHROMA_TYPE;*/
+
+ vscreen = vl_screen_create(vldecoder->device->display, vldecoder->device->screen);
+ if (!vscreen)
+ return VDP_STATUS_RESOURCES;
+
+ vldecoder->vctx = vl_video_create(vscreen, vldecoder->profile, vlsurf->chroma_format, vldecoder->width, vldecoder->height);
+ if (!vldecoder->vctx)
+ return VDP_STATUS_RESOURCES;
+
+ // TODO: Right now only mpeg2 is supported.
+ switch (vldecoder->vctx->vpipe->profile) {
+ case PIPE_VIDEO_PROFILE_MPEG2_SIMPLE:
+ case PIPE_VIDEO_PROFILE_MPEG2_MAIN:
+ ret = vlVdpDecoderRenderMpeg2(vldecoder,vlsurf,(VdpPictureInfoMPEG1Or2 *)picture_info,
+ bitstream_buffer_count,bitstream_buffers);
+ break;
+ default:
+ return VDP_STATUS_INVALID_DECODER_PROFILE;
+ }
+ assert(0);
+
+ return ret;
+}
+
+VdpStatus
+vlVdpGenerateCSCMatrix(VdpProcamp *procamp,
+ VdpColorStandard standard,
+ VdpCSCMatrix *csc_matrix)
+{
+ debug_printf("[VDPAU] Generating CSCMatrix\n");
+ if (!(csc_matrix && procamp))
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_OK;
+}
--- /dev/null
- PIPE_BIND_RENDER_TARGET,
- PIPE_TEXTURE_GEOM_NON_SQUARE);
+/**************************************************************************
+ *
+ * Copyright 2010 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vdpau_private.h"
+#include <vl_winsys.h>
+#include <assert.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_defines.h>
+#include <math.h>
+#include <util/u_debug.h>
+
+
+VdpStatus
+vlVdpGetApiVersion(uint32_t *api_version)
+{
+ if (!api_version)
+ return VDP_STATUS_INVALID_POINTER;
+
+ *api_version = 1;
+ return VDP_STATUS_OK;
+}
+
+VdpStatus
+vlVdpGetInformationString(char const **information_string)
+{
+ if (!information_string)
+ return VDP_STATUS_INVALID_POINTER;
+
+ *information_string = INFORMATION_STRING;
+ return VDP_STATUS_OK;
+}
+
+VdpStatus
+vlVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chroma_type,
+ VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
+{
+ struct vl_screen *vlscreen;
+ uint32_t max_2d_texture_level;
+ VdpStatus ret = VDP_STATUS_OK;
+
+ debug_printf("[VDPAU] Querying video surfaces\n");
+
+ if (!(is_supported && max_width && max_height))
+ return VDP_STATUS_INVALID_POINTER;
+
+ vlVdpDevice *dev = vlGetDataHTAB(device);
+ if (!dev)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ vlscreen = vl_screen_create(dev->display, dev->screen);
+ if (!vlscreen)
+ return VDP_STATUS_RESOURCES;
+
+ /* XXX: Current limits */
+ *is_supported = true;
+ if (surface_chroma_type != VDP_CHROMA_TYPE_420) {
+ *is_supported = false;
+ goto no_sup;
+ }
+
+ max_2d_texture_level = vlscreen->pscreen->get_param( vlscreen->pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS );
+ if (!max_2d_texture_level) {
+ ret = VDP_STATUS_RESOURCES;
+ goto no_sup;
+ }
+
+ /* I am not quite sure if it is max_2d_texture_level-1 or just max_2d_texture_level */
+ *max_width = *max_height = pow(2,max_2d_texture_level-1);
+
+ vl_screen_destroy(vlscreen);
+
+ return VDP_STATUS_OK;
+ no_sup:
+ vl_screen_destroy(vlscreen);
+ return ret;
+}
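+
+/*
+ * PIPE_CAP_MAX_TEXTURE_2D_LEVELS counts mip levels including the base
+ * level, so a value of n allows textures up to 2^(n-1) texels on a side
+ * (e.g. 13 levels -> 4096); the pow(2, max_2d_texture_level - 1) above
+ * should therefore be the right formula.
+ */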
+
+VdpStatus
+vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaType surface_chroma_type,
+ VdpYCbCrFormat bits_ycbcr_format,
+ VdpBool *is_supported)
+{
+ struct vl_screen *vlscreen;
+
+ debug_printf("[VDPAU] Querying get put video surfaces\n");
+
+ if (!is_supported)
+ return VDP_STATUS_INVALID_POINTER;
+
+ vlVdpDevice *dev = vlGetDataHTAB(device);
+ if (!dev)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ vlscreen = vl_screen_create(dev->display, dev->screen);
+ if (!vlscreen)
+ return VDP_STATUS_RESOURCES;
+
+ if (bits_ycbcr_format != VDP_YCBCR_FORMAT_Y8U8V8A8 && bits_ycbcr_format != VDP_YCBCR_FORMAT_V8U8Y8A8)
+ *is_supported = vlscreen->pscreen->is_format_supported(vlscreen->pscreen,
+ FormatToPipe(bits_ycbcr_format),
+ PIPE_TEXTURE_2D,
+ 1,
++ PIPE_BIND_RENDER_TARGET);
+
+ vl_screen_destroy(vlscreen);
+
+ return VDP_STATUS_OK;
+}
+
+VdpStatus
+vlVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile,
+ VdpBool *is_supported, uint32_t *max_level, uint32_t *max_macroblocks,
+ uint32_t *max_width, uint32_t *max_height)
+{
+ enum pipe_video_profile p_profile;
+ uint32_t max_decode_width;
+ uint32_t max_decode_height;
+ uint32_t max_2d_texture_level;
+ struct vl_screen *vlscreen;
+
+ debug_printf("[VDPAU] Querying decoder\n");
+
+ if (!(is_supported && max_level && max_macroblocks && max_width && max_height))
+ return VDP_STATUS_INVALID_POINTER;
+
+ vlVdpDevice *dev = vlGetDataHTAB(device);
+ if (!dev)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ vlscreen = vl_screen_create(dev->display, dev->screen);
+ if (!vlscreen)
+ return VDP_STATUS_RESOURCES;
+
+ p_profile = ProfileToPipe(profile);
+ if (p_profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
+ *is_supported = false;
+ return VDP_STATUS_OK;
+ }
+
+ if (p_profile != PIPE_VIDEO_PROFILE_MPEG2_SIMPLE && p_profile != PIPE_VIDEO_PROFILE_MPEG2_MAIN) {
+ *is_supported = false;
+ return VDP_STATUS_OK;
+ }
+
+ /* XXX hack, need to implement something more sane when the decoders have been implemented */
+ max_2d_texture_level = vlscreen->pscreen->get_param( vlscreen->pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS );
+ max_decode_width = max_decode_height = pow(2,max_2d_texture_level-2);
+ if (!(max_decode_width && max_decode_height))
+ return VDP_STATUS_RESOURCES;
+
+ *is_supported = true;
+ *max_width = max_decode_width;
+ *max_height = max_decode_height;
+ *max_level = 16;
+ *max_macroblocks = (max_decode_width/16) * (max_decode_height/16);
+
+ vl_screen_destroy(vlscreen);
+
+ return VDP_STATUS_OK;
+}
+
+VdpStatus
+vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
+ VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
+{
+ if (!(is_supported && max_width && max_height))
+ return VDP_STATUS_INVALID_POINTER;
+
+ debug_printf("[VDPAU] Querying ouput surfaces\n");
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
+ VdpBool *is_supported)
+{
+ debug_printf("[VDPAU] Querying output surfaces get put native cap\n");
+
+ if (!is_supported)
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
+ VdpYCbCrFormat bits_ycbcr_format,
+ VdpBool *is_supported)
+{
+ debug_printf("[VDPAU] Querying output surfaces put ycrcb cap\n");
+ if (!is_supported)
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
+ VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
+{
+ debug_printf("[VDPAU] Querying bitmap surfaces\n");
+ if (!(is_supported && max_width && max_height))
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpVideoMixerQueryFeatureSupport(VdpDevice device, VdpVideoMixerFeature feature,
+ VdpBool *is_supported)
+{
+ debug_printf("[VDPAU] Querying mixer feature support\n");
+ if (!is_supported)
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpVideoMixerQueryParameterSupport(VdpDevice device, VdpVideoMixerParameter parameter,
+ VdpBool *is_supported)
+{
+ if (!is_supported)
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter parameter,
+ void *min_value, void *max_value)
+{
+ if (!(min_value && max_value))
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpVideoMixerQueryAttributeSupport(VdpDevice device, VdpVideoMixerAttribute attribute,
+ VdpBool *is_supported)
+{
+ if (!is_supported)
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
+
+VdpStatus
+vlVdpVideoMixerQueryAttributeValueRange(VdpDevice device, VdpVideoMixerAttribute attribute,
+ void *min_value, void *max_value)
+{
+ if (!(min_value && max_value))
+ return VDP_STATUS_INVALID_POINTER;
+
+ return VDP_STATUS_NO_IMPLEMENTATION;
+}
--- /dev/null
- template.last_level = 0;
- if (vpipe->is_format_supported(vpipe, template.format,
- PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
- PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO)) {
- template.width0 = context->width;
- template.height0 = context->height;
- }
- else {
- assert(vpipe->is_format_supported(vpipe, template.format,
- PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
- PIPE_TEXTURE_GEOM_NON_SQUARE));
- template.width0 = util_next_power_of_two(context->width);
- template.height0 = util_next_power_of_two(context->height);
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <stdio.h>
+#include <X11/Xlibint.h>
+#include <vl_winsys.h>
+#include <pipe/p_video_context.h>
+#include <pipe/p_video_state.h>
+#include <pipe/p_state.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include "xvmc_private.h"
+
+static enum pipe_mpeg12_macroblock_type TypeToPipe(int xvmc_mb_type)
+{
+ if (xvmc_mb_type & XVMC_MB_TYPE_INTRA)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_INTRA;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_FORWARD)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_FWD;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_BACKWARD)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD))
+ return PIPE_MPEG12_MACROBLOCK_TYPE_BI;
+
+ assert(0);
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized mb type 0x%08X.\n", xvmc_mb_type);
+
+ return -1;
+}
+
+static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
+{
+ switch (xvmc_pic) {
+ case XVMC_TOP_FIELD:
+ return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP;
+ case XVMC_BOTTOM_FIELD:
+ return PIPE_MPEG12_PICTURE_TYPE_FIELD_BOTTOM;
+ case XVMC_FRAME_PICTURE:
+ return PIPE_MPEG12_PICTURE_TYPE_FRAME;
+ default:
+ assert(0);
+ }
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized picture type 0x%08X.\n", xvmc_pic);
+
+ return -1;
+}
+
+static enum pipe_mpeg12_motion_type MotionToPipe(int xvmc_motion_type, unsigned int xvmc_picture_structure)
+{
+ switch (xvmc_motion_type) {
+ case XVMC_PREDICTION_FRAME:
+ if (xvmc_picture_structure == XVMC_FRAME_PICTURE)
+ return PIPE_MPEG12_MOTION_TYPE_FRAME;
+ else
+ return PIPE_MPEG12_MOTION_TYPE_16x8;
+ break;
+ case XVMC_PREDICTION_FIELD:
+ return PIPE_MPEG12_MOTION_TYPE_FIELD;
+ case XVMC_PREDICTION_DUAL_PRIME:
+ return PIPE_MPEG12_MOTION_TYPE_DUALPRIME;
+ default:
+ assert(0);
+ }
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized motion type 0x%08X (with picture structure 0x%08X).\n", xvmc_motion_type, xvmc_picture_structure);
+
+ return -1;
+}
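+
+/*
+ * XVMC_PREDICTION_16x8 shares its value with XVMC_PREDICTION_FRAME, and
+ * MPEG-2 field pictures cannot use frame prediction, which is why
+ * MotionToPipe() needs the picture structure to disambiguate the two.
+ */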
+
+#if 0
+static bool
+CreateOrResizeBackBuffer(struct vl_context *vctx, unsigned int width, unsigned int height,
+ struct pipe_surface **backbuffer)
+{
+ struct pipe_video_context *vpipe;
+ struct pipe_resource template;
+ struct pipe_resource *tex;
+
+ assert(vctx);
+
+ vpipe = vctx->vpipe;
+
+ if (*backbuffer) {
+ if ((*backbuffer)->width != width || (*backbuffer)->height != height)
+ pipe_surface_reference(backbuffer, NULL);
+ else
+ return true;
+ }
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = vctx->vscreen->format;
+ template.last_level = 0;
+ template.width0 = width;
+ template.height0 = height;
+ template.depth0 = 1;
+ template.array_size = 1;
+ template.usage = PIPE_USAGE_DEFAULT;
+ template.bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_BLIT_SOURCE;
+ template.flags = 0;
+
+ tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!tex)
+ return false;
+
+ *backbuffer = vpipe->screen->get_tex_surface(vpipe->screen, tex, 0, 0, 0,
+ template.bind);
+ pipe_resource_reference(&tex, NULL);
+
+ if (!*backbuffer)
+ return false;
+
+ /* Clear the backbuffer in case the video doesn't cover the whole window */
+ /* FIXME: Need to clear every time a frame moves and leaves dirty rects */
+ vpipe->surface_fill(vpipe, *backbuffer, 0, 0, width, height, 0);
+
+ return true;
+}
+#endif
+
+static void
+MacroBlocksToPipe(struct pipe_screen *screen,
+ unsigned int xvmc_picture_structure,
+ const XvMCMacroBlockArray *xvmc_macroblocks,
+ const XvMCBlockArray *xvmc_blocks,
+ unsigned int first_macroblock,
+ unsigned int num_macroblocks,
+ struct pipe_mpeg12_macroblock *pipe_macroblocks)
+{
+ unsigned int i, j, k, l;
+ XvMCMacroBlock *xvmc_mb;
+
+ assert(xvmc_macroblocks);
+ assert(xvmc_blocks);
+ assert(pipe_macroblocks);
+ assert(num_macroblocks);
+
+ xvmc_mb = xvmc_macroblocks->macro_blocks + first_macroblock;
+
+ for (i = 0; i < num_macroblocks; ++i) {
+ pipe_macroblocks->base.codec = PIPE_VIDEO_CODEC_MPEG12;
+ pipe_macroblocks->mbx = xvmc_mb->x;
+ pipe_macroblocks->mby = xvmc_mb->y;
+ pipe_macroblocks->mb_type = TypeToPipe(xvmc_mb->macroblock_type);
+ if (pipe_macroblocks->mb_type != PIPE_MPEG12_MACROBLOCK_TYPE_INTRA)
+ pipe_macroblocks->mo_type = MotionToPipe(xvmc_mb->motion_type, xvmc_picture_structure);
+ /* Get rid of Valgrind 'undefined' warnings */
+ else
+ pipe_macroblocks->mo_type = -1;
+ pipe_macroblocks->dct_type = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
+ PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
+
+ for (j = 0; j < 2; ++j)
+ for (k = 0; k < 2; ++k)
+ for (l = 0; l < 2; ++l)
+ pipe_macroblocks->pmv[j][k][l] = xvmc_mb->PMV[j][k][l];
+
+ pipe_macroblocks->mvfs[0][0] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_FORWARD;
+ pipe_macroblocks->mvfs[0][1] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_BACKWARD;
+ pipe_macroblocks->mvfs[1][0] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_FORWARD;
+ pipe_macroblocks->mvfs[1][1] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_BACKWARD;
+
+ pipe_macroblocks->cbp = xvmc_mb->coded_block_pattern;
+ pipe_macroblocks->blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
+
+ ++pipe_macroblocks;
+ ++xvmc_mb;
+ }
+}
+
+PUBLIC
+Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
+{
+ XvMCContextPrivate *context_priv;
+ struct pipe_video_context *vpipe;
+ XvMCSurfacePrivate *surface_priv;
+ struct pipe_resource template;
+ struct pipe_resource *vsfc_tex;
+ struct pipe_surface surf_template;
+ struct pipe_surface *vsfc;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!context)
+ return XvMCBadContext;
+ if (!surface)
+ return XvMCBadSurface;
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
+ if (!surface_priv)
+ return BadAlloc;
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = (enum pipe_format)vpipe->get_param(vpipe, PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT);
++ if (!vpipe->is_format_supported(vpipe, template.format,
++ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET)) {
++ FREE(surface_priv);
++ return BadAlloc;
+ }
++ template.last_level = 0;
++ template.width0 = util_next_power_of_two(context->width);
++ template.height0 = util_next_power_of_two(context->height);
+ template.depth0 = 1;
+ template.array_size = 1;
+ template.usage = PIPE_USAGE_DEFAULT;
+ template.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ template.flags = 0;
+ vsfc_tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!vsfc_tex) {
+ FREE(surface_priv);
+ return BadAlloc;
+ }
+
+ memset(&surf_template, 0, sizeof(surf_template));
+ surf_template.format = vsfc_tex->format;
+ surf_template.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ vsfc = vpipe->create_surface(vpipe, vsfc_tex, &surf_template);
+ pipe_resource_reference(&vsfc_tex, NULL);
+ if (!vsfc) {
+ FREE(surface_priv);
+ return BadAlloc;
+ }
+
+ surface_priv->pipe_vsfc = vsfc;
+ surface_priv->context = context;
+
+ surface->surface_id = XAllocID(dpy);
+ surface->context_id = context->context_id;
+ surface->surface_type_id = context->surface_type_id;
+ surface->width = context->width;
+ surface->height = context->height;
+ surface->privData = surface_priv;
+
+ SyncHandle();
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p created.\n", surface);
+
+ return Success;
+}
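+
+/*
+ * With the geometry argument gone from is_format_supported(), the NPOT
+ * capability can no longer be queried here, so the decode target texture
+ * is now always allocated at the next power of two of the context
+ * dimensions; callers only ever address the context-sized sub-rectangle.
+ */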
+
+PUBLIC
+Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int picture_structure,
+ XvMCSurface *target_surface, XvMCSurface *past_surface, XvMCSurface *future_surface,
+ unsigned int flags, unsigned int num_macroblocks, unsigned int first_macroblock,
+ XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
+)
+{
+ struct pipe_video_context *vpipe;
+ struct pipe_surface *t_vsfc;
+ struct pipe_surface *p_vsfc;
+ struct pipe_surface *f_vsfc;
+ XvMCContextPrivate *context_priv;
+ XvMCSurfacePrivate *target_surface_priv;
+ XvMCSurfacePrivate *past_surface_priv;
+ XvMCSurfacePrivate *future_surface_priv;
+ struct pipe_mpeg12_macroblock pipe_macroblocks[num_macroblocks];
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p.\n", target_surface);
+
+ assert(dpy);
+
+ if (!context || !context->privData)
+ return XvMCBadContext;
+ if (!target_surface || !target_surface->privData)
+ return XvMCBadSurface;
+
+ if (picture_structure != XVMC_TOP_FIELD &&
+ picture_structure != XVMC_BOTTOM_FIELD &&
+ picture_structure != XVMC_FRAME_PICTURE)
+ return BadValue;
+ /* Bkwd pred equivalent to fwd (past && !future) */
+ if (future_surface && !past_surface)
+ return BadMatch;
+
+ assert(context->context_id == target_surface->context_id);
+ assert(!past_surface || context->context_id == past_surface->context_id);
+ assert(!future_surface || context->context_id == future_surface->context_id);
+
+ assert(macroblocks);
+ assert(blocks);
+
+ assert(macroblocks->context_id == context->context_id);
+ assert(blocks->context_id == context->context_id);
+
+ assert(flags == 0 || flags == XVMC_SECOND_FIELD);
+
+ target_surface_priv = target_surface->privData;
+ past_surface_priv = past_surface ? past_surface->privData : NULL;
+ future_surface_priv = future_surface ? future_surface->privData : NULL;
+
+ assert(target_surface_priv->context == context);
+ assert(!past_surface || past_surface_priv->context == context);
+ assert(!future_surface || future_surface_priv->context == context);
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ t_vsfc = target_surface_priv->pipe_vsfc;
+ p_vsfc = past_surface ? past_surface_priv->pipe_vsfc : NULL;
+ f_vsfc = future_surface ? future_surface_priv->pipe_vsfc : NULL;
+
+ MacroBlocksToPipe(vpipe->screen, picture_structure, macroblocks, blocks, first_macroblock,
+ num_macroblocks, pipe_macroblocks);
+
+ vpipe->set_decode_target(vpipe, t_vsfc);
+ vpipe->decode_macroblocks(vpipe, p_vsfc, f_vsfc, num_macroblocks,
+ &pipe_macroblocks->base, &target_surface_priv->render_fence);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCFlushSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCSyncSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
+ short srcx, short srcy, unsigned short srcw, unsigned short srch,
+ short destx, short desty, unsigned short destw, unsigned short desth,
+ int flags)
+{
+ static int dump_window = -1;
+
+ struct pipe_video_context *vpipe;
+ XvMCSurfacePrivate *surface_priv;
+ XvMCContextPrivate *context_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContext *context;
+ struct pipe_video_rect src_rect = {srcx, srcy, srcw, srch};
+ struct pipe_video_rect dst_rect = {destx, desty, destw, desth};
+ struct pipe_surface *drawable_surface;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Displaying surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ surface_priv = surface->privData;
+ context = surface_priv->context;
+ context_priv = context->privData;
+
+ drawable_surface = vl_drawable_surface_get(context_priv->vctx, drawable);
+ if (!drawable_surface)
+ return BadDrawable;
+
+ assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
+ assert(srcx + srcw - 1 < surface->width);
+ assert(srcy + srch - 1 < surface->height);
+ /*
+ * Some apps (mplayer) hit these asserts because they call
+ * this function after the window has been resized by the WM
+ * but before they've handled the corresponding XEvent and
+ * know about the new dimensions. The output should be clipped
+ * until the app updates destw and desth.
+ */
+ /*
+ assert(destx + destw - 1 < drawable_surface->width);
+ assert(desty + desth - 1 < drawable_surface->height);
+ */
+
+ subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
+ vpipe = context_priv->vctx->vpipe;
+
+#if 0
+ if (!CreateOrResizeBackBuffer(context_priv->vctx, width, height, &context_priv->backbuffer))
+ return BadAlloc;
+#endif
+
+ if (subpicture_priv) {
+ struct pipe_video_rect src_rect = {surface_priv->subx, surface_priv->suby, surface_priv->subw, surface_priv->subh};
+ struct pipe_video_rect dst_rect = {surface_priv->surfx, surface_priv->surfy, surface_priv->surfw, surface_priv->surfh};
+ struct pipe_video_rect *src_rects[1] = {&src_rect};
+ struct pipe_video_rect *dst_rects[1] = {&dst_rect};
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p has subpicture %p.\n", surface, surface_priv->subpicture);
+
+ assert(subpicture_priv->surface == surface);
+ vpipe->set_picture_layers(vpipe, &subpicture_priv->sfc, src_rects, dst_rects, 1);
+
+ surface_priv->subpicture = NULL;
+ subpicture_priv->surface = NULL;
+ }
+ else
+ vpipe->set_picture_layers(vpipe, NULL, NULL, NULL, 0);
+
+ vpipe->render_picture(vpipe, surface_priv->pipe_vsfc, PictureToPipe(flags), &src_rect,
+ drawable_surface, &dst_rect, &surface_priv->disp_fence);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
+
+ vpipe->screen->flush_frontbuffer
+ (
+ vpipe->screen,
+ drawable_surface->texture,
+ 0, 0,
+ vl_contextprivate_get(context_priv->vctx, drawable_surface)
+ );
+
+ pipe_surface_reference(&drawable_surface, NULL);
+
+ if(dump_window == -1) {
+ dump_window = debug_get_num_option("XVMC_DUMP", 0);
+ }
+
+ if(dump_window) {
+ static unsigned int framenum = 0;
+ char cmd[256];
+ sprintf(cmd, "xwd -id %d -out xvmc_frame_%08d.xwd", (int)drawable, ++framenum);
+ system(cmd);
+ }
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Pushed surface %p to front buffer.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ assert(status);
+
+ *status = 0;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
+{
+ XvMCSurfacePrivate *surface_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ surface_priv = surface->privData;
+ pipe_surface_reference(&surface_priv->pipe_vsfc, NULL);
+ FREE(surface_priv);
+ surface->privData = NULL;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p destroyed.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCHideSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ /* No op, only for overlaid rendering */
+
+ return Success;
+}
case PIPE_FORMAT_RGTC2_SNORM:
return MESA_FORMAT_SIGNED_RG_RGTC2;
- case PIPE_FORMAT_LATC2_SNORM:
- return MESA_FORMAT_SIGNED_LA_LATC2;
+ case PIPE_FORMAT_LATC1_UNORM:
+ return MESA_FORMAT_L_LATC1;
+ case PIPE_FORMAT_LATC1_SNORM:
+ return MESA_FORMAT_SIGNED_L_LATC1;
+ case PIPE_FORMAT_LATC2_UNORM:
+ return MESA_FORMAT_LA_LATC2;
++ //case PIPE_FORMAT_LATC2_SNORM:
++ // return MESA_FORMAT_SIGNED_LA_LATC2;
+
default:
assert(0);
return MESA_FORMAT_NONE;