[enable_gallium_r600="$enableval"],
[enable_gallium_r600=auto])
if test "x$enable_gallium_r600" = xyes; then
- if test "x$HAVE_LIBDRM_RADEON" = xyes; then
- GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r600"
- gallium_check_st "r600/drm" "dri-r600" "xvmc-r600" "va-r600"
- else
- AC_MSG_ERROR([libdrm_radeon is missing, cannot build gallium-r600])
- fi
+ GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r600"
- gallium_check_st "r600/drm" "dri-r600"
++ gallium_check_st "r600/drm" "dri-r600" "xvmc-r600"
fi
dnl
util/u_transfer.c \
util/u_resource.c \
util/u_upload_mgr.c \
- util/u_vbuf_mgr.c
-
- # Disabling until pipe-video branch gets merged in
- #vl/vl_bitstream_parser.c \
- #vl/vl_mpeg12_mc_renderer.c \
- #vl/vl_compositor.c \
- #vl/vl_csc.c \
- #vl/vl_shader_build.c \
++ util/u_vbuf_mgr.c \
+ vl/vl_bitstream_parser.c \
+ vl/vl_mpeg12_mc_renderer.c \
+ vl/vl_compositor.c \
+ vl/vl_csc.c \
+ vl/vl_idct.c \
+ vl/vl_vertex_buffers.c
GALLIVM_SOURCES = \
gallivm/lp_bld_arit.c \
'util/u_tile.c',
'util/u_transfer.c',
'util/u_upload_mgr.c',
- # Disabling until pipe-video branch gets merged in
- #'vl/vl_bitstream_parser.c',
- #'vl/vl_mpeg12_mc_renderer.c',
- #'vl/vl_compositor.c',
- #'vl/vl_csc.c',
- #'vl/vl_shader_build.c',
+ 'util/u_vbuf_mgr.c',
- 'target-helpers/wrap_screen.c',
+ 'vl/vl_bitstream_parser.c',
+ 'vl/vl_mpeg12_mc_renderer.c',
+ 'vl/vl_compositor.c',
+ 'vl/vl_csc.c',
+ 'vl/vl_idct.c',
+ 'vl/vl_vertex_buffers.c',
]
if env['llvm']:
--- /dev/null
- c->vertex_buf.max_index = (VL_COMPOSITOR_MAX_LAYERS + 2) * 6 - 1;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_compositor.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_keymap.h>
+#include <util/u_draw.h>
+#include <util/u_sampler.h>
+#include <tgsi/tgsi_ureg.h>
+#include "vl_csc.h"
+
+struct vertex_shader_consts
+{
+ struct vertex4f dst_scale;
+ struct vertex4f dst_trans;
+ struct vertex4f src_scale;
+ struct vertex4f src_trans;
+};
+
+struct fragment_shader_consts
+{
+ float matrix[16];
+};
+
+static bool
+u_video_rects_equal(struct pipe_video_rect *a, struct pipe_video_rect *b)
+{
+ assert(a && b);
+
+ if (a->x != b->x)
+ return false;
+ if (a->y != b->y)
+ return false;
+ if (a->w != b->w)
+ return false;
+ if (a->h != b->h)
+ return false;
+
+ return true;
+}
+
+static bool
+create_vert_shader(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex;
+ struct ureg_dst o_vpos, o_vtex;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ vtex = ureg_DECL_vs_input(shader, 1);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex = vtex
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ ureg_MOV(shader, o_vtex, vtex);
+
+ ureg_END(shader);
+
+ c->vertex_shader = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->vertex_shader)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader_ycbcr_2_rgb(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src csc[4];
+ struct ureg_src sampler;
+ struct ureg_dst texel;
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ for (i = 0; i < 4; ++i)
+ csc[i] = ureg_DECL_constant(shader, i);
+ sampler = ureg_DECL_sampler(shader, 0);
+ texel = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel = tex(tc, sampler)
+ * fragment = csc * texel
+ */
+ ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
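+ /* each DP4 below writes one channel: fragment[i] = dot(csc[i], texel),
+ * i.e. the 4x4 color conversion matrix applied one row at a time */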
+ for (i = 0; i < 4; ++i)
+ ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
+
+ ureg_release_temporary(shader, texel);
+ ureg_END(shader);
+
+ c->fragment_shader.ycbcr_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader.ycbcr_2_rgb)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader_rgb_2_rgb(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src sampler;
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ sampler = ureg_DECL_sampler(shader, 0);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * fragment = tex(tc, sampler)
+ */
+ ureg_TEX(shader, fragment, TGSI_TEXTURE_2D, tc, sampler);
+ ureg_END(shader);
+
+ c->fragment_shader.rgb_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader.rgb_2_rgb)
+ return false;
+
+ return true;
+}
+
+static bool
+init_pipe_state(struct vl_compositor *c)
+{
+ struct pipe_sampler_state sampler;
+
+ assert(c);
+
+ c->fb_state.nr_cbufs = 1;
+ c->fb_state.zsbuf = NULL;
+
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.lod_bias = ;*/
+ /*sampler.min_lod = ;*/
+ /*sampler.max_lod = ;*/
+ /*sampler.border_color[i] = ;*/
+ /*sampler.max_anisotropy = ;*/
+ c->sampler = c->pipe->create_sampler_state(c->pipe, &sampler);
+
+ return true;
+}
+
+static void cleanup_pipe_state(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_sampler_state(c->pipe, c->sampler);
+}
+
+static bool
+init_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ if (!create_vert_shader(c)) {
+ debug_printf("Unable to create vertex shader.\n");
+ return false;
+ }
+ if (!create_frag_shader_ycbcr_2_rgb(c)) {
+ debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
+ return false;
+ }
+ if (!create_frag_shader_rgb_2_rgb(c)) {
+ debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void cleanup_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vs_state(c->pipe, c->vertex_shader);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader.ycbcr_2_rgb);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader.rgb_2_rgb);
+}
+
+static bool
+init_buffers(struct vl_compositor *c)
+{
+ struct fragment_shader_consts fsc;
+ struct pipe_vertex_element vertex_elems[2];
+
+ assert(c);
+
+ /*
+ * Create our vertex buffer and vertex buffer elements
+ */
+ c->vertex_buf.stride = sizeof(struct vertex4f);
+ c->vertex_buf.buffer_offset = 0;
+ /* XXX: Create with DYNAMIC or STREAM */
+ c->vertex_buf.buffer = pipe_buffer_create
+ (
+ c->pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
++ PIPE_USAGE_STATIC,
+ sizeof(struct vertex4f) * (VL_COMPOSITOR_MAX_LAYERS + 2) * 6
+ );
+
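+ /* each vertex is one vertex4f: .xy holds the destination position and
+ * .zw the source texcoord, split below into two R32G32_FLOAT elements
+ * at offsets 0 and sizeof(struct vertex2f) */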
+ vertex_elems[0].src_offset = 0;
+ vertex_elems[0].instance_divisor = 0;
+ vertex_elems[0].vertex_buffer_index = 0;
+ vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ vertex_elems[1].src_offset = sizeof(struct vertex2f);
+ vertex_elems[1].instance_divisor = 0;
+ vertex_elems[1].vertex_buffer_index = 0;
+ vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);
+
+ /*
+ * Create our fragment shader's constant buffer
+ * Const buffer contains the color conversion matrix and bias vectors
+ */
+ /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
+ c->fs_const_buf = pipe_buffer_create
+ (
+ c->pipe->screen,
+ PIPE_BIND_CONSTANT_BUFFER,
++ PIPE_USAGE_STATIC,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix);
+
+ vl_compositor_set_csc_matrix(c, fsc.matrix);
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
+ pipe_resource_reference(&c->vertex_buf.buffer, NULL);
+ pipe_resource_reference(&c->fs_const_buf, NULL);
+}
+
+static void
+texview_map_delete(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ pipe_sampler_view_reference(&sv, NULL);
+}
+
+bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe)
+{
+ unsigned i;
+
+ assert(compositor);
+
+ memset(compositor, 0, sizeof(struct vl_compositor));
+
+ compositor->pipe = pipe;
+
+ compositor->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
+ texview_map_delete);
+ if (!compositor->texview_map)
+ return false;
+
+ if (!init_pipe_state(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ return false;
+ }
+ if (!init_shaders(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+ if (!init_buffers(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+
+ compositor->fb_state.width = 0;
+ compositor->fb_state.height = 0;
+ compositor->bg = NULL;
+ compositor->dirty_bg = false;
+ for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ compositor->layers[i] = NULL;
+ compositor->dirty_layers = 0;
+
+ return true;
+}
+
+void vl_compositor_cleanup(struct vl_compositor *compositor)
+{
+ assert(compositor);
+
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_buffers(compositor);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+}
+
+void vl_compositor_set_background(struct vl_compositor *compositor,
+ struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect)
+{
+ assert(compositor);
+ assert((bg && bg_src_rect) || (!bg && !bg_src_rect));
+
+ if (compositor->bg != bg ||
+ (bg_src_rect && !u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect))) {
+ pipe_surface_reference(&compositor->bg, bg);
+ if (bg_src_rect)
+ compositor->bg_src_rect = *bg_src_rect;
+ compositor->dirty_bg = bg != NULL;
+ }
+}
+
+void vl_compositor_set_layers(struct vl_compositor *compositor,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers)
+{
+ unsigned i;
+
+ assert(compositor);
+ assert(num_layers <= VL_COMPOSITOR_MAX_LAYERS);
+
+ for (i = 0; i < num_layers; ++i)
+ {
+ assert((layers[i] && src_rects[i] && dst_rects[i]) ||
+ (!layers[i] && !src_rects[i] && !dst_rects[i]));
+
+ if (compositor->layers[i] != layers[i] ||
+ (src_rects[i] && !u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i])) ||
+ (dst_rects[i] && !u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i])))
+ {
+ pipe_surface_reference(&compositor->layers[i], layers[i]);
+ if (src_rects[i])
+ compositor->layer_src_rects[i] = *src_rects[i];
+ if (dst_rects[i])
+ compositor->layer_dst_rects[i] = *dst_rects[i];
+ }
+
+ if (layers[i])
+ compositor->dirty_layers |= 1 << i;
+ else
+ compositor->dirty_layers &= ~(1 << i);
+ }
+
+ for (; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ pipe_surface_reference(&compositor->layers[i], NULL);
+}
+
+static void gen_rect_verts(unsigned pos,
+ struct pipe_video_rect *src_rect,
+ struct vertex2f *src_inv_size,
+ struct pipe_video_rect *dst_rect,
+ struct vertex2f *dst_inv_size,
+ struct vertex4f *vb)
+{
+ assert(pos < VL_COMPOSITOR_MAX_LAYERS + 2);
+ assert(src_rect);
+ assert(src_inv_size);
+ assert((dst_rect && dst_inv_size) /*|| (!dst_rect && !dst_inv_size)*/);
+ assert(vb);
+
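+ /*
+ * emit the rectangle as two triangles (six vertices): {0, 1, 2} and
+ * {3, 4, 5}, where vertex 3 duplicates 2 and vertex 4 duplicates 1
+ */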
+ vb[pos * 6 + 0].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 0].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 0].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 0].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 1].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 1].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 1].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 1].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 2].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 2].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 2].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 2].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 3].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 3].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 3].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 3].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 4].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 4].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 4].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 4].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 5].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 5].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 5].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 5].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+}
+
+static unsigned gen_data(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect,
+ struct pipe_surface **textures,
+ void **frag_shaders)
+{
+ void *vb;
+ struct pipe_transfer *buf_transfer;
+ unsigned num_rects = 0;
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+ assert(textures);
+
+ vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer);
+
+ if (!vb)
+ return 0;
+
+ if (c->dirty_bg) {
+ struct vertex2f bg_inv_size = {1.0f / c->bg->width, 1.0f / c->bg->height};
+ gen_rect_verts(num_rects, &c->bg_src_rect, &bg_inv_size, NULL, NULL, vb);
+ textures[num_rects] = c->bg;
+ /* XXX: Hack */
+ frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
+ ++num_rects;
+ c->dirty_bg = false;
+ }
+
+ {
+ struct vertex2f src_inv_size = { 1.0f / src_surface->width, 1.0f / src_surface->height};
+ gen_rect_verts(num_rects, src_rect, &src_inv_size, dst_rect, &c->fb_inv_size, vb);
+ textures[num_rects] = src_surface;
+ /* XXX: Hack, sort of */
+ frag_shaders[num_rects] = c->fragment_shader.ycbcr_2_rgb;
+ ++num_rects;
+ }
+
+ for (i = 0; c->dirty_layers > 0; i++) {
+ assert(i < VL_COMPOSITOR_MAX_LAYERS);
+
+ if (c->dirty_layers & (1 << i)) {
+ struct vertex2f layer_inv_size = {1.0f / c->layers[i]->width, 1.0f / c->layers[i]->height};
+ gen_rect_verts(num_rects, &c->layer_src_rects[i], &layer_inv_size,
+ &c->layer_dst_rects[i], &c->fb_inv_size, vb);
+ textures[num_rects] = c->layers[i];
+ /* XXX: Hack */
+ frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
+ ++num_rects;
+ c->dirty_layers &= ~(1 << i);
+ }
+ }
+
+ pipe_buffer_unmap(c->pipe, buf_transfer);
+
+ return num_rects;
+}
+
+static void draw_layers(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect)
+{
+ unsigned num_rects;
+ struct pipe_surface *src_surfaces[VL_COMPOSITOR_MAX_LAYERS + 2];
+ void *frag_shaders[VL_COMPOSITOR_MAX_LAYERS + 2];
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+
+ num_rects = gen_data(c, src_surface, src_rect, dst_rect, src_surfaces, frag_shaders);
+
+ for (i = 0; i < num_rects; ++i) {
+ boolean delete_view = FALSE;
+ struct pipe_sampler_view *surface_view = (struct pipe_sampler_view*)util_keymap_lookup(c->texview_map,
+ &src_surfaces[i]);
+ if (!surface_view) {
+ struct pipe_sampler_view templat;
+ u_sampler_view_default_template(&templat, src_surfaces[i]->texture,
+ src_surfaces[i]->texture->format);
+ surface_view = c->pipe->create_sampler_view(c->pipe, src_surfaces[i]->texture,
+ &templat);
+ if (!surface_view)
+ return;
+
+ delete_view = !util_keymap_insert(c->texview_map, &src_surfaces[i],
+ surface_view, c->pipe);
+ }
+
+ c->pipe->bind_fs_state(c->pipe, frag_shaders[i]);
+ c->pipe->set_fragment_sampler_views(c->pipe, 1, &surface_view);
+
+ util_draw_arrays(c->pipe, PIPE_PRIM_TRIANGLES, i * 6, 6);
+
+ if (delete_view) {
+ pipe_sampler_view_reference(&surface_view, NULL);
+ }
+ }
+}
+
+void vl_compositor_render(struct vl_compositor *compositor,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence)
+{
+ assert(compositor);
+ assert(src_surface);
+ assert(src_area);
+ assert(dst_surface);
+ assert(dst_area);
+ assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME);
+
+ if (compositor->fb_state.width != dst_surface->width) {
+ compositor->fb_inv_size.x = 1.0f / dst_surface->width;
+ compositor->fb_state.width = dst_surface->width;
+ }
+ if (compositor->fb_state.height != dst_surface->height) {
+ compositor->fb_inv_size.y = 1.0f / dst_surface->height;
+ compositor->fb_state.height = dst_surface->height;
+ }
+
+ compositor->fb_state.cbufs[0] = dst_surface;
+
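+ /* gen_rect_verts produces positions in [0..1], so the viewport below,
+ * scaling by (width, height) with zero translate, maps them directly
+ * to pixel coordinates */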
+ compositor->viewport.scale[0] = compositor->fb_state.width;
+ compositor->viewport.scale[1] = compositor->fb_state.height;
+ compositor->viewport.scale[2] = 1;
+ compositor->viewport.scale[3] = 1;
+ compositor->viewport.translate[0] = 0;
+ compositor->viewport.translate[1] = 0;
+ compositor->viewport.translate[2] = 0;
+ compositor->viewport.translate[3] = 0;
+
+ compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state);
+ compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport);
+ compositor->pipe->bind_fragment_sampler_states(compositor->pipe, 1, &compositor->sampler);
+ compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader);
+ compositor->pipe->set_vertex_buffers(compositor->pipe, 1, &compositor->vertex_buf);
+ compositor->pipe->bind_vertex_elements_state(compositor->pipe, compositor->vertex_elems_state);
+ compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, compositor->fs_const_buf);
+
+ draw_layers(compositor, src_surface, src_area, dst_area);
+
+ assert(!compositor->dirty_bg && !compositor->dirty_layers);
+ compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence);
+}
+
+void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat)
+{
+ struct pipe_transfer *buf_transfer;
+
+ assert(compositor);
+
+ memcpy
+ (
+ pipe_buffer_map(compositor->pipe, compositor->fs_const_buf,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer),
+ mat,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ pipe_buffer_unmap(compositor->pipe, buf_transfer);
+}
--- /dev/null
- template.depth0 = 1;
+/**************************************************************************
+ *
+ * Copyright 2010 Christian König
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_idct.h"
+#include "vl_vertex_buffers.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_screen.h>
+#include <util/u_inlines.h>
+#include <util/u_sampler.h>
+#include <util/u_format.h>
+#include <tgsi/tgsi_ureg.h>
+#include "vl_types.h"
+
+#define BLOCK_WIDTH 8
+#define BLOCK_HEIGHT 8
+
+#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
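+/* coefficients live in a 16 bit SNORM texture but presumably only span a
+ * 9 bit signed range; the matrix is scaled by sqrtf() of this factor on
+ * upload, so the two IDCT passes together scale the result back up */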
+
+#define NR_RENDER_TARGETS 4
+
+enum VS_INPUT
+{
+ VS_I_RECT,
+ VS_I_VPOS,
+
+ NUM_VS_INPUTS
+};
+
+enum VS_OUTPUT
+{
+ VS_O_VPOS,
+ VS_O_L_ADDR0,
+ VS_O_L_ADDR1,
+ VS_O_R_ADDR0,
+ VS_O_R_ADDR1
+};
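+
+/* The IDCT is computed in two render passes: the matrix stage combines the
+ * source coefficients with the uploaded DCT basis along one axis, scattering
+ * the result over NR_RENDER_TARGETS color buffers (the layers of the
+ * intermediate 3D texture); the transpose stage applies the basis along the
+ * other axis, together giving the separable 2D inverse transform. */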
+
+static const float const_matrix[8][8] = {
+ { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
+ { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
+ { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
+ { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
+ { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
+ { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
+ { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
+ { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
+};
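+
+/* const_matrix[k][n] matches the 8-point DCT-II basis,
+ * c(k) / 2 * cos((2 * n + 1) * k * PI / 16) with c(0) = 1 / sqrt(2) and
+ * c(k > 0) = 1, up to the rounding visible above */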
+
+static void
+calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
+ struct ureg_src tc, struct ureg_src start, bool right_side,
+ bool transposed, float size)
+{
+ unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
+ unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;
+
+ unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
+ unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;
+
+ /*
+ * addr[0..1].(start) = start.(right_side ? y : x)
+ * addr[0..1].(tc) = tc.(right_side ? x : y)
+ * addr[0..1].z = tc.z
+ * addr[1].(start) += 1.0f / size
+ */
+ ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
+ ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
+ ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);
+
+ ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
+ ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
+ ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
+}
+
+static void *
+create_vert_shader(struct vl_idct *idct, bool matrix_stage)
+{
+ struct ureg_program *shader;
+ struct ureg_src scale;
+ struct ureg_src vrect, vpos;
+ struct ureg_dst t_tex, t_start;
+ struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return NULL;
+
+ t_tex = ureg_DECL_temporary(shader);
+ t_start = ureg_DECL_temporary(shader);
+
+ vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
+ vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
+
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
+
+ o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
+ o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);
+
+ o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
+ o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);
+
+ /*
+ * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
+ *
+ * t_tex.xy = (vpos + vrect) * scale
+ * t_tex.z = vrect.x * BLOCK_WIDTH / NR_RENDER_TARGETS
+ * o_vpos.xy = t_tex.xy
+ * o_vpos.zw = vpos
+ *
+ * t_start = vpos * scale
+ * o_l_addr = calc_addr(...)
+ * o_r_addr = calc_addr(...)
+ */
+ scale = ureg_imm2f(shader,
+ (float)BLOCK_WIDTH / idct->buffer_width,
+ (float)BLOCK_HEIGHT / idct->buffer_height);
+
+ ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
+ ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
+ ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
+ ureg_scalar(vrect, TGSI_SWIZZLE_X),
+ ureg_imm1f(shader, BLOCK_WIDTH / NR_RENDER_TARGETS));
+
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
+
+ ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);
+
+ if(matrix_stage) {
+ calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
+ calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
+ } else {
+ calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
+ calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
+ }
+
+ ureg_release_temporary(shader, t_tex);
+ ureg_release_temporary(shader, t_start);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, idct->pipe);
+}
+
+static void
+increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
+ struct ureg_src saddr[2], bool right_side, bool transposed,
+ int pos, float size)
+{
+ unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
+ unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
+
+ /*
+ * daddr[0..1].(start) = saddr[0..1].(start)
+ * daddr[0..1].(tc) = saddr[0..1].(tc) + pos / size
+ */
+
+ ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
+ ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
+ ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
+ ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
+}
+
+static void
+fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
+{
+ ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
+ ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
+}
+
+static void
+matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
+{
+ struct ureg_dst tmp;
+
+ tmp = ureg_DECL_temporary(shader);
+
+ /*
+ * tmp.x = dot4(l[0], r[0])
+ * tmp.y = dot4(l[1], r[1])
+ * dst = tmp.x + tmp.y
+ */
+ ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
+ ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
+ ureg_ADD(shader, dst,
+ ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
+ ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
+
+ ureg_release_temporary(shader, tmp);
+}
+
+static void *
+create_matrix_frag_shader(struct vl_idct *idct)
+{
+ struct ureg_program *shader;
+
+ struct ureg_src l_addr[2], r_addr[2];
+
+ struct ureg_dst l[4][2], r[2];
+ struct ureg_dst fragment[NR_RENDER_TARGETS];
+
+ unsigned i, j;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
+ l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ for (i = 0; i < NR_RENDER_TARGETS; ++i)
+ fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
+
+ for (i = 0; i < 4; ++i) {
+ l[i][0] = ureg_DECL_temporary(shader);
+ l[i][1] = ureg_DECL_temporary(shader);
+ }
+
+ r[0] = ureg_DECL_temporary(shader);
+ r[1] = ureg_DECL_temporary(shader);
+
+ for (i = 1; i < 4; ++i) {
+ increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ struct ureg_src s_addr[2];
+ s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
+ s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
+ fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
+ }
+
+ for (i = 0; i < NR_RENDER_TARGETS; ++i) {
+ struct ureg_src s_addr[2];
+
+ if (i > 0)
+ increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);
+
+ s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
+ s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
+ fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));
+
+ for (j = 0; j < 4; ++j) {
+ matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
+ }
+ }
+
+ for (i = 0; i < 4; ++i) {
+ ureg_release_temporary(shader, l[i][0]);
+ ureg_release_temporary(shader, l[i][1]);
+ }
+ ureg_release_temporary(shader, r[0]);
+ ureg_release_temporary(shader, r[1]);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, idct->pipe);
+}
+
+static void *
+create_transpose_frag_shader(struct vl_idct *idct)
+{
+ struct ureg_program *shader;
+
+ struct ureg_src l_addr[2], r_addr[2];
+
+ struct ureg_dst l[2], r[2];
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
+ l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ l[0] = ureg_DECL_temporary(shader);
+ l[1] = ureg_DECL_temporary(shader);
+ r[0] = ureg_DECL_temporary(shader);
+ r[1] = ureg_DECL_temporary(shader);
+
+ fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
+ fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));
+
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);
+
+ ureg_release_temporary(shader, l[0]);
+ ureg_release_temporary(shader, l[1]);
+ ureg_release_temporary(shader, r[0]);
+ ureg_release_temporary(shader, r[1]);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, idct->pipe);
+}
+
+static bool
+init_shaders(struct vl_idct *idct)
+{
+ idct->matrix_vs = create_vert_shader(idct, true);
+ idct->matrix_fs = create_matrix_frag_shader(idct);
+
+ idct->transpose_vs = create_vert_shader(idct, false);
+ idct->transpose_fs = create_transpose_frag_shader(idct);
+
+ return
+ idct->matrix_vs != NULL &&
+ idct->matrix_fs != NULL &&
+ idct->transpose_vs != NULL &&
+ idct->transpose_fs != NULL;
+}
+
+static void
+cleanup_shaders(struct vl_idct *idct)
+{
+ idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
+ idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
+ idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
+ idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
+}
+
+static bool
+init_state(struct vl_idct *idct)
+{
+ struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
+ struct pipe_sampler_state sampler;
+ struct pipe_rasterizer_state rs_state;
+ unsigned i;
+
+ assert(idct);
+
+ idct->quad = vl_vb_upload_quads(idct->pipe, idct->max_blocks);
+
+ if(idct->quad.buffer == NULL)
+ return false;
+
+ for (i = 0; i < 4; ++i) {
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
+ sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
+ sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.shadow_ambient = ; */
+ /*sampler.lod_bias = ; */
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ; */
+ /*sampler.border_color[0] = ; */
+ /*sampler.max_anisotropy = ; */
+ idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
+ }
+
+ memset(&rs_state, 0, sizeof(rs_state));
+ /*rs_state.sprite_coord_enable */
+ rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
+ rs_state.point_quad_rasterization = true;
+ rs_state.point_size = BLOCK_WIDTH;
+ rs_state.gl_rasterization_rules = false;
+ idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
+
+ vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
+
+ /* Pos element */
+ vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R16G16_SSCALED;
+
+ idct->vertex_buffer_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 1, 1);
+ idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);
+
+ return true;
+}
+
+static void
+cleanup_state(struct vl_idct *idct)
+{
+ unsigned i;
+
+ for (i = 0; i < 4; ++i)
+ idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
+
+ idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
+ idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
+}
+
+static bool
+init_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ struct pipe_resource template;
+ struct pipe_sampler_view sampler_view;
+ unsigned i;
+
+ assert(idct && buffer);
+
+ /* create textures */
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.last_level = 0;
- buffer->vertex_bufs.individual.quad.max_index = idct->quad.max_index;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
+ template.width0 = idct->buffer_width / 4;
+ template.height0 = idct->buffer_height;
+ template.depth0 = 1;
++ template.array_size = 1;
+ template.usage = PIPE_USAGE_STREAM;
+ buffer->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
+
+ template.target = PIPE_TEXTURE_3D;
+ template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
+ /* the intermediate is rendered to as well as sampled from */
+ template.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ template.width0 = idct->buffer_width / NR_RENDER_TARGETS;
+ template.height0 = idct->buffer_height / 4;
+ template.depth0 = NR_RENDER_TARGETS;
+ template.usage = PIPE_USAGE_STATIC;
+ buffer->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
+
+ for (i = 0; i < 4; ++i) {
+ if(buffer->textures.all[i] == NULL)
+ return false; /* a texture failed to allocate */
+
+ u_sampler_view_default_template(&sampler_view, buffer->textures.all[i], buffer->textures.all[i]->format);
+ buffer->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, buffer->textures.all[i], &sampler_view);
+ }
+
+ return true;
+}
+
+static void
+cleanup_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ unsigned i;
+
+ assert(idct && buffer);
+
+ for (i = 0; i < 4; ++i) {
+ pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
+ pipe_resource_reference(&buffer->textures.all[i], NULL);
+ }
+}
+
+static bool
+init_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ assert(idct && buffer);
+
+ buffer->vertex_bufs.individual.quad.stride = idct->quad.stride;
+ buffer->vertex_bufs.individual.quad.buffer_offset = idct->quad.buffer_offset;
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, idct->quad.buffer);
+
+ buffer->vertex_bufs.individual.pos = vl_vb_init(
+ &buffer->blocks, idct->pipe, idct->max_blocks,
+ idct->vertex_buffer_stride);
+
+ if(buffer->vertex_bufs.individual.pos.buffer == NULL)
+ return false;
+
+ return true;
+}
+
+static void
+cleanup_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ assert(idct && buffer);
+
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
+ pipe_resource_reference(&buffer->vertex_bufs.individual.pos.buffer, NULL);
+
+ vl_vb_cleanup(&buffer->blocks);
+}
+
+struct pipe_resource *
+vl_idct_upload_matrix(struct pipe_context *pipe)
+{
+ struct pipe_resource template, *matrix;
+ struct pipe_transfer *buf_transfer;
+ unsigned i, j, pitch;
+ float *f;
+
+ struct pipe_box rect =
+ {
+ 0, 0, 0,
+ BLOCK_WIDTH / 4,
+ BLOCK_HEIGHT,
+ 1
+ };
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ template.last_level = 0;
+ template.width0 = 2;
+ template.height0 = 8;
+ template.depth0 = 1;
++ template.array_size = 1;
+ template.usage = PIPE_USAGE_IMMUTABLE;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ matrix = pipe->screen->resource_create(pipe->screen, &template);
+
+ /* matrix */
+ buf_transfer = pipe->get_transfer
+ (
+ pipe, matrix,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &rect
+ );
+ pitch = buf_transfer->stride / sizeof(float);
+
+ f = pipe->transfer_map(pipe, buf_transfer);
+ /* transpose and scale */
+ for(i = 0; i < BLOCK_HEIGHT; ++i)
+ for(j = 0; j < BLOCK_WIDTH; ++j)
+ f[i * pitch + j] = const_matrix[j][i] * sqrtf(SCALE_FACTOR_16_TO_9);
+
+ pipe->transfer_unmap(pipe, buf_transfer);
+ pipe->transfer_destroy(pipe, buf_transfer);
+
+ return matrix;
+}
+
+bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
+ unsigned buffer_width, unsigned buffer_height,
+ struct pipe_resource *matrix)
+{
+ assert(idct && pipe && matrix);
+
+ idct->pipe = pipe;
+ idct->buffer_width = buffer_width;
+ idct->buffer_height = buffer_height;
+ pipe_resource_reference(&idct->matrix, matrix);
+
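+ /* one 8x8 block for every BLOCK_WIDTH x BLOCK_HEIGHT pixel tile of the buffer */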
+ idct->max_blocks =
+ align(buffer_width, BLOCK_WIDTH) / BLOCK_WIDTH *
+ align(buffer_height, BLOCK_HEIGHT) / BLOCK_HEIGHT;
+
+ if(!init_shaders(idct))
+ return false;
+
+ if(!init_state(idct)) {
+ cleanup_shaders(idct);
+ return false;
+ }
+
+ return true;
+}
+
+void
+vl_idct_cleanup(struct vl_idct *idct)
+{
+ cleanup_shaders(idct);
+ cleanup_state(idct);
+
+ pipe_resource_reference(&idct->matrix, NULL);
+}
+
+bool
+vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer, struct pipe_resource *dst)
+{
+ struct pipe_surface template;
+
+ unsigned i;
+
+ assert(buffer);
+ assert(idct);
+ assert(dst);
+
+ pipe_resource_reference(&buffer->textures.individual.matrix, idct->matrix);
+ pipe_resource_reference(&buffer->textures.individual.transpose, idct->matrix);
+ pipe_resource_reference(&buffer->destination, dst);
+
+ if (!init_textures(idct, buffer))
+ return false;
+
+ if (!init_vertex_buffers(idct, buffer))
+ return false;
+
+ /* init state */
+ buffer->viewport[0].scale[0] = buffer->textures.individual.intermediate->width0;
+ buffer->viewport[0].scale[1] = buffer->textures.individual.intermediate->height0;
+
+ buffer->viewport[1].scale[0] = buffer->destination->width0;
+ buffer->viewport[1].scale[1] = buffer->destination->height0;
+
+ buffer->fb_state[0].width = buffer->textures.individual.intermediate->width0;
+ buffer->fb_state[0].height = buffer->textures.individual.intermediate->height0;
+
+ buffer->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
+ for(i = 0; i < NR_RENDER_TARGETS; ++i) {
+ memset(&template, 0, sizeof(template));
+ template.format = buffer->textures.individual.intermediate->format;
+ template.u.tex.first_layer = i;
+ template.u.tex.last_layer = i;
+ template.usage = PIPE_BIND_RENDER_TARGET;
+ buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
+ idct->pipe, buffer->textures.individual.intermediate,
+ &template);
+ }
+
+ buffer->fb_state[1].width = buffer->destination->width0;
+ buffer->fb_state[1].height = buffer->destination->height0;
+
+ buffer->fb_state[1].nr_cbufs = 1;
+
+ memset(&template, 0, sizeof(template));
+ template.format = buffer->destination->format;
+ template.usage = PIPE_BIND_RENDER_TARGET;
+ buffer->fb_state[1].cbufs[0] = idct->pipe->create_surface(
+ idct->pipe, buffer->destination, &template);
+
+ for(i = 0; i < 2; ++i) {
+ buffer->viewport[i].scale[2] = 1;
+ buffer->viewport[i].scale[3] = 1;
+ buffer->viewport[i].translate[0] = 0;
+ buffer->viewport[i].translate[1] = 0;
+ buffer->viewport[i].translate[2] = 0;
+ buffer->viewport[i].translate[3] = 0;
+
+ buffer->fb_state[i].zsbuf = NULL;
+ }
+
+ return true;
+}
+
+void
+vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ unsigned i;
+
+ assert(buffer);
+
+ for(i = 0; i < NR_RENDER_TARGETS; ++i) {
+ idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[0].cbufs[i]);
+ }
+
+ idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[1].cbufs[0]);
+
+ cleanup_textures(idct, buffer);
+ cleanup_vertex_buffers(idct, buffer);
+}
+
+void
+vl_idct_map_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ struct pipe_box rect =
+ {
+ 0, 0, 0,
+ buffer->textures.individual.source->width0,
+ buffer->textures.individual.source->height0,
+ 1
+ };
+
+ assert(idct && buffer);
+
+ buffer->tex_transfer = idct->pipe->get_transfer
+ (
+ idct->pipe, buffer->textures.individual.source,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &rect
+ );
+
+ buffer->texels = idct->pipe->transfer_map(idct->pipe, buffer->tex_transfer);
+
+ vl_vb_map(&buffer->blocks, idct->pipe);
+}
+
+void
+vl_idct_add_block(struct vl_idct_buffer *buffer, unsigned x, unsigned y, short *block)
+{
+ struct vertex2s v;
+ unsigned tex_pitch;
+ short *texels;
+
+ unsigned i;
+
+ assert(buffer);
+
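+ /* copy the 8x8 short coefficients row by row into the mapped source
+ * texture at the block's position, then queue the position in the
+ * vertex buffer */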
+ tex_pitch = buffer->tex_transfer->stride / sizeof(short);
+ texels = buffer->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;
+
+ for (i = 0; i < BLOCK_HEIGHT; ++i)
+ memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));
+
+ v.x = x;
+ v.y = y;
+ vl_vb_add_block(&buffer->blocks, &v);
+}
+
+void
+vl_idct_unmap_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ assert(idct && buffer);
+
+ idct->pipe->transfer_unmap(idct->pipe, buffer->tex_transfer);
+ idct->pipe->transfer_destroy(idct->pipe, buffer->tex_transfer);
+ vl_vb_unmap(&buffer->blocks, idct->pipe);
+}
+
+void
+vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ unsigned num_verts;
+
+ assert(idct);
+
+ num_verts = vl_vb_restart(&buffer->blocks);
+
+ if(num_verts > 0) {
+
+ idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
+ idct->pipe->set_vertex_buffers(idct->pipe, 2, buffer->vertex_bufs.all);
+ idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
+
+ /* first stage */
+ idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
+ idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
+ idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
+ idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
+ idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
+ idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
+ util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
+
+ /* second stage */
+ idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
+ idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
+ idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
+ idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
+ idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
+ idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
+ util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
+ }
+}
--- /dev/null
- buffer->vertex_bufs.individual.quad.max_index = renderer->quad.max_index;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_mpeg12_mc_renderer.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_format.h>
+#include <util/u_math.h>
+#include <util/u_memory.h>
+#include <util/u_keymap.h>
+#include <util/u_sampler.h>
+#include <util/u_draw.h>
+#include <tgsi/tgsi_ureg.h>
+
+#define DEFAULT_BUF_ALIGNMENT 1
+#define MACROBLOCK_WIDTH 16
+#define MACROBLOCK_HEIGHT 16
+#define BLOCK_WIDTH 8
+#define BLOCK_HEIGHT 8
+
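+/* layout of one per-macroblock element of the vertex stream: pos and mv[]
+ * feed VS_I_VPOS and VS_I_MV0..3, the four eb entries feed
+ * VS_I_EB_0_0..VS_I_EB_1_1; each .flag byte lands in the input's .w
+ * component, which the vertex shader reads as the interlaced, bkwd_pred
+ * and ref_frames flags */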
+struct vertex_stream
+{
+ struct vertex2s pos;
+ struct vertex2s mv[4];
+ struct {
+ int8_t y;
+ int8_t cr;
+ int8_t cb;
+ int8_t flag;
+ } eb[2][2];
+};
+
+enum VS_INPUT
+{
+ VS_I_RECT,
+ VS_I_VPOS,
+ VS_I_MV0,
+ VS_I_MV1,
+ VS_I_MV2,
+ VS_I_MV3,
+ VS_I_EB_0_0,
+ VS_I_EB_0_1,
+ VS_I_EB_1_0,
+ VS_I_EB_1_1,
+
+ NUM_VS_INPUTS
+};
+
+enum VS_OUTPUT
+{
+ VS_O_VPOS,
+ VS_O_LINE,
+ VS_O_TEX0,
+ VS_O_TEX1,
+ VS_O_TEX2,
+ VS_O_EB_0,
+ VS_O_EB_1,
+ VS_O_INFO,
+ VS_O_MV0,
+ VS_O_MV1,
+ VS_O_MV2,
+ VS_O_MV3
+};
+
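+/* coded block pattern masks for 4:2:0: the four luma blocks have individual
+ * bits (0x20, 0x10, 0x08, 0x04 by quadrant), while each chroma component
+ * has a single bit (0x02, 0x01) shared by all four quadrants */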
+static const unsigned const_empty_block_mask_420[3][2][2] = {
+ { { 0x20, 0x10 }, { 0x08, 0x04 } },
+ { { 0x02, 0x02 }, { 0x02, 0x02 } },
+ { { 0x01, 0x01 }, { 0x01, 0x01 } }
+};
+
+static void *
+create_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src block_scale, mv_scale;
+ struct ureg_src vrect, vpos, eb[2][2], vmv[4];
+ struct ureg_dst t_vpos, t_vtex, t_vmv;
+ struct ureg_dst o_vpos, o_line, o_vtex[3], o_eb[2], o_vmv[4], o_info;
+ unsigned i, label;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return NULL;
+
+ t_vpos = ureg_DECL_temporary(shader);
+ t_vtex = ureg_DECL_temporary(shader);
+ t_vmv = ureg_DECL_temporary(shader);
+
+ vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
+ vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
+ eb[0][0] = ureg_DECL_vs_input(shader, VS_I_EB_0_0);
+ eb[1][0] = ureg_DECL_vs_input(shader, VS_I_EB_1_0);
+ eb[0][1] = ureg_DECL_vs_input(shader, VS_I_EB_0_1);
+ eb[1][1] = ureg_DECL_vs_input(shader, VS_I_EB_1_1);
+
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
+ o_line = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE);
+ o_vtex[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0);
+ o_vtex[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1);
+ o_vtex[2] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2);
+ o_eb[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0);
+ o_eb[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1);
+ o_info = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO);
+
+ for (i = 0; i < 4; ++i) {
+ vmv[i] = ureg_DECL_vs_input(shader, VS_I_MV0 + i);
+ o_vmv[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i);
+ }
+
+ /*
+ * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
+ * mv_scale = 0.5 / (dst.width, dst.height);
+ *
+ * t_vpos = (vpos + vrect) * block_scale
+ * o_vpos.xy = t_vpos
+ * o_vpos.zw = vpos
+ *
+ * o_eb[0..1] = vrect.x ? eb[0..1][1] : eb[0..1][0]
+ *
+ * o_info.x = ref_frames
+ * o_info.y = ref_frames >= 0
+ * o_info.z = bkwd_pred
+ *
+ * // Apply motion vectors
+ * o_vmv[0..count] = t_vpos + vmv[0..count] * mv_scale
+ *
+ * o_line.x = interlaced ? vrect.y * (MACROBLOCK_HEIGHT / 2) : vrect.y
+ * o_line.y = vrect.y * (MACROBLOCK_HEIGHT / 2)
+ *
+ * if(eb[0][0].w) { //interlaced
+ * t_vtex.x = vrect.x
+ * t_vtex.y = vrect.y * 0.5
+ * t_vtex += vpos
+ *
+ * o_vtex[0].xy = t_vtex * block_scale
+ *
+ * t_vtex.y += 0.5
+ * o_vtex[1].xy = t_vtex * block_scale
+ * } else {
+ * o_vtex[0..1].xy = t_vpos
+ * }
+ * o_vtex[2].xy = t_vpos
+ *
+ */
+ block_scale = ureg_imm2f(shader,
+ (float)MACROBLOCK_WIDTH / r->buffer_width,
+ (float)MACROBLOCK_HEIGHT / r->buffer_height);
+
+ mv_scale = ureg_imm2f(shader,
+ 0.5f / r->buffer_width,
+ 0.5f / r->buffer_height);
+
+ ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
+ ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
+
+ ureg_CMP(shader, ureg_writemask(o_eb[0], TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
+ eb[0][1], eb[0][0]);
+ ureg_CMP(shader, ureg_writemask(o_eb[1], TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
+ eb[1][1], eb[1][0]);
+
+ ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_X),
+ ureg_scalar(eb[1][1], TGSI_SWIZZLE_W));
+ ureg_SGE(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Y),
+ ureg_scalar(eb[1][1], TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.0f));
+ ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Z),
+ ureg_scalar(eb[1][0], TGSI_SWIZZLE_W));
+
+ ureg_MAD(shader, ureg_writemask(o_vmv[0], TGSI_WRITEMASK_XY), mv_scale, vmv[0], ureg_src(t_vpos));
+ ureg_MAD(shader, ureg_writemask(o_vmv[2], TGSI_WRITEMASK_XY), mv_scale, vmv[2], ureg_src(t_vpos));
+
+ ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
+ vmv[0], vmv[1]);
+ ureg_MAD(shader, ureg_writemask(o_vmv[1], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
+
+ ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
+ vmv[2], vmv[3]);
+ ureg_MAD(shader, ureg_writemask(o_vmv[3], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
+
+ ureg_MOV(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vtex[2], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+
+ ureg_MOV(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X), ureg_scalar(vrect, TGSI_SWIZZLE_Y));
+ ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_Y),
+ vrect, ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
+
+ ureg_IF(shader, ureg_scalar(eb[0][0], TGSI_SWIZZLE_W), &label);
+
+ ureg_MOV(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_X), vrect);
+ ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), vrect, ureg_imm1f(shader, 0.5f));
+ ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY), vpos, ureg_src(t_vtex));
+ ureg_MUL(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
+ ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), ureg_src(t_vtex), ureg_imm1f(shader, 0.5f));
+ ureg_MUL(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
+
+ ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X),
+ ureg_scalar(vrect, TGSI_SWIZZLE_Y),
+ ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
+
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ ureg_release_temporary(shader, t_vtex);
+ ureg_release_temporary(shader, t_vpos);
+ ureg_release_temporary(shader, t_vmv);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, r->pipe);
+}
+
+static struct ureg_dst
+calc_field(struct ureg_program *shader)
+{
+ struct ureg_dst tmp;
+ struct ureg_src line;
+
+ tmp = ureg_DECL_temporary(shader);
+
+ line = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE, TGSI_INTERPOLATE_LINEAR);
+
+ /*
+ * line.x going from 0 to 1 if not interlaced
+ * line.x going from 0 to 8 in steps of 0.5 if interlaced
+ * line.y going from 0 to 8 in steps of 0.5
+ *
+ * tmp.xy = fraction(line)
+ * tmp.xy = tmp.xy >= 0.5 ? 1 : 0
+ */
+ ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), line);
+ ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), ureg_src(tmp), ureg_imm1f(shader, 0.5f));
+
+ return tmp;
+}
+
+static struct ureg_dst
+fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field)
+{
+ struct ureg_src tc[3], sampler[3], eb[2];
+ struct ureg_dst texel, t_tc, t_eb_info;
+ unsigned i, label;
+
+ texel = ureg_DECL_temporary(shader);
+ t_tc = ureg_DECL_temporary(shader);
+ t_eb_info = ureg_DECL_temporary(shader);
+
+ tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0, TGSI_INTERPOLATE_LINEAR);
+ tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1, TGSI_INTERPOLATE_LINEAR);
+ tc[2] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2, TGSI_INTERPOLATE_LINEAR);
+
+ eb[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0, TGSI_INTERPOLATE_CONSTANT);
+ eb[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1, TGSI_INTERPOLATE_CONSTANT);
+
+ for (i = 0; i < 3; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i);
+ }
+
+ /*
+ * texel.y = tex(field.y ? tc[1] : tc[0], sampler[0])
+ * texel.cb = tex(tc[2], sampler[1])
+ * texel.cr = tex(tc[2], sampler[2])
+ */
+
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
+ tc[1], tc[0]);
+
+ ureg_CMP(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
+ eb[1], eb[0]);
+
+ /* r600g ignores TGSI_INTERPOLATE_CONSTANT; work around that here */
+ ureg_SLT(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ), ureg_src(t_eb_info), ureg_imm1f(shader, 0.5f));
+
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.0f));
+ for (i = 0; i < 3; ++i) {
+ ureg_IF(shader, ureg_scalar(ureg_src(t_eb_info), TGSI_SWIZZLE_X + i), &label);
+
+ /* Nouveau can't writemask tex dst regs (yet?), so this currently breaks on NVIDIA hardware */
+ if(i==0 || r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444) {
+ ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, ureg_src(t_tc), sampler[i]);
+ } else {
+ ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, tc[2], sampler[i]);
+ }
+
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+ }
+
+ ureg_release_temporary(shader, t_tc);
+ ureg_release_temporary(shader, t_eb_info);
+
+ return texel;
+}
+
+static struct ureg_dst
+fetch_ref(struct ureg_program *shader, struct ureg_dst field)
+{
+ struct ureg_src info;
+ struct ureg_src tc[4], sampler[2];
+ struct ureg_dst ref[2], result;
+ unsigned i, intra_label, bi_label, label;
+
+ info = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO, TGSI_INTERPOLATE_CONSTANT);
+
+ for (i = 0; i < 4; ++i)
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
+
+ for (i = 0; i < 2; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i + 3);
+ ref[i] = ureg_DECL_temporary(shader);
+ }
+
+ result = ureg_DECL_temporary(shader);
+
+ ureg_MOV(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f));
+
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Y), &intra_label);
+ ureg_CMP(shader, ureg_writemask(ref[0], TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[1], tc[0]);
+
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_X), &bi_label);
+
+ /*
+ * result = tex(ref[0], sampler[info.z ? 1 : 0])
+ */
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Z), &label);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[1]);
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ELSE(shader, &label);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
+ ureg_ELSE(shader, &bi_label);
+
+ /*
+ * ref[0] = tex(field.y ? tc[1] : tc[0], sampler[0])
+ * ref[1] = tex(field.y ? tc[3] : tc[2], sampler[1])
+ * result = (ref[0] + ref[1]) / 2
+ */
+ ureg_CMP(shader, ureg_writemask(ref[1], TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[3], tc[2]);
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
+ ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, ureg_src(ref[1]), sampler[1]);
+
+ ureg_LRP(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f),
+ ureg_src(ref[0]), ureg_src(ref[1]));
+
+ ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+ ureg_fixup_label(shader, intra_label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ for (i = 0; i < 2; ++i)
+ ureg_release_temporary(shader, ref[i]);
+
+ return result;
+}
+
+static void *
+create_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_dst result;
+ struct ureg_dst field, texel;
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ field = calc_field(shader);
+ texel = fetch_ycbcr(r, shader, field);
+
+ result = fetch_ref(shader, field);
+
+ ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(texel), ureg_src(result));
+
+ ureg_release_temporary(shader, field);
+ ureg_release_temporary(shader, texel);
+ ureg_release_temporary(shader, result);
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, r->pipe);
+}
+
+static bool
+init_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ struct pipe_sampler_state sampler;
+ struct pipe_rasterizer_state rs_state;
+ unsigned filters[5];
+ unsigned i;
+
+ assert(r);
+
+ r->viewport.scale[0] = r->buffer_width;
+ r->viewport.scale[1] = r->buffer_height;
+ r->viewport.scale[2] = 1;
+ r->viewport.scale[3] = 1;
+ r->viewport.translate[0] = 0;
+ r->viewport.translate[1] = 0;
+ r->viewport.translate[2] = 0;
+ r->viewport.translate[3] = 0;
+
+ r->fb_state.width = r->buffer_width;
+ r->fb_state.height = r->buffer_height;
+ r->fb_state.nr_cbufs = 1;
+ r->fb_state.zsbuf = NULL;
+
+ /* Luma filter */
+ filters[0] = PIPE_TEX_FILTER_NEAREST;
+ /* Chroma filters */
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 || true) { /* TODO: drop the "|| true" once linear chroma filtering works */
+ filters[1] = PIPE_TEX_FILTER_NEAREST;
+ filters[2] = PIPE_TEX_FILTER_NEAREST;
+ }
+ else {
+ filters[1] = PIPE_TEX_FILTER_LINEAR;
+ filters[2] = PIPE_TEX_FILTER_LINEAR;
+ }
+ /* Fwd, bkwd ref filters */
+ filters[3] = PIPE_TEX_FILTER_LINEAR;
+ filters[4] = PIPE_TEX_FILTER_LINEAR;
+
+ for (i = 0; i < 5; ++i) {
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ sampler.min_img_filter = filters[i];
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = filters[i];
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.shadow_ambient = ; */
+ /*sampler.lod_bias = ; */
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ; */
+ sampler.border_color[0] = 0.0f;
+ sampler.border_color[1] = 0.0f;
+ sampler.border_color[2] = 0.0f;
+ sampler.border_color[3] = 0.0f;
+ /*sampler.max_anisotropy = ; */
+ r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler);
+ }
+
+ memset(&rs_state, 0, sizeof(rs_state));
+ /*rs_state.sprite_coord_enable */
+ rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
+ rs_state.point_quad_rasterization = true;
+ rs_state.point_size = BLOCK_WIDTH;
+ rs_state.gl_rasterization_rules = true;
+ r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
+
+ return true;
+}
+
+static void
+cleanup_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
+ for (i = 0; i < 5; ++i)
+ r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]);
+
+ r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
+}
+
+static bool
+init_buffers(struct vl_mpeg12_mc_renderer *r)
+{
+ struct pipe_resource *idct_matrix;
+ struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
+
+ const unsigned mbw =
+ align(r->buffer_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
+ const unsigned mbh =
+ align(r->buffer_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT;
+
+ unsigned i, chroma_width, chroma_height;
+
+ assert(r);
+
+ r->macroblocks_per_batch =
+ mbw * (r->bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE ? mbh : 1);
+
+ if (!(idct_matrix = vl_idct_upload_matrix(r->pipe)))
+ return false;
+
+ if (!vl_idct_init(&r->idct_luma, r->pipe, r->buffer_width, r->buffer_height, idct_matrix))
+ return false;
+
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ chroma_width = r->buffer_width / 2;
+ chroma_height = r->buffer_height / 2;
+ } else if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
+ chroma_width = r->buffer_width / 2;
+ chroma_height = r->buffer_height;
+ } else {
+ chroma_width = r->buffer_width;
+ chroma_height = r->buffer_height;
+ }
+
+ if(!vl_idct_init(&r->idct_chroma, r->pipe, chroma_width, chroma_height, idct_matrix))
+ return false;
+
+ memset(&vertex_elems, 0, sizeof(vertex_elems));
+
+ vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
+ r->quad = vl_vb_upload_quads(r->pipe, r->macroblocks_per_batch);
+
+ /* Position element */
+ vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R16G16_SSCALED;
+
+ for (i = 0; i < 4; ++i)
+ /* motion vector 0..3 elements */
+ vertex_elems[VS_I_MV0 + i].src_format = PIPE_FORMAT_R16G16_SSCALED;
+
+ /* Y, Cr, Cb empty-block flags, top-left block */
+ vertex_elems[VS_I_EB_0_0].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ /* Y, Cr, Cb empty-block flags, top-right block */
+ vertex_elems[VS_I_EB_0_1].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ /* Y, Cr, Cb empty-block flags, bottom-left block */
+ vertex_elems[VS_I_EB_1_0].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ /* Y, Cr, Cb empty-block flags, bottom-right block */
+ vertex_elems[VS_I_EB_1_1].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ r->vertex_stream_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 9, 1);
+
+ r->vertex_elems_state = r->pipe->create_vertex_elements_state(
+ r->pipe, NUM_VS_INPUTS, vertex_elems);
+
+ if (r->vertex_elems_state == NULL)
+ return false;
+
+ r->vs = create_vert_shader(r);
+ r->fs = create_frag_shader(r);
+
+ if (r->vs == NULL || r->fs == NULL)
+ return false;
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(r);
+
+ r->pipe->delete_vs_state(r->pipe, r->vs);
+ r->pipe->delete_fs_state(r->pipe, r->fs);
+
+ vl_idct_cleanup(&r->idct_luma);
+ vl_idct_cleanup(&r->idct_chroma);
+
+ r->pipe->delete_vertex_elements_state(r->pipe, r->vertex_elems_state);
+}
+
+static struct pipe_sampler_view *
+find_or_create_sampler_view(struct vl_mpeg12_mc_renderer *r, struct pipe_surface *surface)
+{
+ struct pipe_sampler_view *sampler_view;
+ assert(r);
+ assert(surface);
+
+ sampler_view = (struct pipe_sampler_view*)util_keymap_lookup(r->texview_map, &surface);
+ if (!sampler_view) {
+ struct pipe_sampler_view templat;
+ boolean added_to_map;
+
+ u_sampler_view_default_template(&templat, surface->texture,
+ surface->texture->format);
+ sampler_view = r->pipe->create_sampler_view(r->pipe, surface->texture,
+ &templat);
+ if (!sampler_view)
+ return NULL;
+
+ added_to_map = util_keymap_insert(r->texview_map, &surface,
+ sampler_view, r->pipe);
+ assert(added_to_map);
+ }
+
+ return sampler_view;
+}
+
+static void
+get_motion_vectors(struct pipe_mpeg12_macroblock *mb, struct vertex2s mv[4])
+{
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ {
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[2].x = mb->pmv[0][1][0];
+ mv[2].y = mb->pmv[0][1][1];
+
+ } else {
+ mv[2].x = mb->pmv[0][1][0];
+ mv[2].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
+
+ mv[3].x = mb->pmv[1][1][0];
+ mv[3].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
+
+ if(mb->mvfs[0][1]) mv[2].y += 2;
+ if(!mb->mvfs[1][1]) mv[3].y -= 2;
+ }
+
+ /* fall-through */
+ }
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ {
+ if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[0].x = mb->pmv[0][1][0];
+ mv[0].y = mb->pmv[0][1][1];
+
+ } else {
+ mv[0].x = mb->pmv[0][1][0];
+ mv[0].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
+
+ mv[1].x = mb->pmv[1][1][0];
+ mv[1].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
+
+ if(mb->mvfs[0][1]) mv[0].y += 2;
+ if(!mb->mvfs[1][1]) mv[1].y -= 2;
+ }
+
+ } else {
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[0].x = mb->pmv[0][0][0];
+ mv[0].y = mb->pmv[0][0][1];
+
+ } else {
+ mv[0].x = mb->pmv[0][0][0];
+ mv[0].y = mb->pmv[0][0][1] - (mb->pmv[0][0][1] % 4);
+
+ mv[1].x = mb->pmv[1][0][0];
+ mv[1].y = mb->pmv[1][0][1] - (mb->pmv[1][0][1] % 4);
+
+ if(mb->mvfs[0][0]) mv[0].y += 2;
+ if(!mb->mvfs[1][0]) mv[1].y -= 2;
+ }
+ }
+ }
+ default:
+ break;
+ }
+}
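The field-motion adjustment above snaps the vertical component down to a multiple of 4 and then nudges it by 2 depending on the vertical field select bit. A worked sketch (hypothetical helper): pmv_y = 7 with the select bit set gives 7 - (7 % 4) + 2 = 6.

   static short first_field_mv_y(short pmv_y, int field_select)
   {
      short y = pmv_y - (pmv_y % 4); /* snap down to a multiple of 4 */
      if (field_select)
         y += 2; /* the second vector instead does y -= 2 when its bit is clear */
      return y;
   }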
+
+static void
+grab_vectors(struct vl_mpeg12_mc_renderer *r,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ struct vertex_stream stream;
+
+ unsigned i, j;
+
+ assert(r);
+ assert(mb);
+
+ stream.pos.x = mb->mbx;
+ stream.pos.y = mb->mby;
+ for ( i = 0; i < 2; ++i) {
+ for ( j = 0; j < 2; ++j) {
+ stream.eb[i][j].y = !(mb->cbp & (*r->empty_block_mask)[0][i][j]);
+ stream.eb[i][j].cr = !(mb->cbp & (*r->empty_block_mask)[1][i][j]);
+ stream.eb[i][j].cb = !(mb->cbp & (*r->empty_block_mask)[2][i][j]);
+ }
+ }
+ stream.eb[0][0].flag = mb->dct_type == PIPE_MPEG12_DCT_TYPE_FIELD;
+ stream.eb[0][1].flag = mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME;
+ stream.eb[1][0].flag = mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
+ stream.eb[1][1].flag = -1;
+ break;
+
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ stream.eb[1][1].flag = 1;
+ break;
+
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ stream.eb[1][1].flag = 0;
+ break;
+
+ default:
+ assert(0);
+ }
+
+ get_motion_vectors(mb, stream.mv);
+ vl_vb_add_block(&buffer->vertex_stream, &stream);
+}
+
+static void
+grab_blocks(struct vl_mpeg12_mc_renderer *r,
+ struct vl_mpeg12_mc_buffer *buffer,
+ unsigned mbx, unsigned mby,
+ unsigned cbp, short *blocks)
+{
+ unsigned tb = 0;
+ unsigned x, y;
+
+ assert(r);
+ assert(blocks);
+
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x, ++tb) {
+ if (cbp & (*r->empty_block_mask)[0][y][x]) {
+ vl_idct_add_block(&buffer->idct_y, mbx * 2 + x, mby * 2 + y, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ assert(r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+ for (tb = 1; tb < 3; ++tb) {
+ if (cbp & (*r->empty_block_mask)[tb][0][0]) {
+ if(tb == 1)
+ vl_idct_add_block(&buffer->idct_cb, mbx, mby, blocks);
+ else
+ vl_idct_add_block(&buffer->idct_cr, mbx, mby, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+}
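The coded block pattern decides how many 8x8 coefficient blocks follow in mb->blocks; the pointer only advances for planes whose cbp bit is set. A sketch of the resulting count for 4:2:0, which carries 4 luma and 2 chroma blocks with one cbp bit each (hypothetical helper, not part of the patch):

   static unsigned num_coded_blocks_420(unsigned cbp)
   {
      unsigned count = 0, bit;
      for (bit = 0; bit < 6; ++bit)
         if (cbp & (1 << bit))
            ++count;
      return count;
   }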
+
+static void
+grab_macroblock(struct vl_mpeg12_mc_renderer *r,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ assert(r);
+ assert(mb);
+ assert(mb->blocks);
+ assert(buffer->num_macroblocks < r->macroblocks_per_batch);
+
+ grab_vectors(r, buffer, mb);
+ grab_blocks(r, buffer, mb->mbx, mb->mby, mb->cbp, mb->blocks);
+
+ ++buffer->num_macroblocks;
+}
+
+static void
+texview_map_delete(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ pipe_sampler_view_reference(&sv, NULL);
+}
+
+bool
+vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
+ struct pipe_context *pipe,
+ unsigned buffer_width,
+ unsigned buffer_height,
+ enum pipe_video_chroma_format chroma_format,
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode)
+{
+ assert(renderer);
+ assert(pipe);
+
+ /* TODO: Implement other policies */
+ assert(bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE);
+
+ memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer));
+
+ renderer->pipe = pipe;
+ renderer->buffer_width = buffer_width;
+ renderer->buffer_height = buffer_height;
+ renderer->chroma_format = chroma_format;
+ renderer->bufmode = bufmode;
+
+ /* TODO: Implement 422, 444 */
+ assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+ renderer->empty_block_mask = &const_empty_block_mask_420;
+
+ renderer->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
+ texview_map_delete);
+ if (!renderer->texview_map)
+ return false;
+
+ if (!init_pipe_state(renderer))
+ goto error_pipe_state;
+
+ if (!init_buffers(renderer))
+ goto error_buffers;
+
+ return true;
+
+error_buffers:
+ cleanup_pipe_state(renderer);
+
+error_pipe_state:
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ return false;
+}
+
+void
+vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer)
+{
+ assert(renderer);
+
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ cleanup_pipe_state(renderer);
+ cleanup_buffers(renderer);
+}
+
+bool
+vl_mpeg12_mc_init_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ struct pipe_resource template;
+ struct pipe_sampler_view sampler_view;
+
+ unsigned i;
+
+ assert(renderer && buffer);
+
+ buffer->surface = NULL;
+ buffer->past = NULL;
+ buffer->future = NULL;
+ buffer->num_macroblocks = 0;
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ /* TODO: Accommodate HW that can't do this, and cases where this isn't precise enough */
+ template.format = PIPE_FORMAT_R16_SNORM;
+ template.last_level = 0;
+ template.width0 = renderer->buffer_width;
+ template.height0 = renderer->buffer_height;
+ template.depth0 = 1;
++ template.array_size = 1;
+ template.usage = PIPE_USAGE_STATIC;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ buffer->textures.individual.y = renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
+
+ if (!vl_idct_init_buffer(&renderer->idct_luma, &buffer->idct_y, buffer->textures.individual.y))
+ return false;
+
+ if (renderer->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ template.width0 = renderer->buffer_width / 2;
+ template.height0 = renderer->buffer_height / 2;
+ }
+ else if (renderer->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422)
+ template.width0 = renderer->buffer_width / 2;
+
+ buffer->textures.individual.cb =
+ renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
+ buffer->textures.individual.cr =
+ renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
+
+ if (!vl_idct_init_buffer(&renderer->idct_chroma, &buffer->idct_cb, buffer->textures.individual.cb))
+ return false;
+
+ if (!vl_idct_init_buffer(&renderer->idct_chroma, &buffer->idct_cr, buffer->textures.individual.cr))
+ return false;
+
+ for (i = 0; i < 3; ++i) {
+ u_sampler_view_default_template(&sampler_view,
+ buffer->textures.all[i],
+ buffer->textures.all[i]->format);
+ sampler_view.swizzle_r = i == 0 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_g = i == 1 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_b = i == 2 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_a = PIPE_SWIZZLE_ONE;
+ buffer->sampler_views.all[i] = renderer->pipe->create_sampler_view(
+ renderer->pipe, buffer->textures.all[i], &sampler_view);
+ }
+
+ buffer->vertex_bufs.individual.quad.stride = renderer->quad.stride;
+ buffer->vertex_bufs.individual.quad.buffer_offset = renderer->quad.buffer_offset;
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, renderer->quad.buffer);
+
+ buffer->vertex_bufs.individual.stream = vl_vb_init(
+ &buffer->vertex_stream, renderer->pipe, renderer->macroblocks_per_batch,
+ renderer->vertex_stream_stride);
+
+ return true;
+}
+
+void
+vl_mpeg12_mc_cleanup_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ unsigned i;
+
+ assert(renderer && buffer);
+
+ for (i = 0; i < 3; ++i) {
+ pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
+ pipe_resource_reference(&buffer->vertex_bufs.all[i].buffer, NULL);
+ pipe_resource_reference(&buffer->textures.all[i], NULL);
+ }
+
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
+ vl_vb_cleanup(&buffer->vertex_stream);
+
+ vl_idct_cleanup_buffer(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_cleanup_buffer(&renderer->idct_chroma, &buffer->idct_cb);
+ vl_idct_cleanup_buffer(&renderer->idct_chroma, &buffer->idct_cr);
+
+ pipe_surface_reference(&buffer->surface, NULL);
+ pipe_surface_reference(&buffer->past, NULL);
+ pipe_surface_reference(&buffer->future, NULL);
+}
+
+void
+vl_mpeg12_mc_map_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ assert(renderer && buffer);
+
+ vl_idct_map_buffers(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_map_buffers(&renderer->idct_chroma, &buffer->idct_cr);
+ vl_idct_map_buffers(&renderer->idct_chroma, &buffer->idct_cb);
+
+ vl_vb_map(&buffer->vertex_stream, renderer->pipe);
+}
+
+void
+vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer *renderer,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_surface *surface,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_mpeg12_macroblock *mpeg12_macroblocks,
+ struct pipe_fence_handle **fence)
+{
+ assert(renderer && buffer);
+ assert(surface);
+ assert(num_macroblocks);
+ assert(mpeg12_macroblocks);
+
+ if (surface != buffer->surface) {
+ pipe_surface_reference(&buffer->surface, surface);
+ pipe_surface_reference(&buffer->past, past);
+ pipe_surface_reference(&buffer->future, future);
+ buffer->fence = fence;
+ } else {
+ /* If the surface we're rendering to hasn't changed, the ref frames shouldn't change either. */
+ assert(buffer->past == past);
+ assert(buffer->future == future);
+ }
+
+ while (num_macroblocks) {
+ unsigned left_in_batch = renderer->macroblocks_per_batch - buffer->num_macroblocks;
+ unsigned num_to_submit = MIN2(num_macroblocks, left_in_batch);
+ unsigned i;
+
+ for (i = 0; i < num_to_submit; ++i) {
+ assert(mpeg12_macroblocks[i].base.codec == PIPE_VIDEO_CODEC_MPEG12);
+ grab_macroblock(renderer, buffer, &mpeg12_macroblocks[i]);
+ }
+
+ num_macroblocks -= num_to_submit;
+
+ if (buffer->num_macroblocks == renderer->macroblocks_per_batch) {
+ vl_mpeg12_mc_unmap_buffer(renderer, buffer);
+ vl_mpeg12_mc_renderer_flush(renderer, buffer);
+ pipe_surface_reference(&buffer->surface, surface);
+ pipe_surface_reference(&buffer->past, past);
+ pipe_surface_reference(&buffer->future, future);
+ vl_mpeg12_mc_map_buffer(renderer, buffer);
+ }
+ }
+}
+
+void
+vl_mpeg12_mc_unmap_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ assert(renderer && buffer);
+
+ vl_idct_unmap_buffers(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_unmap_buffers(&renderer->idct_chroma, &buffer->idct_cr);
+ vl_idct_unmap_buffers(&renderer->idct_chroma, &buffer->idct_cb);
+
+ vl_vb_unmap(&buffer->vertex_stream, renderer->pipe);
+}
+
+void
+vl_mpeg12_mc_renderer_flush(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ assert(renderer && buffer);
+ assert(buffer->num_macroblocks <= renderer->macroblocks_per_batch);
+
+ if (buffer->num_macroblocks == 0)
+ return;
+
+ vl_idct_flush(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_flush(&renderer->idct_chroma, &buffer->idct_cr);
+ vl_idct_flush(&renderer->idct_chroma, &buffer->idct_cb);
+
+ vl_vb_restart(&buffer->vertex_stream);
+
+ renderer->fb_state.cbufs[0] = buffer->surface;
+ renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);
+ renderer->pipe->set_framebuffer_state(renderer->pipe, &renderer->fb_state);
+ renderer->pipe->set_viewport_state(renderer->pipe, &renderer->viewport);
+ renderer->pipe->set_vertex_buffers(renderer->pipe, 2, buffer->vertex_bufs.all);
+ renderer->pipe->bind_vertex_elements_state(renderer->pipe, renderer->vertex_elems_state);
+
+ if (buffer->past) {
+ buffer->textures.individual.ref[0] = buffer->past->texture;
+ buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->past);
+ } else {
+ buffer->textures.individual.ref[0] = buffer->surface->texture;
+ buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->surface);
+ }
+
+ if (buffer->future) {
+ buffer->textures.individual.ref[1] = buffer->future->texture;
+ buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->future);
+ } else {
+ buffer->textures.individual.ref[1] = buffer->surface->texture;
+ buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->surface);
+ }
+
+ renderer->pipe->set_fragment_sampler_views(renderer->pipe, 5, buffer->sampler_views.all);
+ renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 5, renderer->samplers.all);
+
+ renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs);
+ renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs);
+ util_draw_arrays(renderer->pipe, PIPE_PRIM_QUADS, 0, buffer->num_macroblocks * 4);
+
+ renderer->pipe->flush(renderer->pipe, PIPE_FLUSH_RENDER_CACHE, buffer->fence);
+
+ /* Next time we get this surface, it may have new ref frames */
+ pipe_surface_reference(&buffer->surface, NULL);
+ pipe_surface_reference(&buffer->past, NULL);
+ pipe_surface_reference(&buffer->future, NULL);
+
+ buffer->num_macroblocks = 0;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2010 Christian König
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_screen.h>
+#include <util/u_memory.h>
+#include <util/u_inlines.h>
+#include <util/u_format.h>
+#include "vl_vertex_buffers.h"
+#include "vl_types.h"
+
+/* vertices for a quad covering a block */
+static const struct quadf const_quad = {
+ {0.0f, 1.0f}, {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}
+};
+
+struct pipe_vertex_buffer
+vl_vb_upload_quads(struct pipe_context *pipe, unsigned max_blocks)
+{
+ struct pipe_vertex_buffer quad;
+ struct pipe_transfer *buf_transfer;
+ struct quadf *v;
+
+ unsigned i;
+
+ assert(pipe);
+ assert(max_blocks);
+
+ /* create buffer */
+ quad.stride = sizeof(struct vertex2f);
- quad.max_index = 4 * max_blocks - 1;
+ quad.buffer_offset = 0;
+ quad.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
++ PIPE_USAGE_STATIC,
+ sizeof(struct vertex2f) * 4 * max_blocks
+ );
+
+ if(!quad.buffer)
+ return quad;
+
+ /* and fill it */
+ v = pipe_buffer_map
+ (
+ pipe,
+ quad.buffer,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer
+ );
+
+ for ( i = 0; i < max_blocks; ++i)
+ memcpy(v + i, &const_quad, sizeof(const_quad));
+
+ pipe_buffer_unmap(pipe, buf_transfer);
+
+ return quad;
+}
+
+struct pipe_vertex_element
+vl_vb_get_quad_vertex_element(void)
+{
+ struct pipe_vertex_element element;
+
+ /* setup rectangle element */
+ element.src_offset = 0;
+ element.instance_divisor = 0;
+ element.vertex_buffer_index = 0;
+ element.src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ return element;
+}
+
+unsigned
+vl_vb_element_helper(struct pipe_vertex_element* elements, unsigned num_elements,
+ unsigned vertex_buffer_index)
+{
- unsigned i, size, offset = 0;
++ unsigned i, offset = 0;
+
+ assert(elements && num_elements);
+
+ for ( i = 0; i < num_elements; ++i ) {
+ elements[i].src_offset = offset;
+ elements[i].instance_divisor = 0;
+ elements[i].vertex_buffer_index = vertex_buffer_index;
+ offset += util_format_get_blocksize(elements[i].src_format);
+ }
+
+ return offset;
+}
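Usage sketch for the helper: two tightly packed R16G16 elements receive src_offset 0 and 4, and the returned stride (8) is what init_buffers() passes on to vl_vb_init() as the stream stride.

   struct pipe_vertex_element elems[2];
   unsigned stride;
   elems[0].src_format = PIPE_FORMAT_R16G16_SSCALED; /* 4 bytes */
   elems[1].src_format = PIPE_FORMAT_R16G16_SSCALED; /* 4 bytes */
   stride = vl_vb_element_helper(elems, 2, 1);       /* == 8 */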
+
+struct pipe_vertex_buffer
+vl_vb_init(struct vl_vertex_buffer *buffer, struct pipe_context *pipe,
+ unsigned max_blocks, unsigned stride)
+{
+ struct pipe_vertex_buffer buf;
+
+ assert(buffer);
+
+ buffer->num_verts = 0;
+ buffer->stride = stride;
+
+ buf.stride = stride;
- buf.max_index = 4 * max_blocks - 1;
+ buf.buffer_offset = 0;
+ buf.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
++ PIPE_USAGE_STREAM,
+ stride * 4 * max_blocks
+ );
+
+ pipe_resource_reference(&buffer->resource, buf.buffer);
+
+ vl_vb_map(buffer, pipe);
+
+ return buf;
+}
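Typical lifecycle of the streaming buffer, pieced together from the callers in vl_mpeg12_mc_renderer.c (sketch):

   struct vl_vertex_buffer vb;
   struct pipe_vertex_buffer pvb = vl_vb_init(&vb, pipe, max_blocks, stride);
   /* ... vl_vb_add_block(&vb, &stream) once per macroblock while mapped ... */
   vl_vb_unmap(&vb, pipe); /* unmap before drawing */
   vl_vb_restart(&vb);     /* reset the write position for the next batch */
   vl_vb_map(&vb, pipe);   /* map again to receive more blocks */
   vl_vb_cleanup(&vb);     /* teardown */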
+
+void
+vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
+{
+ assert(buffer && pipe);
+
+ buffer->vectors = pipe_buffer_map
+ (
+ pipe,
+ buffer->resource,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buffer->transfer
+ );
+}
+
+void
+vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
+{
+ assert(buffer && pipe);
+
+ pipe_buffer_unmap(pipe, buffer->transfer);
+}
+
+unsigned
+vl_vb_restart(struct vl_vertex_buffer *buffer)
+{
+ unsigned todo;
+
+ assert(buffer);
+
+ todo = buffer->num_verts;
+ buffer->num_verts = 0;
+ return todo;
+}
+
+void
+vl_vb_cleanup(struct vl_vertex_buffer *buffer)
+{
+ assert(buffer);
+
+ pipe_resource_reference(&buffer->resource, NULL);
+}
{
int r;
- if (bc->cf_last && (bc->cf_last->inst == output->inst ||
- (bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) &&
- output->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE))) &&
++ if (bc->cf_last && bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) &&
+ output->type == bc->cf_last->output.type &&
+ output->elem_size == bc->cf_last->output.elem_size &&
+ output->swizzle_x == bc->cf_last->output.swizzle_x &&
+ output->swizzle_y == bc->cf_last->output.swizzle_y &&
+ output->swizzle_z == bc->cf_last->output.swizzle_z &&
+ output->swizzle_w == bc->cf_last->output.swizzle_w &&
+ (output->burst_count + bc->cf_last->output.burst_count) <= 16) {
+
+ if ((output->gpr + output->burst_count) == bc->cf_last->output.gpr &&
+ (output->array_base + output->burst_count) == bc->cf_last->output.array_base) {
+
- bc->cf_last->output.end_of_program |= output->end_of_program;
- bc->cf_last->output.inst = output->inst;
+ bc->cf_last->output.gpr = output->gpr;
+ bc->cf_last->output.array_base = output->array_base;
+ bc->cf_last->output.burst_count += output->burst_count;
+ return 0;
+
+ } else if (output->gpr == (bc->cf_last->output.gpr + bc->cf_last->output.burst_count) &&
+ output->array_base == (bc->cf_last->output.array_base + bc->cf_last->output.burst_count)) {
+
- bc->cf_last->output.end_of_program |= output->end_of_program;
- bc->cf_last->output.inst = output->inst;
+ bc->cf_last->output.burst_count += output->burst_count;
+ return 0;
+ }
+ }
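+ /* Editor's worked example of the merge rule above, not patch content:
+ * an EXPORT of gpr 2, array_base 1, burst_count 1 followed by one of
+ * gpr 3, array_base 2 takes the second branch (3 == 2 + 1 and 2 == 1 + 1),
+ * so the pair collapses into a single CF word with burst_count 2; merging
+ * stops once the combined burst_count would exceed 16. */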
+
r = r600_bc_add_cf(bc);
if (r)
return r;
return ctx->num_interp_gpr;
}
- int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader, u32 **literals)
+ static void tgsi_src(struct r600_shader_ctx *ctx,
+ const struct tgsi_full_src_register *tgsi_src,
+ struct r600_shader_src *r600_src)
+ {
+ memset(r600_src, 0, sizeof(*r600_src));
+ r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
+ r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
+ r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
+ r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
+ r600_src->neg = tgsi_src->Register.Negate;
+ r600_src->abs = tgsi_src->Register.Absolute;
+ if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
+ int index;
+ if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
+ (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
+ (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
+
+ index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
+ r600_bc_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg);
+ if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
+ return;
+ }
+ index = tgsi_src->Register.Index;
+ r600_src->sel = V_SQ_ALU_SRC_LITERAL;
+ memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
+ } else {
+ if (tgsi_src->Register.Indirect)
+ r600_src->rel = V_SQ_REL_RELATIVE;
+ r600_src->sel = tgsi_src->Register.Index;
+ r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
+ }
+ }
+
+ static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx, unsigned int offset, unsigned int dst_reg)
+ {
+ struct r600_bc_vtx vtx;
+ unsigned int ar_reg;
+ int r;
+
+ if (offset) {
+ struct r600_bc_alu alu;
+
+ memset(&alu, 0, sizeof(alu));
+
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT);
+ alu.src[0].sel = ctx->ar_reg;
+
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = offset;
+
+ alu.dst.sel = dst_reg;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ if ((r = r600_bc_add_alu(ctx->bc, &alu)))
+ return r;
+
+ ar_reg = dst_reg;
+ } else {
+ ar_reg = ctx->ar_reg;
+ }
+
+ memset(&vtx, 0, sizeof(vtx));
+ vtx.fetch_type = 2; /* VTX_FETCH_NO_INDEX_OFFSET */
+ vtx.src_gpr = ar_reg;
+ vtx.mega_fetch_count = 16;
+ vtx.dst_gpr = dst_reg;
+ vtx.dst_sel_x = 0; /* SEL_X */
+ vtx.dst_sel_y = 1; /* SEL_Y */
+ vtx.dst_sel_z = 2; /* SEL_Z */
+ vtx.dst_sel_w = 3; /* SEL_W */
+ vtx.data_format = FMT_32_32_32_32_FLOAT;
+ vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
+ vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
+ vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
+
+ if ((r = r600_bc_add_vtx(ctx->bc, &vtx)))
+ return r;
+
+ return 0;
+ }
+
+ static int tgsi_split_constant(struct r600_shader_ctx *ctx)
+ {
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bc_alu alu;
+ int i, j, k, nconst, r;
+
+ for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
+ nconst++;
+ }
+ tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
+ }
+ for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
+ if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
+ continue;
+ }
+
+ if (ctx->src[i].rel) {
+ int treg = r600_get_temp(ctx);
+ if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].sel - 512, treg)))
+ return r;
+
+ ctx->src[i].sel = treg;
+ ctx->src[i].rel = 0;
+ j--;
+ } else if (j > 0) {
+ int treg = r600_get_temp(ctx);
+ for (k = 0; k < 4; k++) {
+ memset(&alu, 0, sizeof(struct r600_bc_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
+ alu.src[0].sel = ctx->src[i].sel;
+ alu.src[0].chan = k;
+ alu.src[0].rel = ctx->src[i].rel;
+ alu.dst.sel = treg;
+ alu.dst.chan = k;
+ alu.dst.write = 1;
+ if (k == 3)
+ alu.last = 1;
+ r = r600_bc_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ ctx->src[i].sel = treg;
+ ctx->src[i].rel = 0;
+ j--;
+ }
+ }
+ return 0;
+ }
+
+ /* any immediate may need to be moved into a temp - e.g. the trig functions use literals for their PI constants */
+ static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
+ {
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bc_alu alu;
+ int i, j, k, nliteral, r;
+
+ for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
+ nliteral++;
+ }
+ }
+ for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
+ if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
+ int treg = r600_get_temp(ctx);
+ for (k = 0; k < 4; k++) {
+ memset(&alu, 0, sizeof(struct r600_bc_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
+ alu.src[0].sel = ctx->src[i].sel;
+ alu.src[0].chan = k;
+ alu.src[0].value = ctx->src[i].value[k];
+ alu.dst.sel = treg;
+ alu.dst.chan = k;
+ alu.dst.write = 1;
+ if (k == 3)
+ alu.last = 1;
+ r = r600_bc_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ ctx->src[i].sel = treg;
+ j--;
+ }
+ }
+ return 0;
+ }
+
+ static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
{
struct tgsi_full_immediate *immediate;
+ struct tgsi_full_property *property;
struct r600_shader_ctx ctx;
struct r600_bc_output output[32];
- unsigned output_done, noutput;
+ unsigned noutput;
unsigned opcode;
int i, r = 0, pos0;
output[i].swizzle_y = 1;
output[i].swizzle_z = 2;
output[i].swizzle_w = 3;
- output[i].barrier = 1;
+ output[i].burst_count = 1;
output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
output[i].array_base = i - pos0;
- output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
switch (ctx.type) {
case TGSI_PROCESSOR_VERTEX:
if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
output[i].swizzle_y = 1;
output[i].swizzle_z = 2;
output[i].swizzle_w = 3;
- output[i].barrier = 1;
+ output[i].burst_count = 1;
output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
output[i].array_base = 0;
- output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
noutput++;
}
}
output[0].swizzle_y = 7;
output[0].swizzle_z = 7;
output[0].swizzle_w = 7;
- output[0].barrier = 1;
+ output[0].burst_count = 1;
output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
output[0].array_base = 0;
- output[0].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
noutput++;
}
- /* set export done on last export of each type */
- for (i = noutput - 1, output_done = 0; i >= 0; i--) {
- if (i == (noutput - 1)) {
- output[i].end_of_program = 1;
- }
- if (!(output_done & (1 << output[i].type))) {
- output_done |= (1 << output[i].type);
- output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
- }
- }
/* add output to bytecode */
for (i = 0; i < noutput; i++) {
r = r600_bc_add_output(ctx.bc, &output[i]);
bo[2] = rbuffer->bo;
/* XXX quite sure dx10+ hw doesn't need any offset hacks */
- offset = r600_texture_get_offset((struct r600_resource_texture *)state->cbufs[cb]->texture,
+ offset = r600_texture_get_offset(rtex,
level, state->cbufs[cb]->u.tex.first_layer);
- pitch = rtex->pitch_in_pixels[level] / 8 - 1;
- slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1;
+ pitch = rtex->pitch_in_blocks[level] / 8 - 1;
+ slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1;
ntype = 0;
- desc = util_format_description(rtex->resource.base.b.format);
+ desc = util_format_description(surf->base.format);
if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
ntype = V_0280A0_NUMBER_SRGB;
+ else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
+ switch(desc->channel[0].type) {
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ ntype = V_0280A0_NUMBER_UNORM;
+ break;
+
+ case UTIL_FORMAT_TYPE_SIGNED:
+ ntype = V_0280A0_NUMBER_SNORM;
+ break;
+ }
+ }
- format = r600_translate_colorformat(rtex->resource.base.b.format);
- swap = r600_translate_colorswap(rtex->resource.base.b.format);
+ for (i = 0; i < 4; i++) {
+ if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+ break;
+ }
+ }
+
+ format = r600_translate_colorformat(surf->base.format);
+ swap = r600_translate_colorswap(surf->base.format);
+
+ /* disable when gallium grows int textures */
+ if ((format == FMT_32_32_32_32 || format == FMT_16_16_16_16) && rtex->force_int_type)
+ ntype = 4;
+
color_info = S_0280A0_FORMAT(format) |
S_0280A0_COMP_SWAP(swap) |
S_0280A0_ARRAY_MODE(rtex->array_mode[level]) |
rtex->size = offset;
}
-
+ /* Figure out whether u_blitter will fall back to a transfer operation.
+ * If so, don't use a staging resource.
+ */
+ static boolean permit_hardware_blit(struct pipe_screen *screen,
+ const struct pipe_resource *res)
+ {
+ unsigned bind;
+
+ if (util_format_is_depth_or_stencil(res->format))
+ bind = PIPE_BIND_DEPTH_STENCIL;
+ else
+ bind = PIPE_BIND_RENDER_TARGET;
+
+ /* workaround for S3TC */
+ if (util_format_is_s3tc(res->format))
+ return TRUE;
++
+ if (!screen->is_format_supported(screen,
+ res->format,
+ res->target,
+ res->nr_samples,
+ bind, 0))
+ return FALSE;
+
+ if (!screen->is_format_supported(screen,
+ res->format,
+ res->target,
+ res->nr_samples,
+ PIPE_BIND_SAMPLER_VIEW, 0))
+ return FALSE;
+
+ return TRUE;
+ }
+
+ static boolean r600_texture_get_handle(struct pipe_screen* screen,
+ struct pipe_resource *ptex,
+ struct winsys_handle *whandle)
+ {
+ struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
+ struct r600_resource *resource = &rtex->resource;
+ struct radeon *radeon = (struct radeon *)screen->winsys;
+
+ return r600_bo_get_winsys_handle(radeon, resource->bo,
+ rtex->pitch_in_bytes[0], whandle);
+ }
+
+ static void r600_texture_destroy(struct pipe_screen *screen,
+ struct pipe_resource *ptex)
+ {
+ struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
+ struct r600_resource *resource = &rtex->resource;
+ struct radeon *radeon = (struct radeon *)screen->winsys;
+
+ if (rtex->flushed_depth_texture)
+ pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
+
+ if (resource->bo) {
+ r600_bo_reference(radeon, &resource->bo, NULL);
+ }
+ FREE(rtex);
+ }
+
+ static unsigned int r600_texture_is_referenced(struct pipe_context *context,
+ struct pipe_resource *texture,
+ unsigned level, int layer)
+ {
+ /* FIXME */
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+ }
+
+ static const struct u_resource_vtbl r600_texture_vtbl =
+ {
+ r600_texture_get_handle, /* get_handle */
+ r600_texture_destroy, /* resource_destroy */
+ r600_texture_is_referenced, /* is_resource_referenced */
+ r600_texture_get_transfer, /* get_transfer */
+ r600_texture_transfer_destroy, /* transfer_destroy */
+ r600_texture_transfer_map, /* transfer_map */
+ u_default_transfer_flush_region,/* transfer_flush_region */
+ r600_texture_transfer_unmap, /* transfer_unmap */
+ u_default_transfer_inline_write /* transfer_inline_write */
+ };
+
static struct r600_resource_texture *
r600_texture_create_object(struct pipe_screen *screen,
const struct pipe_resource *base,
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
++ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <X11/Xlibint.h>
+#include <X11/extensions/XvMClib.h>
+#include <xorg/fourcc.h>
+#include <vl_winsys.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_video_context.h>
+#include <pipe/p_state.h>
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include <util/u_format.h>
+#include "xvmc_private.h"
+
+#define FOURCC_RGB 0x0000003
+
+static enum pipe_format XvIDToPipe(int xvimage_id)
+{
+ switch (xvimage_id) {
+ case FOURCC_RGB:
+ return PIPE_FORMAT_B8G8R8X8_UNORM;
+ default:
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized Xv image ID 0x%08X.\n", xvimage_id);
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+static int PipeToComponentOrder(enum pipe_format format, char *component_order)
+{
+ assert(component_order);
+
+ switch (format) {
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ return 0;
+ default:
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized PIPE_FORMAT 0x%08X.\n", format);
+ component_order[0] = 0;
+ component_order[1] = 0;
+ component_order[2] = 0;
+ component_order[3] = 0;
+ }
+
+ return 0;
+}
+
+static Status Validate(Display *dpy, XvPortID port, int surface_type_id, int xvimage_id)
+{
+ XvImageFormatValues *subpictures;
+ int num_subpics;
+ unsigned int i;
+
+ subpictures = XvMCListSubpictureTypes(dpy, port, surface_type_id, &num_subpics);
+ if (num_subpics < 1) {
+ if (subpictures)
+ XFree(subpictures);
+ return BadMatch;
+ }
+ if (!subpictures)
+ return BadAlloc;
+
+ for (i = 0; i < num_subpics; ++i) {
+ if (subpictures[i].id == xvimage_id) {
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Found requested subpicture format.\n" \
+ "[XvMC] port=%u\n" \
+ "[XvMC] surface id=0x%08X\n" \
+ "[XvMC] image id=0x%08X\n" \
+ "[XvMC] type=%08X\n" \
+ "[XvMC] byte order=%08X\n" \
+ "[XvMC] bits per pixel=%u\n" \
+ "[XvMC] format=%08X\n" \
+ "[XvMC] num planes=%d\n",
+ port, surface_type_id, xvimage_id, subpictures[i].type, subpictures[i].byte_order,
+ subpictures[i].bits_per_pixel, subpictures[i].format, subpictures[i].num_planes);
+ if (subpictures[i].type == XvRGB) {
+ XVMC_MSG(XVMC_TRACE, "[XvMC] depth=%d\n" \
+ "[XvMC] red mask=0x%08X\n" \
+ "[XvMC] green mask=0x%08X\n" \
+ "[XvMC] blue mask=0x%08X\n",
+ subpictures[i].depth, subpictures[i].red_mask, subpictures[i].green_mask, subpictures[i].blue_mask);
+ }
+ else if (subpictures[i].type == XvYUV) {
+ XVMC_MSG(XVMC_TRACE, "[XvMC] y sample bits=0x%08X\n" \
+ "[XvMC] u sample bits=0x%08X\n" \
+ "[XvMC] v sample bits=0x%08X\n" \
+ "[XvMC] horz y period=%u\n" \
+ "[XvMC] horz u period=%u\n" \
+ "[XvMC] horz v period=%u\n" \
+ "[XvMC] vert y period=%u\n" \
+ "[XvMC] vert u period=%u\n" \
+ "[XvMC] vert v period=%u\n",
+ subpictures[i].y_sample_bits, subpictures[i].u_sample_bits, subpictures[i].v_sample_bits,
+ subpictures[i].horz_y_period, subpictures[i].horz_u_period, subpictures[i].horz_v_period,
+ subpictures[i].vert_y_period, subpictures[i].vert_u_period, subpictures[i].vert_v_period);
+ }
+ break;
+ }
+ }
+
+ XFree(subpictures);
+
+ return i < num_subpics ? Success : BadMatch;
+}
+
+PUBLIC
+Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *subpicture,
+ unsigned short width, unsigned short height, int xvimage_id)
+{
+ XvMCContextPrivate *context_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+ struct pipe_video_context *vpipe;
+ struct pipe_resource template;
+ struct pipe_resource *tex;
+ struct pipe_surface surf_template;
+ Status ret;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Creating subpicture %p.\n", subpicture);
+
+ assert(dpy);
+
+ if (!context)
+ return XvMCBadContext;
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ if (width > context_priv->subpicture_max_width ||
+ height > context_priv->subpicture_max_height)
+ return BadValue;
+
+ ret = Validate(dpy, context->port, context->surface_type_id, xvimage_id);
+ if (ret != Success)
+ return ret;
+
+ subpicture_priv = CALLOC(1, sizeof(XvMCSubpicturePrivate));
+ if (!subpicture_priv)
+ return BadAlloc;
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = XvIDToPipe(xvimage_id);
+ template.last_level = 0;
+ if (vpipe->get_param(vpipe, PIPE_CAP_NPOT_TEXTURES)) {
+ template.width0 = width;
+ template.height0 = height;
+ }
+ else {
+ template.width0 = util_next_power_of_two(width);
+ template.height0 = util_next_power_of_two(height);
+ }
+ template.depth0 = 1;
++ template.array_size = 1;
+ template.usage = PIPE_USAGE_DYNAMIC;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ subpicture_priv->context = context;
+ tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!tex) {
+ FREE(subpicture_priv);
+ return BadAlloc;
+ }
+
+ memset(&surf_template, 0, sizeof(surf_template));
+ surf_template.format = tex->format;
+ surf_template.usage = PIPE_BIND_SAMPLER_VIEW;
+ subpicture_priv->sfc = vpipe->create_surface(vpipe, tex, &surf_template);
+ pipe_resource_reference(&tex, NULL);
+ if (!subpicture_priv->sfc) {
+ FREE(subpicture_priv);
+ return BadAlloc;
+ }
+
+ subpicture->subpicture_id = XAllocID(dpy);
+ subpicture->context_id = context->context_id;
+ subpicture->xvimage_id = xvimage_id;
+ subpicture->width = width;
+ subpicture->height = height;
+ subpicture->num_palette_entries = 0;
+ subpicture->entry_bytes = PipeToComponentOrder(template.format, subpicture->component_order);
+ subpicture->privData = subpicture_priv;
+
+ SyncHandle();
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p created.\n", subpicture);
+
+ return Success;
+}
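When the screen lacks PIPE_CAP_NPOT_TEXTURES the subpicture texture above is rounded up to power-of-two dimensions, trading some memory for compatibility. Sketch:

   unsigned w = util_next_power_of_two(720); /* -> 1024 */
   unsigned h = util_next_power_of_two(480); /* -> 512  */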
+
+PUBLIC
+Status XvMCClearSubpicture(Display *dpy, XvMCSubpicture *subpicture, short x, short y,
+ unsigned short width, unsigned short height, unsigned int color)
+{
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContextPrivate *context_priv;
+ float color_f[4];
+
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
++
+ /* Convert color to float */
+ util_format_read_4f(PIPE_FORMAT_B8G8R8A8_UNORM,
+ color_f, 1,
+ &color, 4,
+ 0, 0, 1, 1);
+
+ subpicture_priv = subpicture->privData;
+ context_priv = subpicture_priv->context->privData;
+ /* TODO: Assert clear rect is within bounds? Or clip? */
+ context_priv->vctx->vpipe->clear_render_target(context_priv->vctx->vpipe,
+ subpicture_priv->sfc, x, y,
+ color_f,
+ width, height);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCCompositeSubpicture(Display *dpy, XvMCSubpicture *subpicture, XvImage *image,
+ short srcx, short srcy, unsigned short width, unsigned short height,
+ short dstx, short dsty)
+{
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContextPrivate *context_priv;
+ struct pipe_video_context *vpipe;
+ struct pipe_transfer *xfer;
+ unsigned char *src, *dst, *dst_line;
+ unsigned x, y;
+ struct pipe_box dst_box = {dstx, dsty, 0, width, height, 1};
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Compositing subpicture %p.\n", subpicture);
+
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ assert(image);
+
+ if (subpicture->xvimage_id != image->id)
+ return BadMatch;
+
+ /* No planar support for now */
+ if (image->num_planes != 1)
+ return BadMatch;
+
+ subpicture_priv = subpicture->privData;
+ context_priv = subpicture_priv->context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ /* TODO: Assert rects are within bounds? Or clip? */
+
+ xfer = vpipe->get_transfer(vpipe, subpicture_priv->sfc->texture,
+ 0, PIPE_TRANSFER_WRITE, &dst_box);
+ if (!xfer)
+ return BadAlloc;
+
+ src = image->data;
+ dst = vpipe->transfer_map(vpipe, xfer);
+ if (!dst) {
+ vpipe->transfer_destroy(vpipe, xfer);
+ return BadAlloc;
+ }
+
+ switch (image->id) {
+ case FOURCC_RGB:
+ assert(subpicture_priv->sfc->format == XvIDToPipe(image->id));
+ for (y = 0; y < height; ++y) {
+ dst_line = dst;
+ for (x = 0; x < width; ++x, src += 3, dst_line += 4) {
+ dst_line[0] = src[2]; /* B */
+ dst_line[1] = src[1]; /* G */
+ dst_line[2] = src[0]; /* R */
+ }
+ dst += xfer->stride;
+ }
+ break;
+ default:
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized Xv image ID 0x%08X.\n", image->id);
+ }
+
+ vpipe->transfer_unmap(vpipe, xfer);
+ vpipe->transfer_destroy(vpipe, xfer);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p composited.\n", subpicture);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCDestroySubpicture(Display *dpy, XvMCSubpicture *subpicture)
+{
+ XvMCSubpicturePrivate *subpicture_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying subpicture %p.\n", subpicture);
+
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ subpicture_priv = subpicture->privData;
+ pipe_surface_reference(&subpicture_priv->sfc, NULL);
+ FREE(subpicture_priv);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p destroyed.\n", subpicture);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCSetSubpicturePalette(Display *dpy, XvMCSubpicture *subpicture, unsigned char *palette)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ assert(palette);
+
+ /* We don't support paletted subpictures */
+ return BadMatch;
+}
+
+PUBLIC
+Status XvMCBlendSubpicture(Display *dpy, XvMCSurface *target_surface, XvMCSubpicture *subpicture,
+ short subx, short suby, unsigned short subw, unsigned short subh,
+ short surfx, short surfy, unsigned short surfw, unsigned short surfh)
+{
+ XvMCSurfacePrivate *surface_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Associating subpicture %p with surface %p.\n", subpicture, target_surface);
+
+ assert(dpy);
+
+ if (!target_surface)
+ return XvMCBadSurface;
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ if (target_surface->context_id != subpicture->context_id)
+ return BadMatch;
+
+ /* TODO: Verify against subpicture independent scaling */
+
+ surface_priv = target_surface->privData;
+ subpicture_priv = subpicture->privData;
+
+ /* TODO: Assert rects are within bounds? Or clip? */
+
+ surface_priv->subpicture = subpicture;
+ surface_priv->subx = subx;
+ surface_priv->suby = suby;
+ surface_priv->subw = subw;
+ surface_priv->subh = subh;
+ surface_priv->surfx = surfx;
+ surface_priv->surfy = surfy;
+ surface_priv->surfw = surfw;
+ surface_priv->surfh = surfh;
+ subpicture_priv->surface = target_surface;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCBlendSubpicture2(Display *dpy, XvMCSurface *source_surface, XvMCSurface *target_surface,
+ XvMCSubpicture *subpicture, short subx, short suby, unsigned short subw, unsigned short subh,
+ short surfx, short surfy, unsigned short surfw, unsigned short surfh)
+{
+ assert(dpy);
+
+ if (!source_surface || !target_surface)
+ return XvMCBadSurface;
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ if (source_surface->context_id != subpicture->context_id)
+ return BadMatch;
+
+ if (target_surface->context_id != subpicture->context_id)
+ return BadMatch;
+
+ /* TODO: Assert rects are within bounds? Or clip? */
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCSyncSubpicture(Display *dpy, XvMCSubpicture *subpicture)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCFlushSubpicture(Display *dpy, XvMCSubpicture *subpicture)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCGetSubpictureStatus(Display *dpy, XvMCSubpicture *subpicture, int *status)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ assert(status);
+
+ /* TODO */
+ *status = 0;
+
+ return Success;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <stdio.h>
+#include <X11/Xlibint.h>
+#include <vl_winsys.h>
+#include <pipe/p_video_context.h>
+#include <pipe/p_video_state.h>
+#include <pipe/p_state.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include "xvmc_private.h"
+
+static enum pipe_mpeg12_macroblock_type TypeToPipe(int xvmc_mb_type)
+{
+ if (xvmc_mb_type & XVMC_MB_TYPE_INTRA)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_INTRA;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_FORWARD)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_FWD;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_BACKWARD)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD))
+ return PIPE_MPEG12_MACROBLOCK_TYPE_BI;
+
+ assert(0);
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized mb type 0x%08X.\n", xvmc_mb_type);
+
+ return -1;
+}
+
+static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
+{
+ switch (xvmc_pic) {
+ case XVMC_TOP_FIELD:
+ return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP;
+ case XVMC_BOTTOM_FIELD:
+ return PIPE_MPEG12_PICTURE_TYPE_FIELD_BOTTOM;
+ case XVMC_FRAME_PICTURE:
+ return PIPE_MPEG12_PICTURE_TYPE_FRAME;
+ default:
+ assert(0);
+ }
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized picture type 0x%08X.\n", xvmc_pic);
+
+ return -1;
+}
+
+static enum pipe_mpeg12_motion_type MotionToPipe(int xvmc_motion_type, unsigned int xvmc_picture_structure)
+{
+ switch (xvmc_motion_type) {
+ case XVMC_PREDICTION_FRAME:
+ if (xvmc_picture_structure == XVMC_FRAME_PICTURE)
+ return PIPE_MPEG12_MOTION_TYPE_FRAME;
+ else
+ return PIPE_MPEG12_MOTION_TYPE_16x8;
+ case XVMC_PREDICTION_FIELD:
+ return PIPE_MPEG12_MOTION_TYPE_FIELD;
+ case XVMC_PREDICTION_DUAL_PRIME:
+ return PIPE_MPEG12_MOTION_TYPE_DUALPRIME;
+ default:
+ assert(0);
+ }
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized motion type 0x%08X (with picture structure 0x%08X).\n", xvmc_motion_type, xvmc_picture_structure);
+
+ return -1;
+}
+
+#if 0
+static bool
+CreateOrResizeBackBuffer(struct vl_context *vctx, unsigned int width, unsigned int height,
+ struct pipe_surface **backbuffer)
+{
+ struct pipe_video_context *vpipe;
+ struct pipe_resource template;
+ struct pipe_resource *tex;
+
+ assert(vctx);
+
+ vpipe = vctx->vpipe;
+
+ if (*backbuffer) {
+ if ((*backbuffer)->width != width || (*backbuffer)->height != height)
+ pipe_surface_reference(backbuffer, NULL);
+ else
+ return true;
+ }
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = vctx->vscreen->format;
+ template.last_level = 0;
+ template.width0 = width;
+ template.height0 = height;
+ template.depth0 = 1;
++ template.array_size = 1;
+ template.usage = PIPE_USAGE_DEFAULT;
+ template.bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_BLIT_SOURCE;
+ template.flags = 0;
+
+ tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!tex)
+ return false;
+
+ *backbuffer = vpipe->screen->get_tex_surface(vpipe->screen, tex, 0, 0, 0,
+ template.bind);
+ pipe_resource_reference(&tex, NULL);
+
+ if (!*backbuffer)
+ return false;
+
+ /* Clear the backbuffer in case the video doesn't cover the whole window */
+ /* FIXME: Need to clear every time a frame moves and leaves dirty rects */
+ vpipe->surface_fill(vpipe, *backbuffer, 0, 0, width, height, 0);
+
+ return true;
+}
+#endif
+
+static void
+MacroBlocksToPipe(struct pipe_screen *screen,
+ unsigned int xvmc_picture_structure,
+ const XvMCMacroBlockArray *xvmc_macroblocks,
+ const XvMCBlockArray *xvmc_blocks,
+ unsigned int first_macroblock,
+ unsigned int num_macroblocks,
+ struct pipe_mpeg12_macroblock *pipe_macroblocks)
+{
+ unsigned int i, j, k, l;
+ XvMCMacroBlock *xvmc_mb;
+
+ assert(xvmc_macroblocks);
+ assert(xvmc_blocks);
+ assert(pipe_macroblocks);
+ assert(num_macroblocks);
+
+ xvmc_mb = xvmc_macroblocks->macro_blocks + first_macroblock;
+
+ for (i = 0; i < num_macroblocks; ++i) {
+ pipe_macroblocks->base.codec = PIPE_VIDEO_CODEC_MPEG12;
+ pipe_macroblocks->mbx = xvmc_mb->x;
+ pipe_macroblocks->mby = xvmc_mb->y;
+ pipe_macroblocks->mb_type = TypeToPipe(xvmc_mb->macroblock_type);
+      if (pipe_macroblocks->mb_type != PIPE_MPEG12_MACROBLOCK_TYPE_INTRA)
+         pipe_macroblocks->mo_type = MotionToPipe(xvmc_mb->motion_type, xvmc_picture_structure);
+      else
+         pipe_macroblocks->mo_type = -1; /* initialize to silence Valgrind 'undefined' warnings */
+ pipe_macroblocks->dct_type = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
+ PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
+
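+      /* Copy the 2x2x2 predicted motion vectors verbatim; the indices
+       * follow the MPEG-2 PMV[r][s][t] layout:
+       * [first/second vector][forward/backward][horizontal/vertical].
+       */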
+ for (j = 0; j < 2; ++j)
+ for (k = 0; k < 2; ++k)
+ for (l = 0; l < 2; ++l)
+ pipe_macroblocks->pmv[j][k][l] = xvmc_mb->PMV[j][k][l];
+
+      /* Normalize the field-select bits to 0/1 */
+      pipe_macroblocks->mvfs[0][0] = (xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_FORWARD) != 0;
+      pipe_macroblocks->mvfs[0][1] = (xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_BACKWARD) != 0;
+      pipe_macroblocks->mvfs[1][0] = (xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_FORWARD) != 0;
+      pipe_macroblocks->mvfs[1][1] = (xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_BACKWARD) != 0;
+
+ pipe_macroblocks->cbp = xvmc_mb->coded_block_pattern;
+ pipe_macroblocks->blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
+
+ ++pipe_macroblocks;
+ ++xvmc_mb;
+ }
+}
+
+PUBLIC
+Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
+{
+ XvMCContextPrivate *context_priv;
+ struct pipe_video_context *vpipe;
+ XvMCSurfacePrivate *surface_priv;
+ struct pipe_resource template;
+ struct pipe_resource *vsfc_tex;
+ struct pipe_surface surf_template;
+ struct pipe_surface *vsfc;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!context)
+ return XvMCBadContext;
+ if (!surface)
+ return XvMCBadSurface;
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
+ if (!surface_priv)
+ return BadAlloc;
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = (enum pipe_format)vpipe->get_param(vpipe, PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT);
+ template.last_level = 0;
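+   /* Use the exact size when the driver can handle NPOT textures;
+    * otherwise round up to the next power of two.
+    */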
+ if (vpipe->is_format_supported(vpipe, template.format,
+ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
+ PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO)) {
+ template.width0 = context->width;
+ template.height0 = context->height;
+ }
+ else {
+ assert(vpipe->is_format_supported(vpipe, template.format,
+ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
+ PIPE_TEXTURE_GEOM_NON_SQUARE));
+ template.width0 = util_next_power_of_two(context->width);
+ template.height0 = util_next_power_of_two(context->height);
+ }
+ template.depth0 = 1;
++ template.array_size = 1;
+ template.usage = PIPE_USAGE_DEFAULT;
+ template.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ template.flags = 0;
+ vsfc_tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!vsfc_tex) {
+ FREE(surface_priv);
+ return BadAlloc;
+ }
+
+ memset(&surf_template, 0, sizeof(surf_template));
+ surf_template.format = vsfc_tex->format;
+ surf_template.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ vsfc = vpipe->create_surface(vpipe, vsfc_tex, &surf_template);
+ pipe_resource_reference(&vsfc_tex, NULL);
+ if (!vsfc) {
+ FREE(surface_priv);
+ return BadAlloc;
+ }
+
+ surface_priv->pipe_vsfc = vsfc;
+ surface_priv->context = context;
+
+ surface->surface_id = XAllocID(dpy);
+ surface->context_id = context->context_id;
+ surface->surface_type_id = context->surface_type_id;
+ surface->width = context->width;
+ surface->height = context->height;
+ surface->privData = surface_priv;
+
+ SyncHandle();
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p created.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int picture_structure,
+ XvMCSurface *target_surface, XvMCSurface *past_surface, XvMCSurface *future_surface,
+ unsigned int flags, unsigned int num_macroblocks, unsigned int first_macroblock,
+                         XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks)
+{
+ struct pipe_video_context *vpipe;
+ struct pipe_surface *t_vsfc;
+ struct pipe_surface *p_vsfc;
+ struct pipe_surface *f_vsfc;
+ XvMCContextPrivate *context_priv;
+ XvMCSurfacePrivate *target_surface_priv;
+ XvMCSurfacePrivate *past_surface_priv;
+ XvMCSurfacePrivate *future_surface_priv;
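+   /* VLA sized by the caller-supplied macroblock count; very large
+    * batches allocate correspondingly large stack space here.
+    */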
+ struct pipe_mpeg12_macroblock pipe_macroblocks[num_macroblocks];
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p.\n", target_surface);
+
+ assert(dpy);
+
+ if (!context || !context->privData)
+ return XvMCBadContext;
+ if (!target_surface || !target_surface->privData)
+ return XvMCBadSurface;
+
+ if (picture_structure != XVMC_TOP_FIELD &&
+ picture_structure != XVMC_BOTTOM_FIELD &&
+ picture_structure != XVMC_FRAME_PICTURE)
+ return BadValue;
+ /* Bkwd pred equivalent to fwd (past && !future) */
+ if (future_surface && !past_surface)
+ return BadMatch;
+
+ assert(context->context_id == target_surface->context_id);
+ assert(!past_surface || context->context_id == past_surface->context_id);
+ assert(!future_surface || context->context_id == future_surface->context_id);
+
+ assert(macroblocks);
+ assert(blocks);
+
+ assert(macroblocks->context_id == context->context_id);
+ assert(blocks->context_id == context->context_id);
+
+ assert(flags == 0 || flags == XVMC_SECOND_FIELD);
+
+ target_surface_priv = target_surface->privData;
+ past_surface_priv = past_surface ? past_surface->privData : NULL;
+ future_surface_priv = future_surface ? future_surface->privData : NULL;
+
+ assert(target_surface_priv->context == context);
+ assert(!past_surface || past_surface_priv->context == context);
+ assert(!future_surface || future_surface_priv->context == context);
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ t_vsfc = target_surface_priv->pipe_vsfc;
+ p_vsfc = past_surface ? past_surface_priv->pipe_vsfc : NULL;
+ f_vsfc = future_surface ? future_surface_priv->pipe_vsfc : NULL;
+
+ MacroBlocksToPipe(vpipe->screen, picture_structure, macroblocks, blocks, first_macroblock,
+ num_macroblocks, pipe_macroblocks);
+
+ vpipe->set_decode_target(vpipe, t_vsfc);
+ vpipe->decode_macroblocks(vpipe, p_vsfc, f_vsfc, num_macroblocks,
+ &pipe_macroblocks->base, &target_surface_priv->render_fence);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCFlushSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
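+   /* No-op: macroblock batches are already handed to the pipe driver in
+    * XvMCRenderSurface.
+    */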
+ return Success;
+}
+
+PUBLIC
+Status XvMCSyncSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
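+   /* No-op for now: the render/display fences kept on the surface are
+    * not yet waited on here.
+    */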
+ return Success;
+}
+
+PUBLIC
+Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
+ short srcx, short srcy, unsigned short srcw, unsigned short srch,
+ short destx, short desty, unsigned short destw, unsigned short desth,
+ int flags)
+{
+ static int dump_window = -1;
+
+ struct pipe_video_context *vpipe;
+ XvMCSurfacePrivate *surface_priv;
+ XvMCContextPrivate *context_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContext *context;
+ struct pipe_video_rect src_rect = {srcx, srcy, srcw, srch};
+ struct pipe_video_rect dst_rect = {destx, desty, destw, desth};
+ struct pipe_surface *drawable_surface;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Displaying surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ surface_priv = surface->privData;
+ context = surface_priv->context;
+ context_priv = context->privData;
+
+ drawable_surface = vl_drawable_surface_get(context_priv->vctx, drawable);
+ if (!drawable_surface)
+ return BadDrawable;
+
+ assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
+ assert(srcx + srcw - 1 < surface->width);
+ assert(srcy + srch - 1 < surface->height);
+ /*
+ * Some apps (mplayer) hit these asserts because they call
+ * this function after the window has been resized by the WM
+ * but before they've handled the corresponding XEvent and
+ * know about the new dimensions. The output should be clipped
+ * until the app updates destw and desth.
+ */
+ /*
+ assert(destx + destw - 1 < drawable_surface->width);
+ assert(desty + desth - 1 < drawable_surface->height);
+ */
+
+ subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
+ vpipe = context_priv->vctx->vpipe;
+
+#if 0
+ if (!CreateOrResizeBackBuffer(context_priv->vctx, width, height, &context_priv->backbuffer))
+ return BadAlloc;
+#endif
+
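+   /* Composite a bound subpicture as an extra picture layer, then detach
+    * it; the association is consumed by this display operation.
+    */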
+ if (subpicture_priv) {
+      /* Named sub_* to avoid shadowing the function-scope src_rect/dst_rect */
+      struct pipe_video_rect sub_src_rect = {surface_priv->subx, surface_priv->suby, surface_priv->subw, surface_priv->subh};
+      struct pipe_video_rect sub_dst_rect = {surface_priv->surfx, surface_priv->surfy, surface_priv->surfw, surface_priv->surfh};
+      struct pipe_video_rect *src_rects[1] = {&sub_src_rect};
+      struct pipe_video_rect *dst_rects[1] = {&sub_dst_rect};
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p has subpicture %p.\n", surface, surface_priv->subpicture);
+
+ assert(subpicture_priv->surface == surface);
+ vpipe->set_picture_layers(vpipe, &subpicture_priv->sfc, src_rects, dst_rects, 1);
+
+ surface_priv->subpicture = NULL;
+ subpicture_priv->surface = NULL;
+ }
+ else
+ vpipe->set_picture_layers(vpipe, NULL, NULL, NULL, 0);
+
+ vpipe->render_picture(vpipe, surface_priv->pipe_vsfc, PictureToPipe(flags), &src_rect,
+ drawable_surface, &dst_rect, &surface_priv->disp_fence);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
+
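+   /* Present the composited frame to the drawable's front buffer */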
+   vpipe->screen->flush_frontbuffer(vpipe->screen, drawable_surface->texture, 0, 0,
+                                    vl_contextprivate_get(context_priv->vctx, drawable_surface));
+
+ pipe_surface_reference(&drawable_surface, NULL);
+
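+   /* Debugging aid: setting XVMC_DUMP in the environment dumps every
+    * displayed frame to an xwd file via the external xwd tool.
+    */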
+   if (dump_window == -1) {
+      dump_window = debug_get_num_option("XVMC_DUMP", 0);
+   }
+
+   if (dump_window) {
+      static unsigned int framenum = 0;
+      char cmd[256];
+
+      /* Bounded formatting; plain sprintf could overflow cmd */
+      snprintf(cmd, sizeof(cmd), "xwd -id %d -out xvmc_frame_%08d.xwd", (int)drawable, ++framenum);
+      if (system(cmd) != 0)
+         XVMC_MSG(XVMC_ERR, "[XvMC] Failed to dump frame %08d.\n", framenum);
+   }
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Pushed surface %p to front buffer.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ assert(status);
+
+ *status = 0;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
+{
+ XvMCSurfacePrivate *surface_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ surface_priv = surface->privData;
+ pipe_surface_reference(&surface_priv->pipe_vsfc, NULL);
+ FREE(surface_priv);
+ surface->privData = NULL;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p destroyed.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCHideSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ /* No op, only for overlaid rendering */
+
+ return Success;
+}