mesa.git: src/gallium/state_trackers/xorg/xvmc/surface.c
1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29 #include <stdio.h>
30
31 #include <X11/Xlibint.h>
32
33 #include <pipe/p_video_context.h>
34 #include <pipe/p_video_state.h>
35 #include <pipe/p_state.h>
36
37 #include <util/u_inlines.h>
38 #include <util/u_memory.h>
39 #include <util/u_math.h>
40
41 #include <vl_winsys.h>
42
43 #include "xvmc_private.h"
44
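/* coded_block_pattern bit masks for a 4:2:0 macroblock, indexed [component][y][x]:
 * bits 5..2 select the four luma blocks, bit 1 the Cb block and bit 0 the Cr block. */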
45 static const unsigned const_empty_block_mask_420[3][2][2] = {
46 { { 0x20, 0x10 }, { 0x08, 0x04 } },
47 { { 0x02, 0x02 }, { 0x02, 0x02 } },
48 { { 0x01, 0x01 }, { 0x01, 0x01 } }
49 };
50
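/* Translate an XvMC picture structure value into the corresponding pipe MPEG-2 picture type. */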
51 static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
52 {
53 switch (xvmc_pic) {
54 case XVMC_TOP_FIELD:
55 return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP;
56 case XVMC_BOTTOM_FIELD:
57 return PIPE_MPEG12_PICTURE_TYPE_FIELD_BOTTOM;
58 case XVMC_FRAME_PICTURE:
59 return PIPE_MPEG12_PICTURE_TYPE_FRAME;
60 default:
61 assert(0);
62 }
63
64 XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized picture type 0x%08X.\n", xvmc_pic);
65
66 return -1;
67 }
68
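/* Derive the forward/backward motion compensation weights from the macroblock's motion flags. */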
69 static inline void
70 MacroBlockTypeToPipeWeights(const XvMCMacroBlock *xvmc_mb, unsigned weights[2])
71 {
72 assert(xvmc_mb);
73
74 switch (xvmc_mb->macroblock_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) {
75 case XVMC_MB_TYPE_MOTION_FORWARD:
76 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
77 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
78 break;
79
80 case (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD):
81 weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
82 weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
83 break;
84
85 case XVMC_MB_TYPE_MOTION_BACKWARD:
86 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
87 weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
88 break;
89
90 default:
91 /* workaround for xine's xxmc video out plugin */
92 if (!(xvmc_mb->macroblock_type & ~XVMC_MB_TYPE_PATTERN)) {
93 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
94 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
95 } else {
96 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
97 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
98 }
99 break;
100 }
101 }
102
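/* Convert one of the macroblock's motion vectors into a pipe_motionvector,
 * filling in the top and bottom field predictions. */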
103 static inline struct pipe_motionvector
104 MotionVectorToPipe(const XvMCMacroBlock *xvmc_mb, unsigned vector,
105 unsigned field_select_mask, unsigned weight)
106 {
107 struct pipe_motionvector mv;
108
109 assert(xvmc_mb);
110
111 switch (xvmc_mb->motion_type) {
112 case XVMC_PREDICTION_FRAME:
113 mv.top.x = xvmc_mb->PMV[0][vector][0];
114 mv.top.y = xvmc_mb->PMV[0][vector][1];
115 mv.top.field_select = PIPE_VIDEO_FRAME;
116 mv.top.weight = weight;
117
118 mv.bottom.x = xvmc_mb->PMV[0][vector][0];
119 mv.bottom.y = xvmc_mb->PMV[0][vector][1];
120 mv.bottom.weight = weight;
121 mv.bottom.field_select = PIPE_VIDEO_FRAME;
122 break;
123
124 case XVMC_PREDICTION_FIELD:
125 mv.top.x = xvmc_mb->PMV[0][vector][0];
126 mv.top.y = xvmc_mb->PMV[0][vector][1];
127 mv.top.field_select = (xvmc_mb->motion_vertical_field_select & field_select_mask) ?
128 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
129 mv.top.weight = weight;
130
131 mv.bottom.x = xvmc_mb->PMV[1][vector][0];
132 mv.bottom.y = xvmc_mb->PMV[1][vector][1];
133 mv.bottom.field_select = (xvmc_mb->motion_vertical_field_select & (field_select_mask << 2)) ?
134 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
135 mv.bottom.weight = weight;
136 break;
137
138 default: // TODO: Support DUALPRIME and 16x8; mv is currently returned uninitialized for those motion types
139 break;
140 }
141
142 return mv;
143 }
144
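/* Append the macroblock's coded DCT blocks to the per-component ycbcr streams
 * and buffers of the decode buffer. */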
145 static inline void
146 UploadYcbcrBlocks(XvMCSurfacePrivate *surface,
147 const XvMCMacroBlock *xvmc_mb,
148 const XvMCBlockArray *xvmc_blocks)
149 {
150 enum pipe_mpeg12_dct_intra intra;
151 enum pipe_mpeg12_dct_type coding;
152
153 unsigned tb, x, y, luma_blocks;
154 short *blocks;
155
156 assert(surface);
157 assert(xvmc_mb);
158
159 if (!xvmc_mb->coded_block_pattern)
160 return;
161
162 intra = xvmc_mb->macroblock_type & XVMC_MB_TYPE_INTRA ?
163 PIPE_MPEG12_DCT_INTRA : PIPE_MPEG12_DCT_DELTA;
164
165 coding = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
166 PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
167
168 blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
169
170 for (y = 0, luma_blocks = 0; y < 2; ++y) {
171 for (x = 0; x < 2; ++x) {
172 if (xvmc_mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
173
174 struct pipe_ycbcr_block *stream = surface->ycbcr[0].stream;
175 stream->x = xvmc_mb->x * 2 + x;
176 stream->y = xvmc_mb->y * 2 + y;
177 stream->intra = intra;
178 stream->coding = coding;
179
180 surface->ycbcr[0].num_blocks_added++;
181 surface->ycbcr[0].stream++;
182
183 luma_blocks++;
184 }
185 }
186 }
187
188 if (luma_blocks > 0) {
189 memcpy(surface->ycbcr[0].buffer, blocks, BLOCK_SIZE_BYTES * luma_blocks);
190 surface->ycbcr[0].buffer += BLOCK_SIZE_SAMPLES * luma_blocks;
191 blocks += BLOCK_SIZE_SAMPLES * luma_blocks;
192 }
193
194 /* TODO: Implement 422, 444 */
195 //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
196
197 for (tb = 1; tb < 3; ++tb) {
198 if (xvmc_mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
199
200 struct pipe_ycbcr_block *stream = surface->ycbcr[tb].stream;
201 stream->x = xvmc_mb->x;
202 stream->y = xvmc_mb->y;
203 stream->intra = intra;
204 stream->coding = PIPE_MPEG12_DCT_TYPE_FRAME;
205
206 memcpy(surface->ycbcr[tb].buffer, blocks, BLOCK_SIZE_BYTES);
207
208 surface->ycbcr[tb].num_blocks_added++;
209 surface->ycbcr[tb].stream++;
210 surface->ycbcr[tb].buffer += BLOCK_SIZE_SAMPLES;
211 blocks += BLOCK_SIZE_SAMPLES;
212 }
213 }
214
215 }
216
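/* Convert a run of XvMC macroblocks: upload their coded blocks and write their
 * motion vectors into the decode buffer's motion vector streams. */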
217 static void
218 MacroBlocksToPipe(XvMCSurfacePrivate *surface,
219 unsigned int xvmc_picture_structure,
220 const XvMCMacroBlock *xvmc_mb,
221 const XvMCBlockArray *xvmc_blocks,
222 unsigned int num_macroblocks)
223 {
224 unsigned int i, j;
225
226 assert(xvmc_mb);
227 assert(xvmc_blocks);
228 assert(num_macroblocks);
229
230 for (i = 0; i < num_macroblocks; ++i) {
231 unsigned mv_pos = xvmc_mb->x + surface->mv_stride * xvmc_mb->y;
232 unsigned mv_weights[2];
233
234 if (xvmc_mb->macroblock_type & (XVMC_MB_TYPE_PATTERN | XVMC_MB_TYPE_INTRA))
235 UploadYcbcrBlocks(surface, xvmc_mb, xvmc_blocks);
236
237 MacroBlockTypeToPipeWeights(xvmc_mb, mv_weights);
238
239 for (j = 0; j < 2; ++j) {
240 if (!surface->ref[j].mv) continue;
241
242 surface->ref[j].mv[mv_pos] = MotionVectorToPipe
243 (
244 xvmc_mb, j,
245 j ? XVMC_SELECT_FIRST_BACKWARD : XVMC_SELECT_FIRST_FORWARD,
246 mv_weights[j]
247 );
248 }
249
250 ++xvmc_mb;
251 }
252 }
253
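/* Flush any still-mapped reference surfaces first, then unmap this surface's decode
 * buffer and submit it to the decoder together with its reference frames. */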
254 static void
255 unmap_and_flush_surface(XvMCSurfacePrivate *surface)
256 {
257 struct pipe_video_buffer *ref_frames[2];
258 XvMCContextPrivate *context_priv;
259 unsigned i, num_ycbcr_blocks[3];
260
261 assert(surface);
262
263 context_priv = surface->context->privData;
264
265 for (i = 0; i < 2; ++i) {
266 if (surface->ref[i].surface) {
267 XvMCSurfacePrivate *ref = surface->ref[i].surface->privData;
268
269 assert(ref);
270
271 unmap_and_flush_surface(ref);
272 surface->ref[i].surface = NULL;
273 ref_frames[i] = ref->video_buffer;
274 } else {
275 ref_frames[i] = NULL;
276 }
277 }
278
279 if (surface->mapped) {
280 surface->decode_buffer->unmap(surface->decode_buffer);
281 for (i = 0; i < 3; ++i)
282 num_ycbcr_blocks[i] = surface->ycbcr[i].num_blocks_added;
283 context_priv->decoder->flush_buffer(surface->decode_buffer,
284 num_ycbcr_blocks,
285 ref_frames,
286 surface->video_buffer);
287 surface->mapped = 0;
288 }
289 }
290
291 PUBLIC
292 Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
293 {
294 XvMCContextPrivate *context_priv;
295 struct pipe_video_context *vpipe;
296 XvMCSurfacePrivate *surface_priv;
297
298 XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
299
300 assert(dpy);
301
302 if (!context)
303 return XvMCBadContext;
304 if (!surface)
305 return XvMCBadSurface;
306
307 context_priv = context->privData;
308 vpipe = context_priv->vctx->vpipe;
309
310 surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
311 if (!surface_priv)
312 return BadAlloc;
313
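// Allocate the decode buffer plus an NV12 video buffer to hold the decoded picture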
314 surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder);
315 surface_priv->mv_stride = surface_priv->decode_buffer->get_mv_stream_stride(surface_priv->decode_buffer);
316 surface_priv->video_buffer = vpipe->create_buffer(vpipe, PIPE_FORMAT_NV12,
317 context_priv->decoder->chroma_format,
318 context_priv->decoder->width,
319 context_priv->decoder->height);
320 surface_priv->context = context;
321
322 surface->surface_id = XAllocID(dpy);
323 surface->context_id = context->context_id;
324 surface->surface_type_id = context->surface_type_id;
325 surface->width = context->width;
326 surface->height = context->height;
327 surface->privData = surface_priv;
328
329 SyncHandle();
330
331 XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p created.\n", surface);
332
333 return Success;
334 }
335
336 PUBLIC
337 Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int picture_structure,
338 XvMCSurface *target_surface, XvMCSurface *past_surface, XvMCSurface *future_surface,
339 unsigned int flags, unsigned int num_macroblocks, unsigned int first_macroblock,
340 XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
341 )
342 {
343 struct pipe_video_context *vpipe;
344 struct pipe_video_decode_buffer *t_buffer;
345
346 XvMCContextPrivate *context_priv;
347 XvMCSurfacePrivate *target_surface_priv;
348 XvMCSurfacePrivate *past_surface_priv;
349 XvMCSurfacePrivate *future_surface_priv;
350 XvMCMacroBlock *xvmc_mb;
351
352 unsigned i;
353
354 XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p, with past %p and future %p\n",
355 target_surface, past_surface, future_surface);
356
357 assert(dpy);
358
359 if (!context || !context->privData)
360 return XvMCBadContext;
361 if (!target_surface || !target_surface->privData)
362 return XvMCBadSurface;
363
364 if (picture_structure != XVMC_TOP_FIELD &&
365 picture_structure != XVMC_BOTTOM_FIELD &&
366 picture_structure != XVMC_FRAME_PICTURE)
367 return BadValue;
368 /* Bkwd pred equivalent to fwd (past && !future) */
369 if (future_surface && !past_surface)
370 return BadMatch;
371
372 assert(context->context_id == target_surface->context_id);
373 assert(!past_surface || context->context_id == past_surface->context_id);
374 assert(!future_surface || context->context_id == future_surface->context_id);
375
376 assert(macroblocks);
377 assert(blocks);
378
379 assert(macroblocks->context_id == context->context_id);
380 assert(blocks->context_id == context->context_id);
381
382 assert(flags == 0 || flags == XVMC_SECOND_FIELD);
383
384 target_surface_priv = target_surface->privData;
385 past_surface_priv = past_surface ? past_surface->privData : NULL;
386 future_surface_priv = future_surface ? future_surface->privData : NULL;
387
388 assert(target_surface_priv->context == context);
389 assert(!past_surface || past_surface_priv->context == context);
390 assert(!future_surface || future_surface_priv->context == context);
391
392 context_priv = context->privData;
393 vpipe = context_priv->vctx->vpipe;
394
395 t_buffer = target_surface_priv->decode_buffer;
396
397 // ensure that all reference frames are flushed;
398 // not strictly necessary, but it speeds up rendering
399 if (past_surface)
400 unmap_and_flush_surface(past_surface->privData);
401
402 if (future_surface)
403 unmap_and_flush_surface(future_surface->privData);
404
405 xvmc_mb = macroblocks->macro_blocks + first_macroblock;
406
407 /* If the surface we're rendering hasn't changed, the ref frames shouldn't change. */
408 if (target_surface_priv->mapped && (
409 target_surface_priv->ref[0].surface != past_surface ||
410 target_surface_priv->ref[1].surface != future_surface ||
411 (xvmc_mb->x == 0 && xvmc_mb->y == 0))) {
412
413 // If they did change (or a new picture starts), flush the pending state first
414 unmap_and_flush_surface(target_surface_priv);
415 }
416
417 if (!target_surface_priv->mapped) {
418 t_buffer->map(t_buffer);
419
420 for (i = 0; i < 3; ++i) {
421 target_surface_priv->ycbcr[i].num_blocks_added = 0;
422 target_surface_priv->ycbcr[i].stream = t_buffer->get_ycbcr_stream(t_buffer, i);
423 target_surface_priv->ycbcr[i].buffer = t_buffer->get_ycbcr_buffer(t_buffer, i);
424 }
425
426 for (i = 0; i < 2; ++i) {
427 target_surface_priv->ref[i].surface = i == 0 ? past_surface : future_surface;
428
429 if (target_surface_priv->ref[i].surface)
430 target_surface_priv->ref[i].mv = t_buffer->get_mv_stream(t_buffer, i);
431 else
432 target_surface_priv->ref[i].mv = NULL;
433 }
434
435 target_surface_priv->mapped = 1;
436 }
437
438 MacroBlocksToPipe(target_surface_priv, picture_structure, xvmc_mb, blocks, num_macroblocks);
439
440 XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
441
442 return Success;
443 }
444
445 PUBLIC
446 Status XvMCFlushSurface(Display *dpy, XvMCSurface *surface)
447 {
448 assert(dpy);
449
450 if (!surface)
451 return XvMCBadSurface;
452
453 // don't call flush here, because this is usually
454 // called once for every slice instead of every frame
455
456 XVMC_MSG(XVMC_TRACE, "[XvMC] Flushing surface %p\n", surface);
457
458 return Success;
459 }
460
461 PUBLIC
462 Status XvMCSyncSurface(Display *dpy, XvMCSurface *surface)
463 {
464 assert(dpy);
465
466 if (!surface)
467 return XvMCBadSurface;
468
469 XVMC_MSG(XVMC_TRACE, "[XvMC] Syncing surface %p\n", surface);
470
471 return Success;
472 }
473
474 PUBLIC
475 Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
476 short srcx, short srcy, unsigned short srcw, unsigned short srch,
477 short destx, short desty, unsigned short destw, unsigned short desth,
478 int flags)
479 {
480 static int dump_window = -1;
481
482 struct pipe_video_context *vpipe;
483 struct pipe_video_compositor *compositor;
484
485 XvMCSurfacePrivate *surface_priv;
486 XvMCContextPrivate *context_priv;
487 XvMCSubpicturePrivate *subpicture_priv;
488 XvMCContext *context;
489 struct pipe_video_rect src_rect = {srcx, srcy, srcw, srch};
490 struct pipe_video_rect dst_rect = {destx, desty, destw, desth};
491
492 XVMC_MSG(XVMC_TRACE, "[XvMC] Displaying surface %p.\n", surface);
493
494 assert(dpy);
495
496 if (!surface || !surface->privData)
497 return XvMCBadSurface;
498
499 surface_priv = surface->privData;
500 context = surface_priv->context;
501 context_priv = context->privData;
502
503 assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
504 assert(srcx + srcw - 1 < surface->width);
505 assert(srcy + srch - 1 < surface->height);
506
507 subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
508 vpipe = context_priv->vctx->vpipe;
509 compositor = context_priv->compositor;
510
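// (Re)create the drawable surface when it doesn't exist yet or the destination rectangle has changed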
511 if (!context_priv->drawable_surface ||
512 context_priv->dst_rect.x != dst_rect.x || context_priv->dst_rect.y != dst_rect.y ||
513 context_priv->dst_rect.w != dst_rect.w || context_priv->dst_rect.h != dst_rect.h) {
514
515 context_priv->drawable_surface = vl_drawable_surface_get(context_priv->vctx, drawable);
516 context_priv->dst_rect = dst_rect;
517 compositor->reset_dirty_area(compositor);
518 }
519
520 if (!context_priv->drawable_surface)
521 return BadDrawable;
522
523 /*
524 * Some apps (mplayer) hit these asserts because they call
525 * this function after the window has been resized by the WM
526 * but before they've handled the corresponding XEvent and
527 * know about the new dimensions. The output should be clipped
528 * until the app updates destw and desth.
529 */
530 /*
531 assert(destx + destw - 1 < drawable_surface->width);
532 assert(desty + desth - 1 < drawable_surface->height);
533 */
534
535 unmap_and_flush_surface(surface_priv);
536
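// Layer the decoded video buffer and, if present, the subpicture into the compositor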
537 compositor->clear_layers(compositor);
538 compositor->set_buffer_layer(compositor, 0, surface_priv->video_buffer, &src_rect, NULL);
539
540 if (subpicture_priv) {
541 XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p has subpicture %p.\n", surface, surface_priv->subpicture);
542
543 assert(subpicture_priv->surface == surface);
544
545 if (subpicture_priv->palette)
546 compositor->set_palette_layer(compositor, 1, subpicture_priv->sampler, subpicture_priv->palette,
547 &subpicture_priv->src_rect, &subpicture_priv->dst_rect);
548 else
549 compositor->set_rgba_layer(compositor, 1, subpicture_priv->sampler,
550 &subpicture_priv->src_rect, &subpicture_priv->dst_rect);
551
552 surface_priv->subpicture = NULL;
553 subpicture_priv->surface = NULL;
554 }
555
556 // Workaround for r600g: there seems to be a bug in the fence refcounting code
557 vpipe->screen->fence_reference(vpipe->screen, &surface_priv->fence, NULL);
558
559 compositor->render_picture(compositor, PictureToPipe(flags), context_priv->drawable_surface, &dst_rect, &surface_priv->fence);
560
561 XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
562
563 vpipe->screen->flush_frontbuffer
564 (
565 vpipe->screen,
566 context_priv->drawable_surface->texture,
567 0, 0,
568 vl_contextprivate_get(context_priv->vctx, context_priv->drawable_surface)
569 );
570
571 if (dump_window == -1) {
572 dump_window = debug_get_num_option("XVMC_DUMP", 0);
573 }
574
575 if (dump_window) {
576 static unsigned int framenum = 0;
577 char cmd[256];
578
579 snprintf(cmd, sizeof(cmd), "xwd -id %d -out xvmc_frame_%08d.xwd", (int)drawable, ++framenum);
580 if (system(cmd) != 0)
581 XVMC_MSG(XVMC_ERR, "[XvMC] Dumping surface %p failed.\n", surface);
582 }
583
584 XVMC_MSG(XVMC_TRACE, "[XvMC] Pushed surface %p to front buffer.\n", surface);
585
586 return Success;
587 }
588
589 PUBLIC
590 Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
591 {
592 struct pipe_video_context *vpipe;
593 XvMCSurfacePrivate *surface_priv;
594 XvMCContextPrivate *context_priv;
595
596 assert(dpy);
597
598 if (!surface)
599 return XvMCBadSurface;
600
601 assert(status);
602
603 surface_priv = surface->privData;
604 context_priv = surface_priv->context->privData;
605 vpipe = context_priv->vctx->vpipe;
606
607 *status = 0;
608
609 if (surface_priv->fence)
610 if (!vpipe->screen->fence_signalled(vpipe->screen, surface_priv->fence))
611 *status |= XVMC_RENDERING;
612
613 return Success;
614 }
615
616 PUBLIC
617 Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
618 {
619 XvMCSurfacePrivate *surface_priv;
620
621 XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface);
622
623 assert(dpy);
624
625 if (!surface || !surface->privData)
626 return XvMCBadSurface;
627
628 surface_priv = surface->privData;
629 surface_priv->decode_buffer->destroy(surface_priv->decode_buffer);
630 surface_priv->video_buffer->destroy(surface_priv->video_buffer);
631 FREE(surface_priv);
632 surface->privData = NULL;
633
634 XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p destroyed.\n", surface);
635
636 return Success;
637 }
638
639 PUBLIC
640 Status XvMCHideSurface(Display *dpy, XvMCSurface *surface)
641 {
642 assert(dpy);
643
644 if (!surface || !surface->privData)
645 return XvMCBadSurface;
646
647 /* No op, only for overlaid rendering */
648
649 return Success;
650 }