1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <X11/Xlibint.h>

#include <pipe/p_video_context.h>
#include <pipe/p_video_state.h>
#include <pipe/p_state.h>

#include <util/u_inlines.h>
#include <util/u_memory.h>
#include <util/u_math.h>

#include <vl_winsys.h>

#include "xvmc_private.h"
/* coded_block_pattern bit masks for 4:2:0 chroma, indexed [plane][y][x]:
 * plane 0 = luma (four 8x8 blocks in a 2x2 layout), planes 1/2 = Cb/Cr
 * (one block each, so all four entries of those planes carry the same bit). */
static const unsigned const_empty_block_mask_420[3][2][2] = {
   { { 0x20, 0x10 },  { 0x08, 0x04 } },
   { { 0x02, 0x02 },  { 0x02, 0x02 } },
   { { 0x01, 0x01 },  { 0x01, 0x01 } }
};
51 static enum pipe_mpeg12_picture_type
PictureToPipe(int xvmc_pic
)
55 return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP
;
56 case XVMC_BOTTOM_FIELD
:
57 return PIPE_MPEG12_PICTURE_TYPE_FIELD_BOTTOM
;
58 case XVMC_FRAME_PICTURE
:
59 return PIPE_MPEG12_PICTURE_TYPE_FRAME
;
64 XVMC_MSG(XVMC_ERR
, "[XvMC] Unrecognized picture type 0x%08X.\n", xvmc_pic
);
70 MacroBlockTypeToPipeWeights(const XvMCMacroBlock
*xvmc_mb
, unsigned weights
[2])
74 switch (xvmc_mb
->macroblock_type
& (XVMC_MB_TYPE_MOTION_FORWARD
| XVMC_MB_TYPE_MOTION_BACKWARD
)) {
75 case XVMC_MB_TYPE_MOTION_FORWARD
:
76 weights
[0] = PIPE_VIDEO_MV_WEIGHT_MAX
;
77 weights
[1] = PIPE_VIDEO_MV_WEIGHT_MIN
;
80 case (XVMC_MB_TYPE_MOTION_FORWARD
| XVMC_MB_TYPE_MOTION_BACKWARD
):
81 weights
[0] = PIPE_VIDEO_MV_WEIGHT_HALF
;
82 weights
[1] = PIPE_VIDEO_MV_WEIGHT_HALF
;
85 case XVMC_MB_TYPE_MOTION_BACKWARD
:
86 weights
[0] = PIPE_VIDEO_MV_WEIGHT_MIN
;
87 weights
[1] = PIPE_VIDEO_MV_WEIGHT_MAX
;
91 /* workaround for xines xxmc video out plugin */
92 if (!(xvmc_mb
->macroblock_type
& ~XVMC_MB_TYPE_PATTERN
)) {
93 weights
[0] = PIPE_VIDEO_MV_WEIGHT_MAX
;
94 weights
[1] = PIPE_VIDEO_MV_WEIGHT_MIN
;
96 weights
[0] = PIPE_VIDEO_MV_WEIGHT_MIN
;
97 weights
[1] = PIPE_VIDEO_MV_WEIGHT_MIN
;
103 static inline struct pipe_motionvector
104 MotionVectorToPipe(const XvMCMacroBlock
*xvmc_mb
, unsigned vector
,
105 unsigned field_select_mask
, unsigned weight
)
107 struct pipe_motionvector mv
;
111 switch (xvmc_mb
->motion_type
) {
112 case XVMC_PREDICTION_FRAME
:
113 mv
.top
.x
= xvmc_mb
->PMV
[0][vector
][0];
114 mv
.top
.y
= xvmc_mb
->PMV
[0][vector
][1];
115 mv
.top
.field_select
= PIPE_VIDEO_FRAME
;
116 mv
.top
.weight
= weight
;
118 mv
.bottom
.x
= xvmc_mb
->PMV
[0][vector
][0];
119 mv
.bottom
.y
= xvmc_mb
->PMV
[0][vector
][1];
120 mv
.bottom
.weight
= weight
;
121 mv
.bottom
.field_select
= PIPE_VIDEO_FRAME
;
124 case XVMC_PREDICTION_FIELD
:
125 mv
.top
.x
= xvmc_mb
->PMV
[0][vector
][0];
126 mv
.top
.y
= xvmc_mb
->PMV
[0][vector
][1];
127 mv
.top
.field_select
= (xvmc_mb
->motion_vertical_field_select
& field_select_mask
) ?
128 PIPE_VIDEO_BOTTOM_FIELD
: PIPE_VIDEO_TOP_FIELD
;
129 mv
.top
.weight
= weight
;
131 mv
.bottom
.x
= xvmc_mb
->PMV
[1][vector
][0];
132 mv
.bottom
.y
= xvmc_mb
->PMV
[1][vector
][1];
133 mv
.bottom
.field_select
= (xvmc_mb
->motion_vertical_field_select
& (field_select_mask
<< 2)) ?
134 PIPE_VIDEO_BOTTOM_FIELD
: PIPE_VIDEO_TOP_FIELD
;
135 mv
.bottom
.weight
= weight
;
138 default: // TODO: Support DUALPRIME and 16x8
146 UploadYcbcrBlocks(XvMCSurfacePrivate
*surface
,
147 const XvMCMacroBlock
*xvmc_mb
,
148 const XvMCBlockArray
*xvmc_blocks
)
150 enum pipe_mpeg12_dct_intra intra
;
151 enum pipe_mpeg12_dct_type coding
;
153 unsigned tb
, x
, y
, luma_blocks
;
159 if (!xvmc_mb
->coded_block_pattern
)
162 intra
= xvmc_mb
->macroblock_type
& XVMC_MB_TYPE_INTRA
?
163 PIPE_MPEG12_DCT_INTRA
: PIPE_MPEG12_DCT_DELTA
;
165 coding
= xvmc_mb
->dct_type
== XVMC_DCT_TYPE_FIELD
?
166 PIPE_MPEG12_DCT_TYPE_FIELD
: PIPE_MPEG12_DCT_TYPE_FRAME
;
168 blocks
= xvmc_blocks
->blocks
+ xvmc_mb
->index
* BLOCK_SIZE_SAMPLES
;
170 for (y
= 0, luma_blocks
= 0; y
< 2; ++y
) {
171 for (x
= 0; x
< 2; ++x
, ++tb
) {
172 if (xvmc_mb
->coded_block_pattern
& const_empty_block_mask_420
[0][y
][x
]) {
174 struct pipe_ycbcr_block
*stream
= surface
->ycbcr
[0].stream
;
175 stream
->x
= xvmc_mb
->x
* 2 + x
;
176 stream
->y
= xvmc_mb
->y
* 2 + y
;
177 stream
->intra
= intra
;
178 stream
->coding
= coding
;
180 surface
->ycbcr
[0].num_blocks_added
++;
181 surface
->ycbcr
[0].stream
++;
188 if (luma_blocks
> 0) {
189 memcpy(surface
->ycbcr
[0].buffer
, blocks
, BLOCK_SIZE_BYTES
* luma_blocks
);
190 surface
->ycbcr
[0].buffer
+= BLOCK_SIZE_SAMPLES
* luma_blocks
;
191 blocks
+= BLOCK_SIZE_SAMPLES
* luma_blocks
;
194 /* TODO: Implement 422, 444 */
195 //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
197 for (tb
= 1; tb
< 3; ++tb
) {
198 if (xvmc_mb
->coded_block_pattern
& const_empty_block_mask_420
[tb
][0][0]) {
200 struct pipe_ycbcr_block
*stream
= surface
->ycbcr
[tb
].stream
;
201 stream
->x
= xvmc_mb
->x
;
202 stream
->y
= xvmc_mb
->y
;
203 stream
->intra
= intra
;
204 stream
->coding
= PIPE_MPEG12_DCT_TYPE_FRAME
;
206 memcpy(surface
->ycbcr
[tb
].buffer
, blocks
, BLOCK_SIZE_BYTES
);
208 surface
->ycbcr
[tb
].num_blocks_added
++;
209 surface
->ycbcr
[tb
].stream
++;
210 surface
->ycbcr
[tb
].buffer
+= BLOCK_SIZE_SAMPLES
;
211 blocks
+= BLOCK_SIZE_SAMPLES
;
218 MacroBlocksToPipe(XvMCSurfacePrivate
*surface
,
219 unsigned int xvmc_picture_structure
,
220 const XvMCMacroBlock
*xvmc_mb
,
221 const XvMCBlockArray
*xvmc_blocks
,
222 unsigned int num_macroblocks
)
228 assert(num_macroblocks
);
230 for (i
= 0; i
< num_macroblocks
; ++i
) {
231 unsigned mv_pos
= xvmc_mb
->x
+ surface
->mv_stride
* xvmc_mb
->y
;
232 unsigned mv_weights
[2];
234 if (xvmc_mb
->macroblock_type
& (XVMC_MB_TYPE_PATTERN
| XVMC_MB_TYPE_INTRA
))
235 UploadYcbcrBlocks(surface
, xvmc_mb
, xvmc_blocks
);
237 MacroBlockTypeToPipeWeights(xvmc_mb
, mv_weights
);
239 for (j
= 0; j
< 2; ++j
) {
240 if (!surface
->ref
[j
].mv
) continue;
242 surface
->ref
[j
].mv
[mv_pos
] = MotionVectorToPipe
245 j
? XVMC_SELECT_FIRST_BACKWARD
: XVMC_SELECT_FIRST_FORWARD
,
255 unmap_and_flush_surface(XvMCSurfacePrivate
*surface
)
257 struct pipe_video_buffer
*ref_frames
[2];
258 XvMCContextPrivate
*context_priv
;
259 unsigned i
, num_ycbcr_blocks
[3];
263 context_priv
= surface
->context
->privData
;
265 for ( i
= 0; i
< 2; ++i
) {
266 if (surface
->ref
[i
].surface
) {
267 XvMCSurfacePrivate
*ref
= surface
->ref
[i
].surface
->privData
;
271 unmap_and_flush_surface(ref
);
272 surface
->ref
[i
].surface
= NULL
;
273 ref_frames
[i
] = ref
->video_buffer
;
275 ref_frames
[i
] = NULL
;
279 if (surface
->mapped
) {
280 surface
->decode_buffer
->end_frame(surface
->decode_buffer
);
281 for (i
= 0; i
< 3; ++i
)
282 num_ycbcr_blocks
[i
] = surface
->ycbcr
[i
].num_blocks_added
;
283 context_priv
->decoder
->flush_buffer(surface
->decode_buffer
,
286 surface
->video_buffer
);
292 Status
XvMCCreateSurface(Display
*dpy
, XvMCContext
*context
, XvMCSurface
*surface
)
294 XvMCContextPrivate
*context_priv
;
295 struct pipe_video_context
*vpipe
;
296 XvMCSurfacePrivate
*surface_priv
;
298 XVMC_MSG(XVMC_TRACE
, "[XvMC] Creating surface %p.\n", surface
);
303 return XvMCBadContext
;
305 return XvMCBadSurface
;
307 context_priv
= context
->privData
;
308 vpipe
= context_priv
->vctx
->vpipe
;
310 surface_priv
= CALLOC(1, sizeof(XvMCSurfacePrivate
));
314 surface_priv
->decode_buffer
= context_priv
->decoder
->create_buffer(context_priv
->decoder
);
315 surface_priv
->mv_stride
= surface_priv
->decode_buffer
->get_mv_stream_stride(surface_priv
->decode_buffer
);
316 surface_priv
->video_buffer
= vpipe
->create_buffer(vpipe
, PIPE_FORMAT_NV12
,
317 context_priv
->decoder
->chroma_format
,
318 context_priv
->decoder
->width
,
319 context_priv
->decoder
->height
);
320 surface_priv
->context
= context
;
322 surface
->surface_id
= XAllocID(dpy
);
323 surface
->context_id
= context
->context_id
;
324 surface
->surface_type_id
= context
->surface_type_id
;
325 surface
->width
= context
->width
;
326 surface
->height
= context
->height
;
327 surface
->privData
= surface_priv
;
331 XVMC_MSG(XVMC_TRACE
, "[XvMC] Surface %p created.\n", surface
);
337 Status
XvMCRenderSurface(Display
*dpy
, XvMCContext
*context
, unsigned int picture_structure
,
338 XvMCSurface
*target_surface
, XvMCSurface
*past_surface
, XvMCSurface
*future_surface
,
339 unsigned int flags
, unsigned int num_macroblocks
, unsigned int first_macroblock
,
340 XvMCMacroBlockArray
*macroblocks
, XvMCBlockArray
*blocks
343 struct pipe_video_context
*vpipe
;
344 struct pipe_video_decode_buffer
*t_buffer
;
346 XvMCContextPrivate
*context_priv
;
347 XvMCSurfacePrivate
*target_surface_priv
;
348 XvMCSurfacePrivate
*past_surface_priv
;
349 XvMCSurfacePrivate
*future_surface_priv
;
350 XvMCMacroBlock
*xvmc_mb
;
354 XVMC_MSG(XVMC_TRACE
, "[XvMC] Rendering to surface %p, with past %p and future %p\n",
355 target_surface
, past_surface
, future_surface
);
359 if (!context
|| !context
->privData
)
360 return XvMCBadContext
;
361 if (!target_surface
|| !target_surface
->privData
)
362 return XvMCBadSurface
;
364 if (picture_structure
!= XVMC_TOP_FIELD
&&
365 picture_structure
!= XVMC_BOTTOM_FIELD
&&
366 picture_structure
!= XVMC_FRAME_PICTURE
)
368 /* Bkwd pred equivalent to fwd (past && !future) */
369 if (future_surface
&& !past_surface
)
372 assert(context
->context_id
== target_surface
->context_id
);
373 assert(!past_surface
|| context
->context_id
== past_surface
->context_id
);
374 assert(!future_surface
|| context
->context_id
== future_surface
->context_id
);
379 assert(macroblocks
->context_id
== context
->context_id
);
380 assert(blocks
->context_id
== context
->context_id
);
382 assert(flags
== 0 || flags
== XVMC_SECOND_FIELD
);
384 target_surface_priv
= target_surface
->privData
;
385 past_surface_priv
= past_surface
? past_surface
->privData
: NULL
;
386 future_surface_priv
= future_surface
? future_surface
->privData
: NULL
;
388 assert(target_surface_priv
->context
== context
);
389 assert(!past_surface
|| past_surface_priv
->context
== context
);
390 assert(!future_surface
|| future_surface_priv
->context
== context
);
392 context_priv
= context
->privData
;
393 vpipe
= context_priv
->vctx
->vpipe
;
395 t_buffer
= target_surface_priv
->decode_buffer
;
397 // enshure that all reference frames are flushed
398 // not really nessasary, but speeds ups rendering
400 unmap_and_flush_surface(past_surface
->privData
);
403 unmap_and_flush_surface(future_surface
->privData
);
405 xvmc_mb
= macroblocks
->macro_blocks
+ first_macroblock
;
407 /* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
408 if (target_surface_priv
->mapped
&& (
409 target_surface_priv
->ref
[0].surface
!= past_surface
||
410 target_surface_priv
->ref
[1].surface
!= future_surface
||
411 (xvmc_mb
->x
== 0 && xvmc_mb
->y
== 0))) {
413 // If they change anyway we need to clear our surface
414 unmap_and_flush_surface(target_surface_priv
);
417 if (!target_surface_priv
->mapped
) {
418 t_buffer
->begin_frame(t_buffer
);
420 for (i
= 0; i
< 3; ++i
) {
421 target_surface_priv
->ycbcr
[i
].num_blocks_added
= 0;
422 target_surface_priv
->ycbcr
[i
].stream
= t_buffer
->get_ycbcr_stream(t_buffer
, i
);
423 target_surface_priv
->ycbcr
[i
].buffer
= t_buffer
->get_ycbcr_buffer(t_buffer
, i
);
426 for (i
= 0; i
< 2; ++i
) {
427 target_surface_priv
->ref
[i
].surface
= i
== 0 ? past_surface
: future_surface
;
429 if (target_surface_priv
->ref
[i
].surface
)
430 target_surface_priv
->ref
[i
].mv
= t_buffer
->get_mv_stream(t_buffer
, i
);
432 target_surface_priv
->ref
[i
].mv
= NULL
;
435 target_surface_priv
->mapped
= 1;
438 MacroBlocksToPipe(target_surface_priv
, picture_structure
, xvmc_mb
, blocks
, num_macroblocks
);
440 XVMC_MSG(XVMC_TRACE
, "[XvMC] Submitted surface %p for rendering.\n", target_surface
);
446 Status
XvMCFlushSurface(Display
*dpy
, XvMCSurface
*surface
)
451 return XvMCBadSurface
;
453 // don't call flush here, because this is usually
454 // called once for every slice instead of every frame
456 XVMC_MSG(XVMC_TRACE
, "[XvMC] Flushing surface %p\n", surface
);
462 Status
XvMCSyncSurface(Display
*dpy
, XvMCSurface
*surface
)
467 return XvMCBadSurface
;
469 XVMC_MSG(XVMC_TRACE
, "[XvMC] Syncing surface %p\n", surface
);
475 Status
XvMCPutSurface(Display
*dpy
, XvMCSurface
*surface
, Drawable drawable
,
476 short srcx
, short srcy
, unsigned short srcw
, unsigned short srch
,
477 short destx
, short desty
, unsigned short destw
, unsigned short desth
,
480 static int dump_window
= -1;
482 struct pipe_video_context
*vpipe
;
483 struct pipe_video_compositor
*compositor
;
485 XvMCSurfacePrivate
*surface_priv
;
486 XvMCContextPrivate
*context_priv
;
487 XvMCSubpicturePrivate
*subpicture_priv
;
488 XvMCContext
*context
;
489 struct pipe_video_rect src_rect
= {srcx
, srcy
, srcw
, srch
};
490 struct pipe_video_rect dst_rect
= {destx
, desty
, destw
, desth
};
492 XVMC_MSG(XVMC_TRACE
, "[XvMC] Displaying surface %p.\n", surface
);
496 if (!surface
|| !surface
->privData
)
497 return XvMCBadSurface
;
499 surface_priv
= surface
->privData
;
500 context
= surface_priv
->context
;
501 context_priv
= context
->privData
;
503 assert(flags
== XVMC_TOP_FIELD
|| flags
== XVMC_BOTTOM_FIELD
|| flags
== XVMC_FRAME_PICTURE
);
504 assert(srcx
+ srcw
- 1 < surface
->width
);
505 assert(srcy
+ srch
- 1 < surface
->height
);
507 subpicture_priv
= surface_priv
->subpicture
? surface_priv
->subpicture
->privData
: NULL
;
508 vpipe
= context_priv
->vctx
->vpipe
;
509 compositor
= context_priv
->compositor
;
511 if (!context_priv
->drawable_surface
||
512 context_priv
->dst_rect
.x
!= dst_rect
.x
|| context_priv
->dst_rect
.y
!= dst_rect
.y
||
513 context_priv
->dst_rect
.w
!= dst_rect
.w
|| context_priv
->dst_rect
.h
!= dst_rect
.h
) {
515 context_priv
->drawable_surface
= vl_drawable_surface_get(context_priv
->vctx
, drawable
);
516 context_priv
->dst_rect
= dst_rect
;
517 compositor
->reset_dirty_area(compositor
);
520 if (!context_priv
->drawable_surface
)
524 * Some apps (mplayer) hit these asserts because they call
525 * this function after the window has been resized by the WM
526 * but before they've handled the corresponding XEvent and
527 * know about the new dimensions. The output should be clipped
528 * until the app updates destw and desth.
531 assert(destx + destw - 1 < drawable_surface->width);
532 assert(desty + desth - 1 < drawable_surface->height);
535 unmap_and_flush_surface(surface_priv
);
537 compositor
->clear_layers(compositor
);
538 compositor
->set_buffer_layer(compositor
, 0, surface_priv
->video_buffer
, &src_rect
, NULL
);
540 if (subpicture_priv
) {
541 XVMC_MSG(XVMC_TRACE
, "[XvMC] Surface %p has subpicture %p.\n", surface
, surface_priv
->subpicture
);
543 assert(subpicture_priv
->surface
== surface
);
545 if (subpicture_priv
->palette
)
546 compositor
->set_palette_layer(compositor
, 1, subpicture_priv
->sampler
, subpicture_priv
->palette
,
547 &subpicture_priv
->src_rect
, &subpicture_priv
->dst_rect
);
549 compositor
->set_rgba_layer(compositor
, 1, subpicture_priv
->sampler
,
550 &subpicture_priv
->src_rect
, &subpicture_priv
->dst_rect
);
552 surface_priv
->subpicture
= NULL
;
553 subpicture_priv
->surface
= NULL
;
556 // Workaround for r600g, there seems to be a bug in the fence refcounting code
557 vpipe
->screen
->fence_reference(vpipe
->screen
, &surface_priv
->fence
, NULL
);
559 compositor
->render_picture(compositor
, PictureToPipe(flags
), context_priv
->drawable_surface
, &dst_rect
, &surface_priv
->fence
);
561 XVMC_MSG(XVMC_TRACE
, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface
);
563 vpipe
->screen
->flush_frontbuffer
566 context_priv
->drawable_surface
->texture
,
568 vl_contextprivate_get(context_priv
->vctx
, context_priv
->drawable_surface
)
571 if(dump_window
== -1) {
572 dump_window
= debug_get_num_option("XVMC_DUMP", 0);
576 static unsigned int framenum
= 0;
579 sprintf(cmd
, "xwd -id %d -out xvmc_frame_%08d.xwd", (int)drawable
, ++framenum
);
580 if (system(cmd
) != 0)
581 XVMC_MSG(XVMC_ERR
, "[XvMC] Dumping surface %p failed.\n", surface
);
584 XVMC_MSG(XVMC_TRACE
, "[XvMC] Pushed surface %p to front buffer.\n", surface
);
590 Status
XvMCGetSurfaceStatus(Display
*dpy
, XvMCSurface
*surface
, int *status
)
592 struct pipe_video_context
*vpipe
;
593 XvMCSurfacePrivate
*surface_priv
;
594 XvMCContextPrivate
*context_priv
;
599 return XvMCBadSurface
;
603 surface_priv
= surface
->privData
;
604 context_priv
= surface_priv
->context
->privData
;
605 vpipe
= context_priv
->vctx
->vpipe
;
609 if (surface_priv
->fence
)
610 if (!vpipe
->screen
->fence_signalled(vpipe
->screen
, surface_priv
->fence
))
611 *status
|= XVMC_RENDERING
;
617 Status
XvMCDestroySurface(Display
*dpy
, XvMCSurface
*surface
)
619 XvMCSurfacePrivate
*surface_priv
;
621 XVMC_MSG(XVMC_TRACE
, "[XvMC] Destroying surface %p.\n", surface
);
625 if (!surface
|| !surface
->privData
)
626 return XvMCBadSurface
;
628 surface_priv
= surface
->privData
;
629 surface_priv
->decode_buffer
->destroy(surface_priv
->decode_buffer
);
630 surface_priv
->video_buffer
->destroy(surface_priv
->video_buffer
);
632 surface
->privData
= NULL
;
634 XVMC_MSG(XVMC_TRACE
, "[XvMC] Surface %p destroyed.\n", surface
);
640 Status
XvMCHideSurface(Display
*dpy
, XvMCSurface
*surface
)
644 if (!surface
|| !surface
->privData
)
645 return XvMCBadSurface
;
647 /* No op, only for overlaid rendering */