gallium: make p_winsys internal
[mesa.git] / src / gallium / state_trackers / g3dvl / vl_r16snorm_mc_buf.c
#define VL_INTERNAL
#include "vl_r16snorm_mc_buf.h"

#include <assert.h>
#include <string.h>

#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <pipe/p_state.h>
#include <pipe/p_inlines.h>
#include <tgsi/tgsi_parse.h>
#include <tgsi/tgsi_build.h>
#include <util/u_math.h>
#include <util/u_memory.h>

#include "vl_render.h"
#include "vl_shader_build.h"
#include "vl_surface.h"
#include "vl_util.h"
#include "vl_types.h"
#include "vl_defs.h"
18
/* Alignment (in bytes) requested for all pipe buffer allocations below. */
const unsigned int DEFAULT_BUF_ALIGNMENT = 1;
20
/*
 * Extended macroblock classification used to batch macroblocks for
 * rendering: vlFlush() draws each extended type with its own shader
 * pair, so predicted types are split into frame- and field-motion
 * variants.
 */
enum vlMacroBlockTypeEx
{
   vlMacroBlockExTypeIntra,
   vlMacroBlockExTypeFwdPredictedFrame,
   vlMacroBlockExTypeFwdPredictedField,
   vlMacroBlockExTypeBkwdPredictedFrame,
   vlMacroBlockExTypeBkwdPredictedField,
   vlMacroBlockExTypeBiPredictedFrame,
   vlMacroBlockExTypeBiPredictedField,

   vlNumMacroBlockExTypes  /* Number of entries above, not a real type. */
};
33
/*
 * Vertex shader constant buffer layout.  'denorm' holds the render
 * target texture's width/height (written once per flush in vlFlush()),
 * used by the shaders to denormalize texture coordinates.
 */
struct vlVertexShaderConsts
{
   struct vlVertex4f denorm;
};
38
/*
 * Fragment shader constant buffer layout; uploaded once in
 * vlCreateDataBufs() from the static fs_consts table.
 */
struct vlFragmentShaderConsts
{
   struct vlVertex4f multiplier;
   struct vlVertex4f div;
};
44
/*
 * Layout of one vertex in stream 0: position plus Y/Cb/Cr texture
 * coordinates.  Field order must match vertex elements 0-3 configured
 * in vlCreateDataBufs().
 */
struct vlMacroBlockVertexStream0
{
   struct vlVertex2f pos;
   struct vlVertex2f luma_tc;
   struct vlVertex2f cb_tc;
   struct vlVertex2f cr_tc;
};
52
/*
 * State of the buffered R16_SNORM motion-compensation renderer.
 * Macroblocks are accumulated (samples into CPU-mapped textures,
 * metadata into 'macroblocks') until a full picture is buffered, then
 * rendered in one pass by vlFlush().
 */
struct vlR16SnormBufferedMC
{
   struct vlRender base;  /* Must stay first: callers cast vlRender* to this type. */

   unsigned int picture_width;
   unsigned int picture_height;
   enum vlFormat picture_format;
   unsigned int macroblocks_per_picture;

   struct vlSurface *buffered_surface;       /* Destination being accumulated; NULL when idle. */
   struct vlSurface *past_surface;           /* Forward reference picture. */
   struct vlSurface *future_surface;         /* Backward reference picture. */
   struct vlVertex2f surface_tex_inv_size;   /* 1 / destination texture width & height. */
   struct vlVertex2f zero_block[3];          /* Cached texcoord of a zeroed block per channel; x < 0 means "not cached yet". */
   unsigned int num_macroblocks;             /* Macroblocks grabbed so far this picture. */
   struct vlMpeg2MacroBlock *macroblocks;    /* Staging array, macroblocks_per_picture entries. */
   struct pipe_surface *tex_surface[3];      /* Y/Cb/Cr surfaces, held while mapped for CPU writes. */
   short *texels[3];                         /* CPU mappings of tex_surface[]. */

   struct pipe_context *pipe;
   struct pipe_viewport_state viewport;
   struct pipe_framebuffer_state render_target;

   /* Sampler states: Y, Cb, Cr source textures plus two reference surfaces. */
   union
   {
      void *all[5];
      struct
      {
         void *y;
         void *cb;
         void *cr;
         void *ref[2];
      };
   } samplers;

   /* Bound textures; ref[] slots are repointed at the past/future
      surface textures at draw time in vlFlush(). */
   union
   {
      struct pipe_texture *all[5];
      struct
      {
         struct pipe_texture *y;
         struct pipe_texture *cb;
         struct pipe_texture *cr;
         struct pipe_texture *ref[2];
      };
   } textures;

   /* Stream 0 carries position + Y/Cb/Cr texcoords; streams 1-2 carry
      per-vertex motion vectors for the two reference surfaces. */
   union
   {
      struct pipe_vertex_buffer all[3];
      struct
      {
         struct pipe_vertex_buffer ycbcr;
         struct pipe_vertex_buffer ref[2];
      };
   } vertex_bufs;

   /* Shader pairs: intra, predicted (frame/field), bi-predicted (frame/field). */
   void *i_vs, *p_vs[2], *b_vs[2];
   void *i_fs, *p_fs[2], *b_fs[2];
   struct pipe_vertex_element vertex_elems[8];
   struct pipe_constant_buffer vs_const_buf;
   struct pipe_constant_buffer fs_const_buf;
};
116
/*
 * Begin a rendering batch.  This renderer needs no per-batch setup, so
 * the call only validates its argument.  Always returns 0.
 */
static inline int vlBegin(struct vlRender *render)
{
   assert(render);
   return 0;
}
126
127 static inline int vlGrabFrameCodedBlock(short *src, short *dst, unsigned int dst_pitch)
128 {
129 unsigned int y;
130
131 for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
132 memcpy
133 (
134 dst + y * dst_pitch,
135 src + y * VL_BLOCK_WIDTH,
136 VL_BLOCK_WIDTH * 2
137 );
138
139 return 0;
140 }
141
142 static inline int vlGrabFieldCodedBlock(short *src, short *dst, unsigned int dst_pitch)
143 {
144 unsigned int y;
145
146 for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
147 memcpy
148 (
149 dst + y * dst_pitch * 2,
150 src + y * VL_BLOCK_WIDTH,
151 VL_BLOCK_WIDTH * 2
152 );
153
154 return 0;
155 }
156
157 static inline int vlGrabNoBlock(short *dst, unsigned int dst_pitch)
158 {
159 unsigned int y;
160
161 for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
162 memset
163 (
164 dst + y * dst_pitch,
165 0,
166 VL_BLOCK_WIDTH * 2
167 );
168
169 return 0;
170 }
171
/*
 * Copy one macroblock's coded DCT blocks into the mapped Y/Cb/Cr
 * textures at the macroblock's position.  'coded_block_pattern' uses
 * bits 5..2 for the four luma blocks (top-left first) and bits 1..0 for
 * the two chroma blocks; 'blocks' packs only the blocks that are
 * actually coded.  The first absent block per channel is zero-filled
 * and its texcoord cached in mc->zero_block[], so later macroblocks can
 * point their texcoords there instead of re-clearing texels.
 * Always returns 0.
 */
static inline int vlGrabBlocks
(
   struct vlR16SnormBufferedMC *mc,
   unsigned int mbx,
   unsigned int mby,
   enum vlDCTType dct_type,
   unsigned int coded_block_pattern,
   short *blocks
)
{
   short *texels;
   unsigned int tex_pitch;
   unsigned int x, y, tb = 0, sb = 0;
   unsigned int mbpx = mbx * VL_MACROBLOCK_WIDTH, mbpy = mby * VL_MACROBLOCK_HEIGHT;

   assert(mc);
   assert(blocks);

   /* Pitch in texels, not bytes. */
   tex_pitch = mc->tex_surface[0]->stride / mc->tex_surface[0]->block.size;
   texels = mc->texels[0] + mbpy * tex_pitch + mbpx;

   /* Four luma blocks in a 2x2 arrangement; tb walks cbp bits 5..2,
      sb indexes the packed source blocks. */
   for (y = 0; y < 2; ++y)
   {
      for (x = 0; x < 2; ++x, ++tb)
      {
         if ((coded_block_pattern >> (5 - tb)) & 1)
         {
            short *cur_block = blocks + sb * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;

            if (dct_type == vlDCTTypeFrameCoded)
            {
               vlGrabFrameCodedBlock
               (
                  cur_block,
                  texels + y * tex_pitch * VL_BLOCK_HEIGHT + x * VL_BLOCK_WIDTH,
                  tex_pitch
               );
            }
            else
            {
               /* Field coded: rows interleave at double pitch, so the
                  second field starts just one row down. */
               vlGrabFieldCodedBlock
               (
                  cur_block,
                  texels + y * tex_pitch + x * VL_BLOCK_WIDTH,
                  tex_pitch
               );
            }

            ++sb;
         }
         else if (mc->zero_block[0].x < 0.0f)
         {
            /* First uncoded luma block this picture: clear it and cache
               its texcoord for reuse. */
            vlGrabNoBlock(texels + y * tex_pitch * VL_BLOCK_HEIGHT + x * VL_BLOCK_WIDTH, tex_pitch);

            mc->zero_block[0].x = (mbpx + x * 8) * mc->surface_tex_inv_size.x;
            mc->zero_block[0].y = (mbpy + y * 8) * mc->surface_tex_inv_size.y;
         }
      }
   }

   /* TODO: Implement 422, 444 */
   /* Chroma is subsampled by two in each direction (4:2:0 only). */
   mbpx >>= 1;
   mbpy >>= 1;

   /* One Cb and one Cr block, selected by cbp bits 1 and 0. */
   for (tb = 0; tb < 2; ++tb)
   {
      tex_pitch = mc->tex_surface[tb + 1]->stride / mc->tex_surface[tb + 1]->block.size;
      texels = mc->texels[tb + 1] + mbpy * tex_pitch + mbpx;

      if ((coded_block_pattern >> (1 - tb)) & 1)
      {
         short *cur_block = blocks + sb * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;

         vlGrabFrameCodedBlock
         (
            cur_block,
            texels,
            tex_pitch
         );

         ++sb;
      }
      else if (mc->zero_block[tb + 1].x < 0.0f)
      {
         vlGrabNoBlock(texels, tex_pitch);

         mc->zero_block[tb + 1].x = (mbpx << 1) * mc->surface_tex_inv_size.x;
         mc->zero_block[tb + 1].y = (mbpy << 1) * mc->surface_tex_inv_size.y;
      }
   }

   return 0;
}
265
266 static inline enum vlMacroBlockTypeEx vlGetMacroBlockTypeEx(struct vlMpeg2MacroBlock *mb)
267 {
268 assert(mb);
269
270 switch (mb->mb_type)
271 {
272 case vlMacroBlockTypeIntra:
273 return vlMacroBlockExTypeIntra;
274 case vlMacroBlockTypeFwdPredicted:
275 return mb->mo_type == vlMotionTypeFrame ?
276 vlMacroBlockExTypeFwdPredictedFrame : vlMacroBlockExTypeFwdPredictedField;
277 case vlMacroBlockTypeBkwdPredicted:
278 return mb->mo_type == vlMotionTypeFrame ?
279 vlMacroBlockExTypeBkwdPredictedFrame : vlMacroBlockExTypeBkwdPredictedField;
280 case vlMacroBlockTypeBiPredicted:
281 return mb->mo_type == vlMotionTypeFrame ?
282 vlMacroBlockExTypeBiPredictedFrame : vlMacroBlockExTypeBiPredictedField;
283 default:
284 assert(0);
285 }
286
287 /* Unreachable */
288 return -1;
289 }
290
291 static inline int vlGrabMacroBlock
292 (
293 struct vlR16SnormBufferedMC *mc,
294 struct vlMpeg2MacroBlock *macroblock
295 )
296 {
297 assert(mc);
298 assert(macroblock);
299 assert(mc->num_macroblocks < mc->macroblocks_per_picture);
300
301 mc->macroblocks[mc->num_macroblocks].mbx = macroblock->mbx;
302 mc->macroblocks[mc->num_macroblocks].mby = macroblock->mby;
303 mc->macroblocks[mc->num_macroblocks].mb_type = macroblock->mb_type;
304 mc->macroblocks[mc->num_macroblocks].mo_type = macroblock->mo_type;
305 mc->macroblocks[mc->num_macroblocks].dct_type = macroblock->dct_type;
306 mc->macroblocks[mc->num_macroblocks].PMV[0][0][0] = macroblock->PMV[0][0][0];
307 mc->macroblocks[mc->num_macroblocks].PMV[0][0][1] = macroblock->PMV[0][0][1];
308 mc->macroblocks[mc->num_macroblocks].PMV[0][1][0] = macroblock->PMV[0][1][0];
309 mc->macroblocks[mc->num_macroblocks].PMV[0][1][1] = macroblock->PMV[0][1][1];
310 mc->macroblocks[mc->num_macroblocks].PMV[1][0][0] = macroblock->PMV[1][0][0];
311 mc->macroblocks[mc->num_macroblocks].PMV[1][0][1] = macroblock->PMV[1][0][1];
312 mc->macroblocks[mc->num_macroblocks].PMV[1][1][0] = macroblock->PMV[1][1][0];
313 mc->macroblocks[mc->num_macroblocks].PMV[1][1][1] = macroblock->PMV[1][1][1];
314 mc->macroblocks[mc->num_macroblocks].cbp = macroblock->cbp;
315 mc->macroblocks[mc->num_macroblocks].blocks = macroblock->blocks;
316
317 vlGrabBlocks
318 (
319 mc,
320 macroblock->mbx,
321 macroblock->mby,
322 macroblock->dct_type,
323 macroblock->cbp,
324 macroblock->blocks
325 );
326
327 mc->num_macroblocks++;
328
329 return 0;
330 }
331
/*
 * Emit the six vertices (two triangles) of one 8x8 block quad into the
 * stream-0 vertex array 'vb'.  The position quad is always generated;
 * each of the luma/Cb/Cr texcoord sets either tracks the position (the
 * corresponding cbp mask bit lm/cbm/crm is set, i.e. the block is
 * coded) or points at the cached zero block 'zb' so the shader samples
 * zeros instead.
 *
 * (mbx,mby)     macroblock grid coordinates
 * (unitx,unity) macroblock size in texcoord units
 * (ofsx,ofsy)   this block's offset inside the macroblock
 * (hx,hy)       one block (half a macroblock) in texcoord units
 */
#define SET_BLOCK(vb, cbp, mbx, mby, unitx, unity, ofsx, ofsy, hx, hy, lm, cbm, crm, zb) \
do { \
   (vb)[0].pos.x = (mbx) * (unitx) + (ofsx); (vb)[0].pos.y = (mby) * (unity) + (ofsy); \
   (vb)[1].pos.x = (mbx) * (unitx) + (ofsx); (vb)[1].pos.y = (mby) * (unity) + (ofsy) + (hy); \
   (vb)[2].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].pos.y = (mby) * (unity) + (ofsy); \
   (vb)[3].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].pos.y = (mby) * (unity) + (ofsy); \
   (vb)[4].pos.x = (mbx) * (unitx) + (ofsx); (vb)[4].pos.y = (mby) * (unity) + (ofsy) + (hy); \
   (vb)[5].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].pos.y = (mby) * (unity) + (ofsy) + (hy); \
   \
   if ((cbp) & (lm)) \
   { \
      (vb)[0].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].luma_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[1].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[2].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].luma_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[3].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].luma_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[4].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[5].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
   } \
   else \
   { \
      (vb)[0].luma_tc.x = (zb)[0].x; (vb)[0].luma_tc.y = (zb)[0].y; \
      (vb)[1].luma_tc.x = (zb)[0].x; (vb)[1].luma_tc.y = (zb)[0].y + (hy); \
      (vb)[2].luma_tc.x = (zb)[0].x + (hx); (vb)[2].luma_tc.y = (zb)[0].y; \
      (vb)[3].luma_tc.x = (zb)[0].x + (hx); (vb)[3].luma_tc.y = (zb)[0].y; \
      (vb)[4].luma_tc.x = (zb)[0].x; (vb)[4].luma_tc.y = (zb)[0].y + (hy); \
      (vb)[5].luma_tc.x = (zb)[0].x + (hx); (vb)[5].luma_tc.y = (zb)[0].y + (hy); \
   } \
   \
   if ((cbp) & (cbm)) \
   { \
      (vb)[0].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cb_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[1].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[2].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cb_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[3].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cb_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[4].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[5].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
   } \
   else \
   { \
      (vb)[0].cb_tc.x = (zb)[1].x; (vb)[0].cb_tc.y = (zb)[1].y; \
      (vb)[1].cb_tc.x = (zb)[1].x; (vb)[1].cb_tc.y = (zb)[1].y + (hy); \
      (vb)[2].cb_tc.x = (zb)[1].x + (hx); (vb)[2].cb_tc.y = (zb)[1].y; \
      (vb)[3].cb_tc.x = (zb)[1].x + (hx); (vb)[3].cb_tc.y = (zb)[1].y; \
      (vb)[4].cb_tc.x = (zb)[1].x; (vb)[4].cb_tc.y = (zb)[1].y + (hy); \
      (vb)[5].cb_tc.x = (zb)[1].x + (hx); (vb)[5].cb_tc.y = (zb)[1].y + (hy); \
   } \
   \
   if ((cbp) & (crm)) \
   { \
      (vb)[0].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cr_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[1].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[2].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cr_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[3].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cr_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[4].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[5].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
   } \
   else \
   { \
      (vb)[0].cr_tc.x = (zb)[2].x; (vb)[0].cr_tc.y = (zb)[2].y; \
      (vb)[1].cr_tc.x = (zb)[2].x; (vb)[1].cr_tc.y = (zb)[2].y + (hy); \
      (vb)[2].cr_tc.x = (zb)[2].x + (hx); (vb)[2].cr_tc.y = (zb)[2].y; \
      (vb)[3].cr_tc.x = (zb)[2].x + (hx); (vb)[3].cr_tc.y = (zb)[2].y; \
      (vb)[4].cr_tc.x = (zb)[2].x; (vb)[4].cr_tc.y = (zb)[2].y + (hy); \
      (vb)[5].cr_tc.x = (zb)[2].x + (hx); (vb)[5].cr_tc.y = (zb)[2].y + (hy); \
   } \
} while (0)
398
/*
 * Write the vertex data for one macroblock at slot 'pos' of its type's
 * contiguous run.  Stream 0 (ycbcr_vb) receives 24 vertices — four 8x8
 * block quads of six vertices each — carrying position and Y/Cb/Cr
 * texcoords via SET_BLOCK.  Predicted macroblocks additionally write
 * one motion-vector pair per vertex into ref_vb[0] (first reference)
 * and, for bi-predicted blocks, ref_vb[1] (second reference).
 * The switch cases intentionally fall through so every type also runs
 * the stream-0 setup at the bottom.  Always returns 0.
 */
static inline int vlGenMacroblockVerts
(
   struct vlR16SnormBufferedMC *mc,
   struct vlMpeg2MacroBlock *macroblock,
   unsigned int pos,
   struct vlMacroBlockVertexStream0 *ycbcr_vb,
   struct vlVertex2f **ref_vb
)
{
   struct vlVertex2f mo_vec[2];
   unsigned int i;

   assert(mc);
   assert(macroblock);
   assert(ycbcr_vb);
   assert(pos < mc->macroblocks_per_picture);

   switch (macroblock->mb_type)
   {
      case vlMacroBlockTypeBiPredicted:
      {
         struct vlVertex2f *vb;

         assert(ref_vb && ref_vb[1]);

         /* Two motion vectors (even/odd slots) per vertex, 24 vertices. */
         vb = ref_vb[1] + pos * 2 * 24;

         /* PMV is halved (half-sample units) and normalized to texcoords. */
         mo_vec[0].x = macroblock->PMV[0][1][0] * 0.5f * mc->surface_tex_inv_size.x;
         mo_vec[0].y = macroblock->PMV[0][1][1] * 0.5f * mc->surface_tex_inv_size.y;

         if (macroblock->mo_type == vlMotionTypeFrame)
         {
            /* Frame motion: only the even slots are used. */
            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
            }
         }
         else
         {
            /* Field motion: a second vector fills the odd slots. */
            mo_vec[1].x = macroblock->PMV[1][1][0] * 0.5f * mc->surface_tex_inv_size.x;
            mo_vec[1].y = macroblock->PMV[1][1][1] * 0.5f * mc->surface_tex_inv_size.y;

            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
               vb[i + 1].x = mo_vec[1].x;
               vb[i + 1].y = mo_vec[1].y;
            }
         }

         /* fall-through */
      }
      case vlMacroBlockTypeFwdPredicted:
      case vlMacroBlockTypeBkwdPredicted:
      {
         struct vlVertex2f *vb;

         assert(ref_vb && ref_vb[0]);

         vb = ref_vb[0] + pos * 2 * 24;

         /* Backward prediction reads PMV[.][1], forward reads PMV[.][0]. */
         if (macroblock->mb_type == vlMacroBlockTypeBkwdPredicted)
         {
            mo_vec[0].x = macroblock->PMV[0][1][0] * 0.5f * mc->surface_tex_inv_size.x;
            mo_vec[0].y = macroblock->PMV[0][1][1] * 0.5f * mc->surface_tex_inv_size.y;

            if (macroblock->mo_type == vlMotionTypeField)
            {
               mo_vec[1].x = macroblock->PMV[1][1][0] * 0.5f * mc->surface_tex_inv_size.x;
               mo_vec[1].y = macroblock->PMV[1][1][1] * 0.5f * mc->surface_tex_inv_size.y;
            }
         }
         else
         {
            mo_vec[0].x = macroblock->PMV[0][0][0] * 0.5f * mc->surface_tex_inv_size.x;
            mo_vec[0].y = macroblock->PMV[0][0][1] * 0.5f * mc->surface_tex_inv_size.y;

            if (macroblock->mo_type == vlMotionTypeField)
            {
               mo_vec[1].x = macroblock->PMV[1][0][0] * 0.5f * mc->surface_tex_inv_size.x;
               mo_vec[1].y = macroblock->PMV[1][0][1] * 0.5f * mc->surface_tex_inv_size.y;
            }
         }

         if (macroblock->mo_type == vlMotionTypeFrame)
         {
            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
            }
         }
         else
         {
            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
               vb[i + 1].x = mo_vec[1].x;
               vb[i + 1].y = mo_vec[1].y;
            }
         }

         /* fall-through */
      }
      case vlMacroBlockTypeIntra:
      {
         /* Macroblock and half-macroblock (= one block) extents in
            normalized texcoord units. */
         const struct vlVertex2f unit =
         {
            mc->surface_tex_inv_size.x * VL_MACROBLOCK_WIDTH,
            mc->surface_tex_inv_size.y * VL_MACROBLOCK_HEIGHT
         };
         const struct vlVertex2f half =
         {
            mc->surface_tex_inv_size.x * (VL_MACROBLOCK_WIDTH / 2),
            mc->surface_tex_inv_size.y * (VL_MACROBLOCK_HEIGHT / 2)
         };

         struct vlMacroBlockVertexStream0 *vb;

         vb = ycbcr_vb + pos * 24;

         /* Four block quads (6 verts each).  cbp masks: 32/16/8/4 pick
            the four luma blocks, 2 picks Cb, 1 picks Cr. */
         SET_BLOCK
         (
            vb,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, 0, 0, half.x, half.y,
            32, 2, 1, mc->zero_block
         );

         SET_BLOCK
         (
            vb + 6,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, half.x, 0, half.x, half.y,
            16, 2, 1, mc->zero_block
         );

         SET_BLOCK
         (
            vb + 12,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, 0, half.y, half.x, half.y,
            8, 2, 1, mc->zero_block
         );

         SET_BLOCK
         (
            vb + 18,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, half.x, half.y, half.x, half.y,
            4, 2, 1, mc->zero_block
         );

         break;
      }
      default:
         assert(0);
   }

   return 0;
}
563
/*
 * Render all macroblocks buffered for the current picture.  This is a
 * no-op until a complete picture has been grabbed.  Macroblock vertices
 * are written into one contiguous run per extended type, so each
 * non-empty type is rendered with a single draw_arrays() call using its
 * own shader pair and texture/sampler set.  Always returns 0.
 */
static int vlFlush
(
   struct vlRender *render
)
{
   struct vlR16SnormBufferedMC *mc;
   struct pipe_context *pipe;
   struct vlVertexShaderConsts *vs_consts;
   unsigned int num_macroblocks[vlNumMacroBlockExTypes] = {0};
   unsigned int offset[vlNumMacroBlockExTypes];
   unsigned int vb_start = 0;
   unsigned int i;

   assert(render);

   mc = (struct vlR16SnormBufferedMC*)render;

   if (!mc->buffered_surface)
      return 0;

   /* Keep buffering until the whole picture has arrived. */
   if (mc->num_macroblocks < mc->macroblocks_per_picture)
      return 0;

   assert(mc->num_macroblocks <= mc->macroblocks_per_picture);

   pipe = mc->pipe;

   /* Count macroblocks per extended type... */
   for (i = 0; i < mc->num_macroblocks; ++i)
   {
      enum vlMacroBlockTypeEx mb_type_ex = vlGetMacroBlockTypeEx(&mc->macroblocks[i]);

      num_macroblocks[mb_type_ex]++;
   }

   /* ...and convert the counts to prefix offsets so each type gets a
      contiguous run of the vertex buffers. */
   offset[0] = 0;

   for (i = 1; i < vlNumMacroBlockExTypes; ++i)
      offset[i] = offset[i - 1] + num_macroblocks[i - 1];

   {
      struct vlMacroBlockVertexStream0 *ycbcr_vb;
      struct vlVertex2f *ref_vb[2];

      ycbcr_vb = (struct vlMacroBlockVertexStream0*)pipe_buffer_map
      (
         pipe->screen,
         mc->vertex_bufs.ycbcr.buffer,
         PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
      );

      for (i = 0; i < 2; ++i)
         ref_vb[i] = (struct vlVertex2f*)pipe_buffer_map
         (
            pipe->screen,
            mc->vertex_bufs.ref[i].buffer,
            PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
         );

      /* Generate vertices for every macroblock; offset[] doubles as the
         per-type write cursor here. */
      for (i = 0; i < mc->num_macroblocks; ++i)
      {
         enum vlMacroBlockTypeEx mb_type_ex = vlGetMacroBlockTypeEx(&mc->macroblocks[i]);

         vlGenMacroblockVerts(mc, &mc->macroblocks[i], offset[mb_type_ex], ycbcr_vb, ref_vb);

         offset[mb_type_ex]++;
      }

      pipe_buffer_unmap(pipe->screen, mc->vertex_bufs.ycbcr.buffer);
      for (i = 0; i < 2; ++i)
         pipe_buffer_unmap(pipe->screen, mc->vertex_bufs.ref[i].buffer);
   }

   /* Sample data is complete; release the CPU mappings created in
      vlRenderMacroBlocksMpeg2R16SnormBuffered(). */
   for (i = 0; i < 3; ++i)
   {
      pipe_surface_unmap(mc->tex_surface[i]);
      pipe_surface_reference(&mc->tex_surface[i], NULL);
   }

   /* Render into the destination surface's texture. */
   mc->render_target.cbufs[0] = pipe->screen->get_tex_surface
   (
      pipe->screen,
      mc->buffered_surface->texture,
      0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE
   );

   pipe->set_framebuffer_state(pipe, &mc->render_target);
   pipe->set_viewport_state(pipe, &mc->viewport);
   /* Vertex shader denormalizes texcoords with the texture dimensions. */
   vs_consts = pipe_buffer_map
   (
      pipe->screen,
      mc->vs_const_buf.buffer,
      PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
   );

   vs_consts->denorm.x = mc->buffered_surface->texture->width[0];
   vs_consts->denorm.y = mc->buffered_surface->texture->height[0];

   pipe_buffer_unmap(pipe->screen, mc->vs_const_buf.buffer);
   pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &mc->vs_const_buf);
   pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &mc->fs_const_buf);

   /* Intra: stream 0 only, Y/Cb/Cr textures, no references. */
   if (num_macroblocks[vlMacroBlockExTypeIntra] > 0)
   {
      pipe->set_vertex_buffers(pipe, 1, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 4, mc->vertex_elems);
      pipe->set_sampler_textures(pipe, 3, mc->textures.all);
      pipe->bind_sampler_states(pipe, 3, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->i_vs);
      pipe->bind_fs_state(pipe, mc->i_fs);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeIntra] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeIntra] * 24;
   }

   /* Forward predicted (frame motion): one reference = past surface. */
   if (num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[0]);
      pipe->bind_fs_state(pipe, mc->p_fs[0]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] * 24;
   }

   /* Forward predicted (field motion). */
   if (num_macroblocks[vlMacroBlockExTypeFwdPredictedField] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[1]);
      pipe->bind_fs_state(pipe, mc->p_fs[1]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeFwdPredictedField] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeFwdPredictedField] * 24;
   }

   /* Backward predicted (frame motion): reference = future surface. */
   if (num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[0]);
      pipe->bind_fs_state(pipe, mc->p_fs[0]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] * 24;
   }

   /* Backward predicted (field motion). */
   if (num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[1]);
      pipe->bind_fs_state(pipe, mc->p_fs[1]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] * 24;
   }

   /* Bi-predicted (frame motion): both references, all three streams. */
   if (num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] > 0)
   {
      pipe->set_vertex_buffers(pipe, 3, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 8, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      mc->textures.ref[1] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 5, mc->textures.all);
      pipe->bind_sampler_states(pipe, 5, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->b_vs[0]);
      pipe->bind_fs_state(pipe, mc->b_fs[0]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] * 24;
   }

   /* Bi-predicted (field motion). */
   if (num_macroblocks[vlMacroBlockExTypeBiPredictedField] > 0)
   {
      pipe->set_vertex_buffers(pipe, 3, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 8, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      mc->textures.ref[1] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 5, mc->textures.all);
      pipe->bind_sampler_states(pipe, 5, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->b_vs[1]);
      pipe->bind_fs_state(pipe, mc->b_fs[1]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBiPredictedField] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBiPredictedField] * 24;
   }

   /* Kick off rendering and signal the surface's fence on completion. */
   pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, &mc->buffered_surface->render_fence);
   pipe_surface_reference(&mc->render_target.cbufs[0], NULL);

   /* Reset picture state: invalidate the zero-block cache and go idle. */
   for (i = 0; i < 3; ++i)
      mc->zero_block[i].x = -1.0f;

   mc->buffered_surface = NULL;
   mc->num_macroblocks = 0;

   return 0;
}
775
776 static int vlRenderMacroBlocksMpeg2R16SnormBuffered
777 (
778 struct vlRender *render,
779 struct vlMpeg2MacroBlockBatch *batch,
780 struct vlSurface *surface
781 )
782 {
783 struct vlR16SnormBufferedMC *mc;
784 bool new_surface = false;
785 unsigned int i;
786
787 assert(render);
788
789 mc = (struct vlR16SnormBufferedMC*)render;
790
791 if (mc->buffered_surface)
792 {
793 if (mc->buffered_surface != surface)
794 {
795 vlFlush(&mc->base);
796 new_surface = true;
797 }
798 }
799 else
800 new_surface = true;
801
802 if (new_surface)
803 {
804 mc->buffered_surface = surface;
805 mc->past_surface = batch->past_surface;
806 mc->future_surface = batch->future_surface;
807 mc->surface_tex_inv_size.x = 1.0f / surface->texture->width[0];
808 mc->surface_tex_inv_size.y = 1.0f / surface->texture->height[0];
809
810 for (i = 0; i < 3; ++i)
811 {
812 mc->tex_surface[i] = mc->pipe->screen->get_tex_surface
813 (
814 mc->pipe->screen,
815 mc->textures.all[i],
816 0, 0, 0, PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
817 );
818
819 mc->texels[i] = pipe_surface_map(mc->tex_surface[i], PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD);
820 }
821 }
822
823 for (i = 0; i < batch->num_macroblocks; ++i)
824 vlGrabMacroBlock(mc, &batch->macroblocks[i]);
825
826 return 0;
827 }
828
/*
 * End a rendering batch.  Nothing to do for this renderer; the call
 * only validates its argument.  Always returns 0.
 */
static inline int vlEnd(struct vlRender *render)
{
   assert(render);
   return 0;
}
838
839 static int vlDestroy
840 (
841 struct vlRender *render
842 )
843 {
844 struct vlR16SnormBufferedMC *mc;
845 struct pipe_context *pipe;
846 unsigned int i;
847
848 assert(render);
849
850 mc = (struct vlR16SnormBufferedMC*)render;
851 pipe = mc->pipe;
852
853 for (i = 0; i < 5; ++i)
854 pipe->delete_sampler_state(pipe, mc->samplers.all[i]);
855
856 for (i = 0; i < 3; ++i)
857 pipe_buffer_reference(pipe->screen, &mc->vertex_bufs.all[i].buffer, NULL);
858
859 /* Textures 3 & 4 are not created directly, no need to release them here */
860 for (i = 0; i < 3; ++i)
861 pipe_texture_reference(&mc->textures.all[i], NULL);
862
863 pipe->delete_vs_state(pipe, mc->i_vs);
864 pipe->delete_fs_state(pipe, mc->i_fs);
865
866 for (i = 0; i < 2; ++i)
867 {
868 pipe->delete_vs_state(pipe, mc->p_vs[i]);
869 pipe->delete_fs_state(pipe, mc->p_fs[i]);
870 pipe->delete_vs_state(pipe, mc->b_vs[i]);
871 pipe->delete_fs_state(pipe, mc->b_fs[i]);
872 }
873
874 pipe_buffer_reference(pipe->screen, &mc->vs_const_buf.buffer, NULL);
875 pipe_buffer_reference(pipe->screen, &mc->fs_const_buf.buffer, NULL);
876
877 FREE(mc->macroblocks);
878 FREE(mc);
879
880 return 0;
881 }
882
/*
 * Multiplier renormalizes block samples from 16 bits to 12 bits.
 * Divider is used when calculating Y % 2 for choosing top or bottom
 * field for P or B macroblocks.
 * TODO: Use immediates.
 */
static const struct vlFragmentShaderConsts fs_consts =
{
   {32767.0f / 255.0f, 32767.0f / 255.0f, 32767.0f / 255.0f, 0.0f},
   {0.5f, 2.0f, 0.0f, 0.0f}
};
894
895 #include "vl_r16snorm_mc_buf_shaders.inc"
896
897 static int vlCreateDataBufs
898 (
899 struct vlR16SnormBufferedMC *mc
900 )
901 {
902 const unsigned int mbw = align(mc->picture_width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
903 const unsigned int mbh = align(mc->picture_height, VL_MACROBLOCK_HEIGHT) / VL_MACROBLOCK_HEIGHT;
904
905 struct pipe_context *pipe;
906 unsigned int i;
907
908 assert(mc);
909
910 pipe = mc->pipe;
911 mc->macroblocks_per_picture = mbw * mbh;
912
913 /* Create our vertex buffers */
914 mc->vertex_bufs.ycbcr.pitch = sizeof(struct vlVertex2f) * 4;
915 mc->vertex_bufs.ycbcr.max_index = 24 * mc->macroblocks_per_picture - 1;
916 mc->vertex_bufs.ycbcr.buffer_offset = 0;
917 mc->vertex_bufs.ycbcr.buffer = pipe_buffer_create
918 (
919 pipe->screen,
920 DEFAULT_BUF_ALIGNMENT,
921 PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
922 sizeof(struct vlVertex2f) * 4 * 24 * mc->macroblocks_per_picture
923 );
924
925 for (i = 1; i < 3; ++i)
926 {
927 mc->vertex_bufs.all[i].pitch = sizeof(struct vlVertex2f) * 2;
928 mc->vertex_bufs.all[i].max_index = 24 * mc->macroblocks_per_picture - 1;
929 mc->vertex_bufs.all[i].buffer_offset = 0;
930 mc->vertex_bufs.all[i].buffer = pipe_buffer_create
931 (
932 pipe->screen,
933 DEFAULT_BUF_ALIGNMENT,
934 PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
935 sizeof(struct vlVertex2f) * 2 * 24 * mc->macroblocks_per_picture
936 );
937 }
938
939 /* Position element */
940 mc->vertex_elems[0].src_offset = 0;
941 mc->vertex_elems[0].vertex_buffer_index = 0;
942 mc->vertex_elems[0].nr_components = 2;
943 mc->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
944
945 /* Luma, texcoord element */
946 mc->vertex_elems[1].src_offset = sizeof(struct vlVertex2f);
947 mc->vertex_elems[1].vertex_buffer_index = 0;
948 mc->vertex_elems[1].nr_components = 2;
949 mc->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
950
951 /* Chroma Cr texcoord element */
952 mc->vertex_elems[2].src_offset = sizeof(struct vlVertex2f) * 2;
953 mc->vertex_elems[2].vertex_buffer_index = 0;
954 mc->vertex_elems[2].nr_components = 2;
955 mc->vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
956
957 /* Chroma Cb texcoord element */
958 mc->vertex_elems[3].src_offset = sizeof(struct vlVertex2f) * 3;
959 mc->vertex_elems[3].vertex_buffer_index = 0;
960 mc->vertex_elems[3].nr_components = 2;
961 mc->vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT;
962
963 /* First ref surface top field texcoord element */
964 mc->vertex_elems[4].src_offset = 0;
965 mc->vertex_elems[4].vertex_buffer_index = 1;
966 mc->vertex_elems[4].nr_components = 2;
967 mc->vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT;
968
969 /* First ref surface bottom field texcoord element */
970 mc->vertex_elems[5].src_offset = sizeof(struct vlVertex2f);
971 mc->vertex_elems[5].vertex_buffer_index = 1;
972 mc->vertex_elems[5].nr_components = 2;
973 mc->vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT;
974
975 /* Second ref surface top field texcoord element */
976 mc->vertex_elems[6].src_offset = 0;
977 mc->vertex_elems[6].vertex_buffer_index = 2;
978 mc->vertex_elems[6].nr_components = 2;
979 mc->vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT;
980
981 /* Second ref surface bottom field texcoord element */
982 mc->vertex_elems[7].src_offset = sizeof(struct vlVertex2f);
983 mc->vertex_elems[7].vertex_buffer_index = 2;
984 mc->vertex_elems[7].nr_components = 2;
985 mc->vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT;
986
987 /* Create our constant buffer */
988 mc->vs_const_buf.size = sizeof(struct vlVertexShaderConsts);
989 mc->vs_const_buf.buffer = pipe_buffer_create
990 (
991 pipe->screen,
992 DEFAULT_BUF_ALIGNMENT,
993 PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD,
994 mc->vs_const_buf.size
995 );
996
997 mc->fs_const_buf.size = sizeof(struct vlFragmentShaderConsts);
998 mc->fs_const_buf.buffer = pipe_buffer_create
999 (
1000 pipe->screen,
1001 DEFAULT_BUF_ALIGNMENT,
1002 PIPE_BUFFER_USAGE_CONSTANT,
1003 mc->fs_const_buf.size
1004 );
1005
1006 memcpy
1007 (
1008 pipe_buffer_map(pipe->screen, mc->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
1009 &fs_consts,
1010 sizeof(struct vlFragmentShaderConsts)
1011 );
1012
1013 pipe_buffer_unmap(pipe->screen, mc->fs_const_buf.buffer);
1014
1015 mc->macroblocks = MALLOC(sizeof(struct vlMpeg2MacroBlock) * mc->macroblocks_per_picture);
1016
1017 return 0;
1018 }
1019
1020 static int vlInit
1021 (
1022 struct vlR16SnormBufferedMC *mc
1023 )
1024 {
1025 struct pipe_context *pipe;
1026 struct pipe_sampler_state sampler;
1027 struct pipe_texture template;
1028 unsigned int filters[5];
1029 unsigned int i;
1030
1031 assert(mc);
1032
1033 pipe = mc->pipe;
1034
1035 mc->buffered_surface = NULL;
1036 mc->past_surface = NULL;
1037 mc->future_surface = NULL;
1038 for (i = 0; i < 3; ++i)
1039 mc->zero_block[i].x = -1.0f;
1040 mc->num_macroblocks = 0;
1041
1042 /* For MC we render to textures, which are rounded up to nearest POT */
1043 mc->viewport.scale[0] = vlRoundUpPOT(mc->picture_width);
1044 mc->viewport.scale[1] = vlRoundUpPOT(mc->picture_height);
1045 mc->viewport.scale[2] = 1;
1046 mc->viewport.scale[3] = 1;
1047 mc->viewport.translate[0] = 0;
1048 mc->viewport.translate[1] = 0;
1049 mc->viewport.translate[2] = 0;
1050 mc->viewport.translate[3] = 0;
1051
1052 mc->render_target.width = vlRoundUpPOT(mc->picture_width);
1053 mc->render_target.height = vlRoundUpPOT(mc->picture_height);
1054 mc->render_target.nr_cbufs = 1;
1055 /* FB for MC stage is a vlSurface created by the user, set at render time */
1056 mc->render_target.zsbuf = NULL;
1057
1058 filters[0] = PIPE_TEX_FILTER_NEAREST;
1059 /* FIXME: Linear causes discoloration around block edges */
1060 filters[1] = /*mc->picture_format == vlFormatYCbCr444 ?*/ PIPE_TEX_FILTER_NEAREST /*: PIPE_TEX_FILTER_LINEAR*/;
1061 filters[2] = /*mc->picture_format == vlFormatYCbCr444 ?*/ PIPE_TEX_FILTER_NEAREST /*: PIPE_TEX_FILTER_LINEAR*/;
1062 filters[3] = PIPE_TEX_FILTER_LINEAR;
1063 filters[4] = PIPE_TEX_FILTER_LINEAR;
1064
1065 for (i = 0; i < 5; ++i)
1066 {
1067 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1068 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1069 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1070 sampler.min_img_filter = filters[i];
1071 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
1072 sampler.mag_img_filter = filters[i];
1073 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
1074 sampler.compare_func = PIPE_FUNC_ALWAYS;
1075 sampler.normalized_coords = 1;
1076 /*sampler.prefilter = ;*/
1077 /*sampler.shadow_ambient = ;*/
1078 /*sampler.lod_bias = ;*/
1079 sampler.min_lod = 0;
1080 /*sampler.max_lod = ;*/
1081 /*sampler.border_color[i] = ;*/
1082 /*sampler.max_anisotropy = ;*/
1083 mc->samplers.all[i] = pipe->create_sampler_state(pipe, &sampler);
1084 }
1085
1086 memset(&template, 0, sizeof(struct pipe_texture));
1087 template.target = PIPE_TEXTURE_2D;
1088 template.format = PIPE_FORMAT_R16_SNORM;
1089 template.last_level = 0;
1090 template.width[0] = vlRoundUpPOT(mc->picture_width);
1091 template.height[0] = vlRoundUpPOT(mc->picture_height);
1092 template.depth[0] = 1;
1093 template.compressed = 0;
1094 pf_get_block(template.format, &template.block);
1095 template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_DYNAMIC;
1096
1097 mc->textures.y = pipe->screen->texture_create(pipe->screen, &template);
1098
1099 if (mc->picture_format == vlFormatYCbCr420)
1100 {
1101 template.width[0] = vlRoundUpPOT(mc->picture_width / 2);
1102 template.height[0] = vlRoundUpPOT(mc->picture_height / 2);
1103 }
1104 else if (mc->picture_format == vlFormatYCbCr422)
1105 template.height[0] = vlRoundUpPOT(mc->picture_height / 2);
1106
1107 mc->textures.cb = pipe->screen->texture_create(pipe->screen, &template);
1108 mc->textures.cr = pipe->screen->texture_create(pipe->screen, &template);
1109
1110 /* textures.all[3] & textures.all[4] are assigned from vlSurfaces for P and B macroblocks at render time */
1111
1112 vlCreateVertexShaderIMB(mc);
1113 vlCreateFragmentShaderIMB(mc);
1114 vlCreateVertexShaderFramePMB(mc);
1115 vlCreateVertexShaderFieldPMB(mc);
1116 vlCreateFragmentShaderFramePMB(mc);
1117 vlCreateFragmentShaderFieldPMB(mc);
1118 vlCreateVertexShaderFrameBMB(mc);
1119 vlCreateVertexShaderFieldBMB(mc);
1120 vlCreateFragmentShaderFrameBMB(mc);
1121 vlCreateFragmentShaderFieldBMB(mc);
1122 vlCreateDataBufs(mc);
1123
1124 return 0;
1125 }
1126
1127 int vlCreateR16SNormBufferedMC
1128 (
1129 struct pipe_context *pipe,
1130 unsigned int picture_width,
1131 unsigned int picture_height,
1132 enum vlFormat picture_format,
1133 struct vlRender **render
1134 )
1135 {
1136 struct vlR16SnormBufferedMC *mc;
1137
1138 assert(pipe);
1139 assert(render);
1140
1141 mc = CALLOC_STRUCT(vlR16SnormBufferedMC);
1142
1143 mc->base.vlBegin = &vlBegin;
1144 mc->base.vlRenderMacroBlocksMpeg2 = &vlRenderMacroBlocksMpeg2R16SnormBuffered;
1145 mc->base.vlEnd = &vlEnd;
1146 mc->base.vlFlush = &vlFlush;
1147 mc->base.vlDestroy = &vlDestroy;
1148 mc->pipe = pipe;
1149 mc->picture_width = picture_width;
1150 mc->picture_height = picture_height;
1151
1152 vlInit(mc);
1153
1154 *render = &mc->base;
1155
1156 return 0;
1157 }