g3dvl: Ref count everywhere.
[mesa.git] / src / gallium / state_trackers / g3dvl / vl_r16snorm_mc_buf.c
1 #define VL_INTERNAL
2 #include "vl_r16snorm_mc_buf.h"
3 #include <assert.h>
4 #include <pipe/p_context.h>
5 #include <pipe/p_winsys.h>
6 #include <pipe/p_screen.h>
7 #include <pipe/p_state.h>
8 #include <pipe/p_inlines.h>
9 #include <tgsi/tgsi_parse.h>
10 #include <tgsi/tgsi_build.h>
11 #include <util/u_math.h>
12 #include <util/u_memory.h>
13 #include "vl_render.h"
14 #include "vl_shader_build.h"
15 #include "vl_surface.h"
16 #include "vl_util.h"
17 #include "vl_types.h"
18 #include "vl_defs.h"
19
/* Alignment (in bytes) requested for all pipe buffer allocations below; 1 = no special alignment. */
const unsigned int DEFAULT_BUF_ALIGNMENT = 1;
21
/*
 * Extended macroblock classification: splits each predicted MPEG-2
 * macroblock type by its motion type (frame vs. field), since vlFlush()
 * draws each combination as a separate batch with its own VS/FS pair.
 * Order matters: vlFlush() sorts vertex data by this enum value.
 */
enum vlMacroBlockTypeEx
{
   vlMacroBlockExTypeIntra,
   vlMacroBlockExTypeFwdPredictedFrame,
   vlMacroBlockExTypeFwdPredictedField,
   vlMacroBlockExTypeBkwdPredictedFrame,
   vlMacroBlockExTypeBkwdPredictedField,
   vlMacroBlockExTypeBiPredictedFrame,
   vlMacroBlockExTypeBiPredictedField,

   vlNumMacroBlockExTypes
};
34
/* Vertex shader constant buffer layout; denorm.x/y are set to the target
   texture's width/height in vlFlush() to denormalize coordinates. */
struct vlVertexShaderConsts
{
   struct vlVertex4f denorm;
};
/* Fragment shader constant buffer layout; see fs_consts below for the
   values and their meaning (sample renormalization and field selection). */
struct vlFragmentShaderConsts
{
   struct vlVertex4f multiplier;
   struct vlVertex4f div;
};
/* Layout of one vertex in vertex stream 0: position plus one texcoord
   per plane (Y, Cb, Cr). Matches vertex elements 0-3 in vlCreateDataBufs(). */
struct vlMacroBlockVertexStream0
{
   struct vlVertex2f pos;
   struct vlVertex2f luma_tc;
   struct vlVertex2f cb_tc;
   struct vlVertex2f cr_tc;
};
53
/*
 * Buffered motion-compensation renderer context. Derives from vlRender
 * (base must stay the first member so the struct can be cast to/from
 * struct vlRender*). Macroblocks are accumulated until a full picture
 * is buffered, then rendered in one go by vlFlush().
 */
struct vlR16SnormBufferedMC
{
   struct vlRender base;

   unsigned int picture_width;
   unsigned int picture_height;
   enum vlFormat picture_format;
   unsigned int macroblocks_per_picture;

   struct vlSurface *buffered_surface;      /* Surface currently being accumulated into; NULL when idle */
   struct vlSurface *past_surface;          /* Forward-prediction reference */
   struct vlSurface *future_surface;        /* Backward-prediction reference */
   struct vlVertex2f surface_tex_inv_size;  /* 1/width, 1/height of the target texture */
   struct vlVertex2f zero_block[3];         /* Per-plane texcoord of a known-zero block; x < 0 means not yet found */
   unsigned int num_macroblocks;            /* Number of macroblocks buffered so far */
   struct vlMpeg2MacroBlock *macroblocks;   /* CPU staging array, macroblocks_per_picture entries */
   struct pipe_surface *tex_surface[3];     /* Mapped Y/Cb/Cr source surfaces while accumulating */
   short *texels[3];                        /* CPU pointers into the mapped surfaces above */

   struct pipe_context *pipe;
   struct pipe_viewport_state viewport;
   struct pipe_framebuffer_state render_target;

   /* Samplers for Y, Cb, Cr and up to two reference textures;
      'all' lets state setup pass them as one array. */
   union
   {
      void *all[5];
      struct
      {
         void *y;
         void *cb;
         void *cr;
         void *ref[2];
      };
   } samplers;

   /* Matching source textures; ref[0]/ref[1] are filled in at draw time
      from past/future surfaces, not owned by this struct. */
   union
   {
      struct pipe_texture *all[5];
      struct
      {
         struct pipe_texture *y;
         struct pipe_texture *cb;
         struct pipe_texture *cr;
         struct pipe_texture *ref[2];
      };
   } textures;

   /* Stream 0 carries pos + YCbCr texcoords, streams 1-2 carry per-field
      motion vectors for the two reference surfaces. */
   union
   {
      struct pipe_vertex_buffer all[3];
      struct
      {
         struct pipe_vertex_buffer ycbcr;
         struct pipe_vertex_buffer ref[2];
      };
   } vertex_bufs;

   /* Shader pairs: intra, predicted (frame/field), bi-predicted (frame/field) */
   void *i_vs, *p_vs[2], *b_vs[2];
   void *i_fs, *p_fs[2], *b_fs[2];
   struct pipe_vertex_element vertex_elems[8];
   struct pipe_constant_buffer vs_const_buf;
   struct pipe_constant_buffer fs_const_buf;
};
117
/* Start of a rendering sequence; nothing to do for the buffered MC
   implementation, kept for interface completeness. Always returns 0. */
static inline int vlBegin(struct vlRender *render)
{
   assert(render);

   return 0;
}
127
128 static inline int vlGrabFrameCodedBlock(short *src, short *dst, unsigned int dst_pitch)
129 {
130 unsigned int y;
131
132 for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
133 memcpy
134 (
135 dst + y * dst_pitch,
136 src + y * VL_BLOCK_WIDTH,
137 VL_BLOCK_WIDTH * 2
138 );
139
140 return 0;
141 }
142
143 static inline int vlGrabFieldCodedBlock(short *src, short *dst, unsigned int dst_pitch)
144 {
145 unsigned int y;
146
147 for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
148 memcpy
149 (
150 dst + y * dst_pitch * 2,
151 src + y * VL_BLOCK_WIDTH,
152 VL_BLOCK_WIDTH * 2
153 );
154
155 return 0;
156 }
157
158 static inline int vlGrabNoBlock(short *dst, unsigned int dst_pitch)
159 {
160 unsigned int y;
161
162 for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
163 memset
164 (
165 dst + y * dst_pitch,
166 0,
167 VL_BLOCK_WIDTH * 2
168 );
169
170 return 0;
171 }
172
/*
 * Copy one macroblock's coded blocks into the mapped Y/Cb/Cr textures.
 * coded_block_pattern selects which of the 6 blocks (4 luma + Cb + Cr)
 * are present in 'blocks'; absent blocks are only cleared once per
 * picture and their location is remembered in mc->zero_block so empty
 * quads can all sample the same zeroed region.
 */
static inline int vlGrabBlocks
(
   struct vlR16SnormBufferedMC *mc,
   unsigned int mbx,
   unsigned int mby,
   enum vlDCTType dct_type,
   unsigned int coded_block_pattern,
   short *blocks
)
{
   short *texels;
   unsigned int tex_pitch;
   unsigned int x, y, tb = 0, sb = 0;  /* tb: target block index, sb: source block index into 'blocks' */
   unsigned int mbpx = mbx * VL_MACROBLOCK_WIDTH, mbpy = mby * VL_MACROBLOCK_HEIGHT;

   assert(mc);
   assert(blocks);

   /* Pitch in texels, not bytes */
   tex_pitch = mc->tex_surface[0]->stride / mc->tex_surface[0]->block.size;
   texels = mc->texels[0] + mbpy * tex_pitch + mbpx;

   /* Luma: 2x2 grid of 8x8 blocks; cbp bits 5..2 select them in raster order */
   for (y = 0; y < 2; ++y)
   {
      for (x = 0; x < 2; ++x, ++tb)
      {
         if ((coded_block_pattern >> (5 - tb)) & 1)
         {
            short *cur_block = blocks + sb * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;

            if (dct_type == vlDCTTypeFrameCoded)
            {
               vlGrabFrameCodedBlock
               (
                  cur_block,
                  texels + y * tex_pitch * VL_BLOCK_HEIGHT + x * VL_BLOCK_WIDTH,
                  tex_pitch
               );
            }
            else
            {
               /* Field-coded: second block of a column starts one row down, not 8 */
               vlGrabFieldCodedBlock
               (
                  cur_block,
                  texels + y * tex_pitch + x * VL_BLOCK_WIDTH,
                  tex_pitch
               );
            }

            ++sb;
         }
         else if (mc->zero_block[0].x < 0.0f)
         {
            /* First uncoded luma block this picture: zero it and remember its texcoords */
            vlGrabNoBlock(texels + y * tex_pitch * VL_BLOCK_HEIGHT + x * VL_BLOCK_WIDTH, tex_pitch);

            mc->zero_block[0].x = (mbpx + x * 8) * mc->surface_tex_inv_size.x;
            mc->zero_block[0].y = (mbpy + y * 8) * mc->surface_tex_inv_size.y;
         }
      }
   }

   /* Chroma planes are half-size (4:2:0 only). TODO: Implement 422, 444 */
   mbpx >>= 1;
   mbpy >>= 1;

   /* Cb then Cr: cbp bits 1..0 */
   for (tb = 0; tb < 2; ++tb)
   {
      tex_pitch = mc->tex_surface[tb + 1]->stride / mc->tex_surface[tb + 1]->block.size;
      texels = mc->texels[tb + 1] + mbpy * tex_pitch + mbpx;

      if ((coded_block_pattern >> (1 - tb)) & 1)
      {
         short *cur_block = blocks + sb * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;

         vlGrabFrameCodedBlock
         (
            cur_block,
            texels,
            tex_pitch
         );

         ++sb;
      }
      else if (mc->zero_block[tb + 1].x < 0.0f)
      {
         vlGrabNoBlock(texels, tex_pitch);

         /* Texcoords are in luma-scale units, hence the << 1 back up */
         mc->zero_block[tb + 1].x = (mbpx << 1) * mc->surface_tex_inv_size.x;
         mc->zero_block[tb + 1].y = (mbpy << 1) * mc->surface_tex_inv_size.y;
      }
   }

   return 0;
}
266
267 static inline enum vlMacroBlockTypeEx vlGetMacroBlockTypeEx(struct vlMpeg2MacroBlock *mb)
268 {
269 assert(mb);
270
271 switch (mb->mb_type)
272 {
273 case vlMacroBlockTypeIntra:
274 return vlMacroBlockExTypeIntra;
275 case vlMacroBlockTypeFwdPredicted:
276 return mb->mo_type == vlMotionTypeFrame ?
277 vlMacroBlockExTypeFwdPredictedFrame : vlMacroBlockExTypeFwdPredictedField;
278 case vlMacroBlockTypeBkwdPredicted:
279 return mb->mo_type == vlMotionTypeFrame ?
280 vlMacroBlockExTypeBkwdPredictedFrame : vlMacroBlockExTypeBkwdPredictedField;
281 case vlMacroBlockTypeBiPredicted:
282 return mb->mo_type == vlMotionTypeFrame ?
283 vlMacroBlockExTypeBiPredictedFrame : vlMacroBlockExTypeBiPredictedField;
284 default:
285 assert(0);
286 }
287
288 /* Unreachable */
289 return -1;
290 }
291
292 static inline int vlGrabMacroBlock
293 (
294 struct vlR16SnormBufferedMC *mc,
295 struct vlMpeg2MacroBlock *macroblock
296 )
297 {
298 assert(mc);
299 assert(macroblock);
300
301 mc->macroblocks[mc->num_macroblocks].mbx = macroblock->mbx;
302 mc->macroblocks[mc->num_macroblocks].mby = macroblock->mby;
303 mc->macroblocks[mc->num_macroblocks].mb_type = macroblock->mb_type;
304 mc->macroblocks[mc->num_macroblocks].mo_type = macroblock->mo_type;
305 mc->macroblocks[mc->num_macroblocks].dct_type = macroblock->dct_type;
306 mc->macroblocks[mc->num_macroblocks].PMV[0][0][0] = macroblock->PMV[0][0][0];
307 mc->macroblocks[mc->num_macroblocks].PMV[0][0][1] = macroblock->PMV[0][0][1];
308 mc->macroblocks[mc->num_macroblocks].PMV[0][1][0] = macroblock->PMV[0][1][0];
309 mc->macroblocks[mc->num_macroblocks].PMV[0][1][1] = macroblock->PMV[0][1][1];
310 mc->macroblocks[mc->num_macroblocks].PMV[1][0][0] = macroblock->PMV[1][0][0];
311 mc->macroblocks[mc->num_macroblocks].PMV[1][0][1] = macroblock->PMV[1][0][1];
312 mc->macroblocks[mc->num_macroblocks].PMV[1][1][0] = macroblock->PMV[1][1][0];
313 mc->macroblocks[mc->num_macroblocks].PMV[1][1][1] = macroblock->PMV[1][1][1];
314 mc->macroblocks[mc->num_macroblocks].cbp = macroblock->cbp;
315 mc->macroblocks[mc->num_macroblocks].blocks = macroblock->blocks;
316
317 vlGrabBlocks
318 (
319 mc,
320 macroblock->mbx,
321 macroblock->mby,
322 macroblock->dct_type,
323 macroblock->cbp,
324 macroblock->blocks
325 );
326
327 mc->num_macroblocks++;
328
329 return 0;
330 }
331
/*
 * Emit the 6 vertices (2 triangles) of one 8x8 block quad into (vb).
 * Positions always cover the quad at (mbx,mby) scaled by (unitx,unity)
 * plus (ofsx,ofsy), with half-extent (hx,hy). For each plane, if the
 * coded-block-pattern bit (lm/cbm/crm) is set the texcoords match the
 * position; otherwise they point at the per-plane zero block (zb) so
 * the shader samples zeros. Arguments are evaluated multiple times --
 * pass only side-effect-free expressions.
 */
#define SET_BLOCK(vb, cbp, mbx, mby, unitx, unity, ofsx, ofsy, hx, hy, lm, cbm, crm, zb) \
   (vb)[0].pos.x = (mbx) * (unitx) + (ofsx); (vb)[0].pos.y = (mby) * (unity) + (ofsy); \
   (vb)[1].pos.x = (mbx) * (unitx) + (ofsx); (vb)[1].pos.y = (mby) * (unity) + (ofsy) + (hy); \
   (vb)[2].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].pos.y = (mby) * (unity) + (ofsy); \
   (vb)[3].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].pos.y = (mby) * (unity) + (ofsy); \
   (vb)[4].pos.x = (mbx) * (unitx) + (ofsx); (vb)[4].pos.y = (mby) * (unity) + (ofsy) + (hy); \
   (vb)[5].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].pos.y = (mby) * (unity) + (ofsy) + (hy); \
   \
   if ((cbp) & (lm)) \
   { \
      (vb)[0].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].luma_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[1].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[2].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].luma_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[3].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].luma_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[4].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[5].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
   } \
   else \
   { \
      (vb)[0].luma_tc.x = (zb)[0].x; (vb)[0].luma_tc.y = (zb)[0].y; \
      (vb)[1].luma_tc.x = (zb)[0].x; (vb)[1].luma_tc.y = (zb)[0].y + (hy); \
      (vb)[2].luma_tc.x = (zb)[0].x + (hx); (vb)[2].luma_tc.y = (zb)[0].y; \
      (vb)[3].luma_tc.x = (zb)[0].x + (hx); (vb)[3].luma_tc.y = (zb)[0].y; \
      (vb)[4].luma_tc.x = (zb)[0].x; (vb)[4].luma_tc.y = (zb)[0].y + (hy); \
      (vb)[5].luma_tc.x = (zb)[0].x + (hx); (vb)[5].luma_tc.y = (zb)[0].y + (hy); \
   } \
   \
   if ((cbp) & (cbm)) \
   { \
      (vb)[0].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cb_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[1].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[2].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cb_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[3].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cb_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[4].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[5].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
   } \
   else \
   { \
      (vb)[0].cb_tc.x = (zb)[1].x; (vb)[0].cb_tc.y = (zb)[1].y; \
      (vb)[1].cb_tc.x = (zb)[1].x; (vb)[1].cb_tc.y = (zb)[1].y + (hy); \
      (vb)[2].cb_tc.x = (zb)[1].x + (hx); (vb)[2].cb_tc.y = (zb)[1].y; \
      (vb)[3].cb_tc.x = (zb)[1].x + (hx); (vb)[3].cb_tc.y = (zb)[1].y; \
      (vb)[4].cb_tc.x = (zb)[1].x; (vb)[4].cb_tc.y = (zb)[1].y + (hy); \
      (vb)[5].cb_tc.x = (zb)[1].x + (hx); (vb)[5].cb_tc.y = (zb)[1].y + (hy); \
   } \
   \
   if ((cbp) & (crm)) \
   { \
      (vb)[0].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cr_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[1].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[2].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cr_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[3].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cr_tc.y = (mby) * (unity) + (ofsy); \
      (vb)[4].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
      (vb)[5].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
   } \
   else \
   { \
      (vb)[0].cr_tc.x = (zb)[2].x; (vb)[0].cr_tc.y = (zb)[2].y; \
      (vb)[1].cr_tc.x = (zb)[2].x; (vb)[1].cr_tc.y = (zb)[2].y + (hy); \
      (vb)[2].cr_tc.x = (zb)[2].x + (hx); (vb)[2].cr_tc.y = (zb)[2].y; \
      (vb)[3].cr_tc.x = (zb)[2].x + (hx); (vb)[3].cr_tc.y = (zb)[2].y; \
      (vb)[4].cr_tc.x = (zb)[2].x; (vb)[4].cr_tc.y = (zb)[2].y + (hy); \
      (vb)[5].cr_tc.x = (zb)[2].x + (hx); (vb)[5].cr_tc.y = (zb)[2].y + (hy); \
   }
396
/*
 * Generate vertex data for one macroblock at slot 'pos' of the mapped
 * vertex buffers: 24 vertices (4 block quads x 6 verts) in stream 0
 * (ycbcr_vb) and, for predicted blocks, 2 motion vectors per vertex in
 * streams 1/2 (ref_vb). The switch intentionally falls through: a
 * bi-predicted block also writes the single-reference data, and every
 * type writes the intra (position/texcoord) data.
 */
static inline int vlGenMacroblockVerts
(
   struct vlR16SnormBufferedMC *mc,
   struct vlMpeg2MacroBlock *macroblock,
   unsigned int pos,
   struct vlMacroBlockVertexStream0 *ycbcr_vb,
   struct vlVertex2f **ref_vb
)
{
   struct vlVertex2f mo_vec[2];
   unsigned int i;

   assert(mc);
   assert(macroblock);
   assert(ycbcr_vb);

   switch (macroblock->mb_type)
   {
      case vlMacroBlockTypeBiPredicted:
      {
         struct vlVertex2f *vb;

         assert(ref_vb && ref_vb[1]);

         /* 2 motion vectors (top/bottom field) per vertex, 24 vertices */
         vb = ref_vb[1] + pos * 2 * 24;

         /* PMV is in half-pel units, hence the 0.5f scale to texcoords */
         mo_vec[0].x = macroblock->PMV[0][1][0] * 0.5f * mc->surface_tex_inv_size.x;
         mo_vec[0].y = macroblock->PMV[0][1][1] * 0.5f * mc->surface_tex_inv_size.y;

         if (macroblock->mo_type == vlMotionTypeFrame)
         {
            /* Frame motion: only the first vector of each pair is used */
            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
            }
         }
         else
         {
            /* Field motion: separate vector for the second field */
            mo_vec[1].x = macroblock->PMV[1][1][0] * 0.5f * mc->surface_tex_inv_size.x;
            mo_vec[1].y = macroblock->PMV[1][1][1] * 0.5f * mc->surface_tex_inv_size.y;

            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
               vb[i + 1].x = mo_vec[1].x;
               vb[i + 1].y = mo_vec[1].y;
            }
         }

         /* fall-through */
      }
      case vlMacroBlockTypeFwdPredicted:
      case vlMacroBlockTypeBkwdPredicted:
      {
         struct vlVertex2f *vb;

         assert(ref_vb && ref_vb[0]);

         vb = ref_vb[0] + pos * 2 * 24;

         /* Backward prediction reads PMV[..][1][..], forward reads PMV[..][0][..] */
         if (macroblock->mb_type == vlMacroBlockTypeBkwdPredicted)
         {
            mo_vec[0].x = macroblock->PMV[0][1][0] * 0.5f * mc->surface_tex_inv_size.x;
            mo_vec[0].y = macroblock->PMV[0][1][1] * 0.5f * mc->surface_tex_inv_size.y;

            if (macroblock->mo_type == vlMotionTypeField)
            {
               mo_vec[1].x = macroblock->PMV[1][1][0] * 0.5f * mc->surface_tex_inv_size.x;
               mo_vec[1].y = macroblock->PMV[1][1][1] * 0.5f * mc->surface_tex_inv_size.y;
            }
         }
         else
         {
            mo_vec[0].x = macroblock->PMV[0][0][0] * 0.5f * mc->surface_tex_inv_size.x;
            mo_vec[0].y = macroblock->PMV[0][0][1] * 0.5f * mc->surface_tex_inv_size.y;

            if (macroblock->mo_type == vlMotionTypeField)
            {
               mo_vec[1].x = macroblock->PMV[1][0][0] * 0.5f * mc->surface_tex_inv_size.x;
               mo_vec[1].y = macroblock->PMV[1][0][1] * 0.5f * mc->surface_tex_inv_size.y;
            }
         }

         if (macroblock->mo_type == vlMotionTypeFrame)
         {
            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
            }
         }
         else
         {
            for (i = 0; i < 24 * 2; i += 2)
            {
               vb[i].x = mo_vec[0].x;
               vb[i].y = mo_vec[0].y;
               vb[i + 1].x = mo_vec[1].x;
               vb[i + 1].y = mo_vec[1].y;
            }
         }

         /* fall-through */
      }
      case vlMacroBlockTypeIntra:
      {
         /* Macroblock size and half-size in normalized texcoord units */
         const struct vlVertex2f unit =
         {
            mc->surface_tex_inv_size.x * VL_MACROBLOCK_WIDTH,
            mc->surface_tex_inv_size.y * VL_MACROBLOCK_HEIGHT
         };
         const struct vlVertex2f half =
         {
            mc->surface_tex_inv_size.x * (VL_MACROBLOCK_WIDTH / 2),
            mc->surface_tex_inv_size.y * (VL_MACROBLOCK_HEIGHT / 2)
         };

         struct vlMacroBlockVertexStream0 *vb;

         vb = ycbcr_vb + pos * 24;

         /* One SET_BLOCK per 8x8 quadrant; 32/16/8/4 are the cbp luma bits
            (raster order), 2/1 are the Cb/Cr bits shared by all quadrants */
         SET_BLOCK
         (
            vb,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, 0, 0, half.x, half.y,
            32, 2, 1, mc->zero_block
         );

         SET_BLOCK
         (
            vb + 6,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, half.x, 0, half.x, half.y,
            16, 2, 1, mc->zero_block
         );

         SET_BLOCK
         (
            vb + 12,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, 0, half.y, half.x, half.y,
            8, 2, 1, mc->zero_block
         );

         SET_BLOCK
         (
            vb + 18,
            macroblock->cbp, macroblock->mbx, macroblock->mby,
            unit.x, unit.y, half.x, half.y, half.x, half.y,
            4, 2, 1, mc->zero_block
         );

         break;
      }
      default:
         assert(0);
   }

   return 0;
}
560
/*
 * Render the buffered picture: sort macroblocks by extended type, fill
 * the vertex buffers, unmap the source textures, then issue one draw
 * batch per macroblock type with the matching shaders and samplers.
 * No-op until a complete picture has been buffered.
 */
static int vlFlush
(
   struct vlRender *render
)
{
   struct vlR16SnormBufferedMC *mc;
   struct pipe_context *pipe;
   struct vlVertexShaderConsts *vs_consts;
   unsigned int num_macroblocks[vlNumMacroBlockExTypes] = {0};
   unsigned int offset[vlNumMacroBlockExTypes];
   unsigned int vb_start = 0;
   unsigned int i;

   assert(render);

   mc = (struct vlR16SnormBufferedMC*)render;

   /* Nothing buffered */
   if (!mc->buffered_surface)
      return 0;

   /* Wait until a full picture is buffered before rendering */
   if (mc->num_macroblocks < mc->macroblocks_per_picture)
      return 0;

   pipe = mc->pipe;

   /* Count macroblocks of each extended type... */
   for (i = 0; i < mc->num_macroblocks; ++i)
   {
      enum vlMacroBlockTypeEx mb_type_ex = vlGetMacroBlockTypeEx(&mc->macroblocks[i]);

      num_macroblocks[mb_type_ex]++;
   }

   /* ...and derive each type's starting slot in the vertex buffers */
   offset[0] = 0;

   for (i = 1; i < vlNumMacroBlockExTypes; ++i)
      offset[i] = offset[i - 1] + num_macroblocks[i - 1];

   {
      struct vlMacroBlockVertexStream0 *ycbcr_vb;
      struct vlVertex2f *ref_vb[2];

      ycbcr_vb = (struct vlMacroBlockVertexStream0*)pipe_buffer_map
      (
         pipe->screen,
         mc->vertex_bufs.ycbcr.buffer,
         PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
      );

      for (i = 0; i < 2; ++i)
         ref_vb[i] = (struct vlVertex2f*)pipe_buffer_map
         (
            pipe->screen,
            mc->vertex_bufs.ref[i].buffer,
            PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
         );

      /* Write each macroblock's vertices at its type's slot, so the
         buffer ends up grouped by type for contiguous draws */
      for (i = 0; i < mc->num_macroblocks; ++i)
      {
         enum vlMacroBlockTypeEx mb_type_ex = vlGetMacroBlockTypeEx(&mc->macroblocks[i]);

         vlGenMacroblockVerts(mc, &mc->macroblocks[i], offset[mb_type_ex], ycbcr_vb, ref_vb);

         offset[mb_type_ex]++;
      }

      pipe_buffer_unmap(pipe->screen, mc->vertex_bufs.ycbcr.buffer);
      for (i = 0; i < 2; ++i)
         pipe_buffer_unmap(pipe->screen, mc->vertex_bufs.ref[i].buffer);
   }

   /* Source textures were mapped in vlRenderMacroBlocksMpeg2R16SnormBuffered() */
   for (i = 0; i < 3; ++i)
   {
      pipe_surface_unmap(mc->tex_surface[i]);
      pipe_surface_reference(&mc->tex_surface[i], NULL);
   }

   /* Render into the user's target surface */
   mc->render_target.cbufs[0] = pipe->screen->get_tex_surface
   (
      pipe->screen,
      mc->buffered_surface->texture,
      0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE
   );

   pipe->set_framebuffer_state(pipe, &mc->render_target);
   pipe->set_viewport_state(pipe, &mc->viewport);
   vs_consts = pipe->winsys->buffer_map
   (
      pipe->winsys,
      mc->vs_const_buf.buffer,
      PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
   );

   /* Denormalization factors = target texture dimensions */
   vs_consts->denorm.x = mc->buffered_surface->texture->width[0];
   vs_consts->denorm.y = mc->buffered_surface->texture->height[0];

   pipe_buffer_unmap(pipe->screen, mc->vs_const_buf.buffer);
   pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &mc->vs_const_buf);
   pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &mc->fs_const_buf);

   /* One batch per extended macroblock type, in enum order; each consumes
      24 vertices per macroblock from vb_start onward. Intra uses only
      stream 0 / 3 textures; predicted types add a ref stream+texture;
      bi-predicted adds a second. */
   if (num_macroblocks[vlMacroBlockExTypeIntra] > 0)
   {
      pipe->set_vertex_buffers(pipe, 1, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 4, mc->vertex_elems);
      pipe->set_sampler_textures(pipe, 3, mc->textures.all);
      pipe->bind_sampler_states(pipe, 3, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->i_vs);
      pipe->bind_fs_state(pipe, mc->i_fs);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeIntra] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeIntra] * 24;
   }

   if (num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[0]);
      pipe->bind_fs_state(pipe, mc->p_fs[0]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] * 24;
   }

   if (num_macroblocks[vlMacroBlockExTypeFwdPredictedField] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[1]);
      pipe->bind_fs_state(pipe, mc->p_fs[1]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeFwdPredictedField] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeFwdPredictedField] * 24;
   }

   if (num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[0]);
      pipe->bind_fs_state(pipe, mc->p_fs[0]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] * 24;
   }

   if (num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] > 0)
   {
      pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
      mc->textures.ref[0] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 4, mc->textures.all);
      pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->p_vs[1]);
      pipe->bind_fs_state(pipe, mc->p_fs[1]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] * 24;
   }

   if (num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] > 0)
   {
      pipe->set_vertex_buffers(pipe, 3, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 8, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      mc->textures.ref[1] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 5, mc->textures.all);
      pipe->bind_sampler_states(pipe, 5, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->b_vs[0]);
      pipe->bind_fs_state(pipe, mc->b_fs[0]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] * 24;
   }

   if (num_macroblocks[vlMacroBlockExTypeBiPredictedField] > 0)
   {
      pipe->set_vertex_buffers(pipe, 3, mc->vertex_bufs.all);
      pipe->set_vertex_elements(pipe, 8, mc->vertex_elems);
      mc->textures.ref[0] = mc->past_surface->texture;
      mc->textures.ref[1] = mc->future_surface->texture;
      pipe->set_sampler_textures(pipe, 5, mc->textures.all);
      pipe->bind_sampler_states(pipe, 5, mc->samplers.all);
      pipe->bind_vs_state(pipe, mc->b_vs[1]);
      pipe->bind_fs_state(pipe, mc->b_fs[1]);

      pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBiPredictedField] * 24);
      vb_start += num_macroblocks[vlMacroBlockExTypeBiPredictedField] * 24;
   }

   /* Kick off rendering and drop our reference to the target surface */
   pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, &mc->buffered_surface->render_fence);
   pipe_surface_reference(&mc->render_target.cbufs[0], NULL);

   /* Reset per-picture state: zero blocks must be rediscovered next picture */
   for (i = 0; i < 3; ++i)
      mc->zero_block[i].x = -1.0f;

   mc->buffered_surface = NULL;
   mc->num_macroblocks = 0;

   return 0;
}
770
771 static int vlRenderMacroBlocksMpeg2R16SnormBuffered
772 (
773 struct vlRender *render,
774 struct vlMpeg2MacroBlockBatch *batch,
775 struct vlSurface *surface
776 )
777 {
778 struct vlR16SnormBufferedMC *mc;
779 bool new_surface = false;
780 unsigned int i;
781
782 assert(render);
783
784 mc = (struct vlR16SnormBufferedMC*)render;
785
786 if (mc->buffered_surface)
787 {
788 if (mc->buffered_surface != surface)
789 {
790 vlFlush(&mc->base);
791 new_surface = true;
792 }
793 }
794 else
795 new_surface = true;
796
797 if (new_surface)
798 {
799 mc->buffered_surface = surface;
800 mc->past_surface = batch->past_surface;
801 mc->future_surface = batch->future_surface;
802 mc->surface_tex_inv_size.x = 1.0f / surface->texture->width[0];
803 mc->surface_tex_inv_size.y = 1.0f / surface->texture->height[0];
804
805 for (i = 0; i < 3; ++i)
806 {
807 mc->tex_surface[i] = mc->pipe->screen->get_tex_surface
808 (
809 mc->pipe->screen,
810 mc->textures.all[i],
811 0, 0, 0, PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
812 );
813
814 mc->texels[i] = pipe_surface_map(mc->tex_surface[i], PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD);
815 }
816 }
817
818 for (i = 0; i < batch->num_macroblocks; ++i)
819 vlGrabMacroBlock(mc, &batch->macroblocks[i]);
820
821 return 0;
822 }
823
/* End of a rendering sequence; nothing to do for the buffered MC
   implementation (actual rendering happens in vlFlush). Always returns 0. */
static inline int vlEnd(struct vlRender *render)
{
   assert(render);

   return 0;
}
833
/*
 * Tear down the renderer: release all pipe objects this module created
 * (samplers, vertex/constant buffers, source textures, shader states)
 * and free the CPU-side macroblock array and the context itself.
 */
static int vlDestroy
(
   struct vlRender *render
)
{
   struct vlR16SnormBufferedMC *mc;
   struct pipe_context *pipe;
   unsigned int i;

   assert(render);

   mc = (struct vlR16SnormBufferedMC*)render;
   pipe = mc->pipe;

   for (i = 0; i < 5; ++i)
      pipe->delete_sampler_state(pipe, mc->samplers.all[i]);

   for (i = 0; i < 3; ++i)
      pipe_buffer_reference(pipe->screen, &mc->vertex_bufs.all[i].buffer, NULL);

   /* Textures 3 & 4 are not created directly, no need to release them here */
   for (i = 0; i < 3; ++i)
      pipe_texture_reference(&mc->textures.all[i], NULL);

   pipe->delete_vs_state(pipe, mc->i_vs);
   pipe->delete_fs_state(pipe, mc->i_fs);

   for (i = 0; i < 2; ++i)
   {
      pipe->delete_vs_state(pipe, mc->p_vs[i]);
      pipe->delete_fs_state(pipe, mc->p_fs[i]);
      pipe->delete_vs_state(pipe, mc->b_vs[i]);
      pipe->delete_fs_state(pipe, mc->b_fs[i]);
   }

   pipe_buffer_reference(pipe->screen, &mc->vs_const_buf.buffer, NULL);
   pipe_buffer_reference(pipe->screen, &mc->fs_const_buf.buffer, NULL);

   FREE(mc->macroblocks);
   FREE(mc);

   return 0;
}
877
/*
 * Multiplier renormalizes block samples from 16 bits to 12 bits
 * (32767/255 maps the stored snorm range back to sample scale).
 * Divider is used when calculating Y % 2 for choosing top or bottom
 * field for P or B macroblocks.
 * Uploaded once into fs_const_buf by vlCreateDataBufs().
 * TODO: Use immediates.
 */
static const struct vlFragmentShaderConsts fs_consts =
{
   {32767.0f / 255.0f, 32767.0f / 255.0f, 32767.0f / 255.0f, 0.0f},
   {0.5f, 2.0f, 0.0f, 0.0f}
};
889
890 #include "vl_r16snorm_mc_buf_shaders.inc"
891
892 static int vlCreateDataBufs
893 (
894 struct vlR16SnormBufferedMC *mc
895 )
896 {
897 const unsigned int mbw = align(mc->picture_width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
898 const unsigned int mbh = align(mc->picture_height, VL_MACROBLOCK_HEIGHT) / VL_MACROBLOCK_HEIGHT;
899
900 struct pipe_context *pipe;
901 unsigned int i;
902
903 assert(mc);
904
905 pipe = mc->pipe;
906 mc->macroblocks_per_picture = mbw * mbh;
907
908 /* Create our vertex buffers */
909 mc->vertex_bufs.ycbcr.pitch = sizeof(struct vlVertex2f) * 4;
910 mc->vertex_bufs.ycbcr.max_index = 24 * mc->macroblocks_per_picture - 1;
911 mc->vertex_bufs.ycbcr.buffer_offset = 0;
912 mc->vertex_bufs.ycbcr.buffer = pipe_buffer_create
913 (
914 pipe->screen,
915 DEFAULT_BUF_ALIGNMENT,
916 PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
917 sizeof(struct vlVertex2f) * 4 * 24 * mc->macroblocks_per_picture
918 );
919
920 for (i = 1; i < 3; ++i)
921 {
922 mc->vertex_bufs.all[i].pitch = sizeof(struct vlVertex2f) * 2;
923 mc->vertex_bufs.all[i].max_index = 24 * mc->macroblocks_per_picture - 1;
924 mc->vertex_bufs.all[i].buffer_offset = 0;
925 mc->vertex_bufs.all[i].buffer = pipe_buffer_create
926 (
927 pipe->screen,
928 DEFAULT_BUF_ALIGNMENT,
929 PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
930 sizeof(struct vlVertex2f) * 2 * 24 * mc->macroblocks_per_picture
931 );
932 }
933
934 /* Position element */
935 mc->vertex_elems[0].src_offset = 0;
936 mc->vertex_elems[0].vertex_buffer_index = 0;
937 mc->vertex_elems[0].nr_components = 2;
938 mc->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
939
940 /* Luma, texcoord element */
941 mc->vertex_elems[1].src_offset = sizeof(struct vlVertex2f);
942 mc->vertex_elems[1].vertex_buffer_index = 0;
943 mc->vertex_elems[1].nr_components = 2;
944 mc->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
945
946 /* Chroma Cr texcoord element */
947 mc->vertex_elems[2].src_offset = sizeof(struct vlVertex2f) * 2;
948 mc->vertex_elems[2].vertex_buffer_index = 0;
949 mc->vertex_elems[2].nr_components = 2;
950 mc->vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
951
952 /* Chroma Cb texcoord element */
953 mc->vertex_elems[3].src_offset = sizeof(struct vlVertex2f) * 3;
954 mc->vertex_elems[3].vertex_buffer_index = 0;
955 mc->vertex_elems[3].nr_components = 2;
956 mc->vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT;
957
958 /* First ref surface top field texcoord element */
959 mc->vertex_elems[4].src_offset = 0;
960 mc->vertex_elems[4].vertex_buffer_index = 1;
961 mc->vertex_elems[4].nr_components = 2;
962 mc->vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT;
963
964 /* First ref surface bottom field texcoord element */
965 mc->vertex_elems[5].src_offset = sizeof(struct vlVertex2f);
966 mc->vertex_elems[5].vertex_buffer_index = 1;
967 mc->vertex_elems[5].nr_components = 2;
968 mc->vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT;
969
970 /* Second ref surface top field texcoord element */
971 mc->vertex_elems[6].src_offset = 0;
972 mc->vertex_elems[6].vertex_buffer_index = 2;
973 mc->vertex_elems[6].nr_components = 2;
974 mc->vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT;
975
976 /* Second ref surface bottom field texcoord element */
977 mc->vertex_elems[7].src_offset = sizeof(struct vlVertex2f);
978 mc->vertex_elems[7].vertex_buffer_index = 2;
979 mc->vertex_elems[7].nr_components = 2;
980 mc->vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT;
981
982 /* Create our constant buffer */
983 mc->vs_const_buf.size = sizeof(struct vlVertexShaderConsts);
984 mc->vs_const_buf.buffer = pipe_buffer_create
985 (
986 pipe->screen,
987 DEFAULT_BUF_ALIGNMENT,
988 PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD,
989 mc->vs_const_buf.size
990 );
991
992 mc->fs_const_buf.size = sizeof(struct vlFragmentShaderConsts);
993 mc->fs_const_buf.buffer = pipe_buffer_create
994 (
995 pipe->screen,
996 DEFAULT_BUF_ALIGNMENT,
997 PIPE_BUFFER_USAGE_CONSTANT,
998 mc->fs_const_buf.size
999 );
1000
1001 memcpy
1002 (
1003 pipe_buffer_map(pipe->screen, mc->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
1004 &fs_consts,
1005 sizeof(struct vlFragmentShaderConsts)
1006 );
1007
1008 pipe_buffer_unmap(pipe->screen, mc->fs_const_buf.buffer);
1009
1010 mc->macroblocks = MALLOC(sizeof(struct vlMpeg2MacroBlock) * mc->macroblocks_per_picture);
1011
1012 return 0;
1013 }
1014
1015 static int vlInit
1016 (
1017 struct vlR16SnormBufferedMC *mc
1018 )
1019 {
1020 struct pipe_context *pipe;
1021 struct pipe_sampler_state sampler;
1022 struct pipe_texture template;
1023 unsigned int filters[5];
1024 unsigned int i;
1025
1026 assert(mc);
1027
1028 pipe = mc->pipe;
1029
1030 mc->buffered_surface = NULL;
1031 mc->past_surface = NULL;
1032 mc->future_surface = NULL;
1033 for (i = 0; i < 3; ++i)
1034 mc->zero_block[i].x = -1.0f;
1035 mc->num_macroblocks = 0;
1036
1037 /* For MC we render to textures, which are rounded up to nearest POT */
1038 mc->viewport.scale[0] = vlRoundUpPOT(mc->picture_width);
1039 mc->viewport.scale[1] = vlRoundUpPOT(mc->picture_height);
1040 mc->viewport.scale[2] = 1;
1041 mc->viewport.scale[3] = 1;
1042 mc->viewport.translate[0] = 0;
1043 mc->viewport.translate[1] = 0;
1044 mc->viewport.translate[2] = 0;
1045 mc->viewport.translate[3] = 0;
1046
1047 mc->render_target.width = vlRoundUpPOT(mc->picture_width);
1048 mc->render_target.height = vlRoundUpPOT(mc->picture_height);
1049 mc->render_target.num_cbufs = 1;
1050 /* FB for MC stage is a vlSurface created by the user, set at render time */
1051 mc->render_target.zsbuf = NULL;
1052
1053 filters[0] = PIPE_TEX_FILTER_NEAREST;
1054 /* FIXME: Linear causes discoloration around block edges */
1055 filters[1] = /*mc->picture_format == vlFormatYCbCr444 ?*/ PIPE_TEX_FILTER_NEAREST /*: PIPE_TEX_FILTER_LINEAR*/;
1056 filters[2] = /*mc->picture_format == vlFormatYCbCr444 ?*/ PIPE_TEX_FILTER_NEAREST /*: PIPE_TEX_FILTER_LINEAR*/;
1057 filters[3] = PIPE_TEX_FILTER_LINEAR;
1058 filters[4] = PIPE_TEX_FILTER_LINEAR;
1059
1060 for (i = 0; i < 5; ++i)
1061 {
1062 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1063 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1064 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1065 sampler.min_img_filter = filters[i];
1066 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
1067 sampler.mag_img_filter = filters[i];
1068 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
1069 sampler.compare_func = PIPE_FUNC_ALWAYS;
1070 sampler.normalized_coords = 1;
1071 /*sampler.prefilter = ;*/
1072 /*sampler.shadow_ambient = ;*/
1073 /*sampler.lod_bias = ;*/
1074 sampler.min_lod = 0;
1075 /*sampler.max_lod = ;*/
1076 /*sampler.border_color[i] = ;*/
1077 /*sampler.max_anisotropy = ;*/
1078 mc->samplers.all[i] = pipe->create_sampler_state(pipe, &sampler);
1079 }
1080
1081 memset(&template, 0, sizeof(struct pipe_texture));
1082 template.target = PIPE_TEXTURE_2D;
1083 template.format = PIPE_FORMAT_R16_SNORM;
1084 template.last_level = 0;
1085 template.width[0] = vlRoundUpPOT(mc->picture_width);
1086 template.height[0] = vlRoundUpPOT(mc->picture_height);
1087 template.depth[0] = 1;
1088 template.compressed = 0;
1089 pf_get_block(template.format, &template.block);
1090 template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_DYNAMIC;
1091
1092 mc->textures.y = pipe->screen->texture_create(pipe->screen, &template);
1093
1094 if (mc->picture_format == vlFormatYCbCr420)
1095 {
1096 template.width[0] = vlRoundUpPOT(mc->picture_width / 2);
1097 template.height[0] = vlRoundUpPOT(mc->picture_height / 2);
1098 }
1099 else if (mc->picture_format == vlFormatYCbCr422)
1100 template.height[0] = vlRoundUpPOT(mc->picture_height / 2);
1101
1102 mc->textures.cb = pipe->screen->texture_create(pipe->screen, &template);
1103 mc->textures.cr = pipe->screen->texture_create(pipe->screen, &template);
1104
1105 /* textures.all[3] & textures.all[4] are assigned from vlSurfaces for P and B macroblocks at render time */
1106
1107 vlCreateVertexShaderIMB(mc);
1108 vlCreateFragmentShaderIMB(mc);
1109 vlCreateVertexShaderFramePMB(mc);
1110 vlCreateVertexShaderFieldPMB(mc);
1111 vlCreateFragmentShaderFramePMB(mc);
1112 vlCreateFragmentShaderFieldPMB(mc);
1113 vlCreateVertexShaderFrameBMB(mc);
1114 vlCreateVertexShaderFieldBMB(mc);
1115 vlCreateFragmentShaderFrameBMB(mc);
1116 vlCreateFragmentShaderFieldBMB(mc);
1117 vlCreateDataBufs(mc);
1118
1119 return 0;
1120 }
1121
1122 int vlCreateR16SNormBufferedMC
1123 (
1124 struct pipe_context *pipe,
1125 unsigned int picture_width,
1126 unsigned int picture_height,
1127 enum vlFormat picture_format,
1128 struct vlRender **render
1129 )
1130 {
1131 struct vlR16SnormBufferedMC *mc;
1132
1133 assert(pipe);
1134 assert(render);
1135
1136 mc = CALLOC_STRUCT(vlR16SnormBufferedMC);
1137
1138 mc->base.vlBegin = &vlBegin;
1139 mc->base.vlRenderMacroBlocksMpeg2 = &vlRenderMacroBlocksMpeg2R16SnormBuffered;
1140 mc->base.vlEnd = &vlEnd;
1141 mc->base.vlFlush = &vlFlush;
1142 mc->base.vlDestroy = &vlDestroy;
1143 mc->pipe = pipe;
1144 mc->picture_width = picture_width;
1145 mc->picture_height = picture_height;
1146
1147 vlInit(mc);
1148
1149 *render = &mc->base;
1150
1151 return 0;
1152 }