[g3dvl] merge idct stage 2 and mc ycbcr stage into a single draw
[mesa.git] / src / gallium / auxiliary / vl / vl_idct.c
1 /**************************************************************************
2 *
3 * Copyright 2010 Christian König
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include <pipe/p_context.h>
31 #include <pipe/p_screen.h>
32
33 #include <util/u_draw.h>
34 #include <util/u_sampler.h>
35
36 #include <tgsi/tgsi_ureg.h>
37
38 #include "vl_defines.h"
39 #include "vl_types.h"
40 #include "vl_vertex_buffers.h"
41 #include "vl_idct.h"
42
/* Vertex shader output slot indices, shared with the fragment shaders
 * which declare their inputs by the same semantic indices. */
enum VS_OUTPUT
{
   VS_O_VPOS,    /* clip-space position */
   VS_O_L_ADDR0, /* left operand fetch address, low half */
   VS_O_L_ADDR1, /* left operand fetch address, high half */
   VS_O_R_ADDR0, /* right operand fetch address, low half */
   VS_O_R_ADDR1  /* right operand fetch address, high half */
};
51
/* 8x8 DCT basis matrix used for the IDCT; it is uploaded (transposed
 * and pre-scaled) by vl_idct_upload_matrix().  NOTE(review): some
 * entries differ from their symmetric counterparts in the last decimal
 * places (e.g. 0.097545 vs 0.0975452) — presumably historical rounding;
 * kept byte-identical here. */
static const float const_matrix[8][8] = {
   { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
   { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
   { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
   { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
   { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
   { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
   { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
};
62
/*
 * Emit instructions computing a pair of 3D texture addresses for
 * fetching one eight-element row/column as two four-component texels.
 *
 * tc         running coordinate along the row/column
 * start      coordinate of the row/column start
 * right_side selects which operand of the multiplication is addressed
 *            (swaps which component carries start vs tc)
 * transposed swaps the write masks so the address is laid out transposed
 * size       texture dimension; addr[1] is advanced one texel (1/size)
 */
static void
calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
          struct ureg_src tc, struct ureg_src start, bool right_side,
          bool transposed, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;

   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
   unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;

   /*
    * addr[0..1].(start) = right_side ? start.x : tc.x
    * addr[0..1].(tc) = right_side ? tc.y : start.y
    * addr[0..1].z = tc.z
    * addr[1].(start) += 1.0f / scale
    */
   ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
   ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);

   ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
   ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
}
88
/*
 * Emit instructions deriving the address pair daddr from saddr,
 * advanced by pos texels along the tc direction:
 *
 * daddr[0..1].(start) = saddr[0..1].(start)
 * daddr[0..1].(tc)    = saddr[0..1].(tc) + pos / size
 *
 * (The original comment omitted the "+ pos / size" step.)
 */
static void
increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
               struct ureg_src saddr[2], bool right_side, bool transposed,
               int pos, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
   ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
   ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
   ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
}
107
108 static void
109 fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
110 {
111 ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
112 ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
113 }
114
115 static void
116 matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
117 {
118 struct ureg_dst tmp;
119
120 tmp = ureg_DECL_temporary(shader);
121
122 /*
123 * tmp.xy = dot4(m[0][0..1], m[1][0..1])
124 * dst = tmp.x + tmp.y
125 */
126 ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
127 ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
128 ureg_ADD(shader, dst,
129 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
130 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
131
132 ureg_release_temporary(shader, tmp);
133 }
134
/*
 * Build the vertex shader for the first IDCT stage.
 *
 * Scales the block position into render-target coordinates and emits
 * the left (source) and right (matrix) fetch addresses consumed by the
 * stage 1 fragment shader.  Returns a pipe VS handle or NULL.
 */
static void *
create_stage1_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src vrect, vpos;
   struct ureg_src scale;
   struct ureg_dst t_tex, t_start;
   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_tex = ureg_DECL_temporary(shader);
   t_start = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_l_addr = calc_addr(...)
    * o_r_addr = calc_addr(...)
    *
    */

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);

   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   /* z selects the render-target layer within the intermediate texture */
   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
      ureg_scalar(vrect, TGSI_SWIZZLE_X),
      ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));
   ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);

   calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
   calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);

   ureg_release_temporary(shader, t_tex);
   ureg_release_temporary(shader, t_start);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}
199
200 static void *
201 create_stage1_frag_shader(struct vl_idct *idct)
202 {
203 struct ureg_program *shader;
204
205 struct ureg_src l_addr[2], r_addr[2];
206
207 struct ureg_dst l[4][2], r[2];
208 struct ureg_dst fragment[idct->nr_of_render_targets];
209
210 unsigned i, j;
211
212 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
213 if (!shader)
214 return NULL;
215
216 l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
217 l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
218
219 r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
220 r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
221
222 for (i = 0; i < idct->nr_of_render_targets; ++i)
223 fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
224
225 for (i = 0; i < 4; ++i) {
226 l[i][0] = ureg_DECL_temporary(shader);
227 l[i][1] = ureg_DECL_temporary(shader);
228 }
229
230 r[0] = ureg_DECL_temporary(shader);
231 r[1] = ureg_DECL_temporary(shader);
232
233 for (i = 1; i < 4; ++i) {
234 increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
235 }
236
237 for (i = 0; i < 4; ++i) {
238 struct ureg_src s_addr[2];
239 s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
240 s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
241 fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
242 }
243
244 for (i = 0; i < idct->nr_of_render_targets; ++i) {
245 if(i > 0)
246 increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);
247
248 struct ureg_src s_addr[2] = { ureg_src(r[0]), ureg_src(r[1]) };
249 s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
250 s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
251 fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));
252
253 for (j = 0; j < 4; ++j) {
254 matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
255 }
256 }
257
258 for (i = 0; i < 4; ++i) {
259 ureg_release_temporary(shader, l[i][0]);
260 ureg_release_temporary(shader, l[i][1]);
261 }
262 ureg_release_temporary(shader, r[0]);
263 ureg_release_temporary(shader, r[1]);
264
265 ureg_END(shader);
266
267 return ureg_create_shader_and_destroy(shader, idct->pipe);
268 }
269
/*
 * Emit the stage 2 address calculations into a caller-supplied vertex
 * shader (stage 2 is merged into the caller's own draw).
 *
 * first_output is the caller's first free output slot; tex must hold
 * the block texture coordinate (its z is written here).
 */
void
vl_idct_stage2_vert_shader(struct vl_idct *idct, struct ureg_program *shader,
                           unsigned first_output, struct ureg_dst tex)
{
   struct ureg_src vrect, vpos;
   struct ureg_src scale;
   struct ureg_dst t_start;
   struct ureg_dst o_l_addr[2], o_r_addr[2];

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_start = ureg_DECL_temporary(shader);

   /* NOTE(review): the VS_O_* values include VS_O_VPOS (slot 0), which the
    * caller already provides; decrementing compensates for that offset —
    * confirm against callers of this function */
   --first_output;

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_R_ADDR1);

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   /* z selects the layer of the intermediate texture to read back */
   ureg_MUL(shader, ureg_writemask(tex, TGSI_WRITEMASK_Z),
      ureg_scalar(vrect, TGSI_SWIZZLE_X),
      ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));
   ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);

   calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
   calc_addr(shader, o_r_addr, ureg_src(tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
}
304
/*
 * Emit the stage 2 matrix multiplication into a caller-supplied
 * fragment shader, writing the final IDCT result to fragment.
 *
 * first_input is the caller's first generic input slot; samplers 0 and
 * 1 must be bound to the transposed matrix and the intermediate
 * texture respectively (see vl_idct_prepare_stage2).
 */
void
vl_idct_stage2_frag_shader(struct vl_idct *idct, struct ureg_program *shader,
                           unsigned first_input, struct ureg_dst fragment)
{
   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[2], r[2];

   /* mirror of the --first_output adjustment in the vertex shader */
   --first_input;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   l[0] = ureg_DECL_temporary(shader);
   l[1] = ureg_DECL_temporary(shader);
   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
   fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));

   matrix_mul(shader, fragment, l, r);

   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);
}
336
337 static bool
338 init_shaders(struct vl_idct *idct)
339 {
340 idct->vs = create_stage1_vert_shader(idct);
341 if (!idct->vs)
342 goto error_vs;
343
344 idct->fs = create_stage1_frag_shader(idct);
345 if (!idct->fs)
346 goto error_fs;
347
348 return true;
349
350 error_fs:
351 idct->pipe->delete_vs_state(idct->pipe, idct->vs);
352
353 error_vs:
354 return false;
355 }
356
/* Destroy the stage 1 shaders created by init_shaders(). */
static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->fs);
}
363
364 static bool
365 init_state(struct vl_idct *idct)
366 {
367 struct pipe_blend_state blend;
368 struct pipe_rasterizer_state rs_state;
369 struct pipe_sampler_state sampler;
370 unsigned i;
371
372 assert(idct);
373
374 memset(&rs_state, 0, sizeof(rs_state));
375 rs_state.gl_rasterization_rules = false;
376 idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
377 if (!idct->rs_state)
378 goto error_rs_state;
379
380 memset(&blend, 0, sizeof blend);
381
382 blend.independent_blend_enable = 0;
383 blend.rt[0].blend_enable = 0;
384 blend.rt[0].rgb_func = PIPE_BLEND_ADD;
385 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
386 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
387 blend.rt[0].alpha_func = PIPE_BLEND_ADD;
388 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
389 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
390 blend.logicop_enable = 0;
391 blend.logicop_func = PIPE_LOGICOP_CLEAR;
392 /* Needed to allow color writes to FB, even if blending disabled */
393 blend.rt[0].colormask = PIPE_MASK_RGBA;
394 blend.dither = 0;
395 idct->blend = idct->pipe->create_blend_state(idct->pipe, &blend);
396 if (!idct->blend)
397 goto error_blend;
398
399 for (i = 0; i < 2; ++i) {
400 memset(&sampler, 0, sizeof(sampler));
401 sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
402 sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
403 sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
404 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
405 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
406 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
407 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
408 sampler.compare_func = PIPE_FUNC_ALWAYS;
409 sampler.normalized_coords = 1;
410 idct->samplers[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
411 if (!idct->samplers[i])
412 goto error_samplers;
413 }
414
415 return true;
416
417 error_samplers:
418 for (i = 0; i < 2; ++i)
419 if (idct->samplers[i])
420 idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);
421
422 idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
423
424 error_blend:
425 idct->pipe->delete_blend_state(idct->pipe, idct->blend);
426
427 error_rs_state:
428 return false;
429 }
430
/* Destroy the CSO state objects created by init_state(). */
static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 2; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->delete_blend_state(idct->pipe, idct->blend);
}
442
/*
 * Set up the per-buffer framebuffer state: one render-target surface
 * per layer of the intermediate texture plus the viewport scale.
 * Returns false with all created surfaces released on failure.
 */
static bool
init_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_resource *tex;
   struct pipe_surface surf_templ;
   unsigned i;

   assert(idct && buffer);

   tex = buffer->sampler_views.individual.intermediate->texture;

   buffer->fb_state.width = tex->width0;
   buffer->fb_state.height = tex->height0;
   buffer->fb_state.nr_cbufs = idct->nr_of_render_targets;
   for(i = 0; i < idct->nr_of_render_targets; ++i) {
      memset(&surf_templ, 0, sizeof(surf_templ));
      surf_templ.format = tex->format;
      /* each render target is a single layer of the array texture */
      surf_templ.u.tex.first_layer = i;
      surf_templ.u.tex.last_layer = i;
      surf_templ.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
      buffer->fb_state.cbufs[i] = idct->pipe->create_surface(
         idct->pipe, tex, &surf_templ);

      if (!buffer->fb_state.cbufs[i])
         goto error_surfaces;
   }

   buffer->viewport.scale[0] = tex->width0;
   buffer->viewport.scale[1] = tex->height0;

   return true;

error_surfaces:
   /* releasing a NULL surface reference is a no-op, so the full loop is safe */
   for(i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);

   return false;
}
481
/* Release the per-buffer render-target surfaces and the intermediate
 * sampler view reference. */
static void
cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for(i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);

   pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
}
494
495 struct pipe_sampler_view *
496 vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
497 {
498 struct pipe_resource tex_templ, *matrix;
499 struct pipe_sampler_view sv_templ, *sv;
500 struct pipe_transfer *buf_transfer;
501 unsigned i, j, pitch;
502 float *f;
503
504 struct pipe_box rect =
505 {
506 0, 0, 0,
507 BLOCK_WIDTH / 4,
508 BLOCK_HEIGHT,
509 1
510 };
511
512 assert(pipe);
513
514 memset(&tex_templ, 0, sizeof(tex_templ));
515 tex_templ.target = PIPE_TEXTURE_2D;
516 tex_templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
517 tex_templ.last_level = 0;
518 tex_templ.width0 = 2;
519 tex_templ.height0 = 8;
520 tex_templ.depth0 = 1;
521 tex_templ.array_size = 1;
522 tex_templ.usage = PIPE_USAGE_IMMUTABLE;
523 tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
524 tex_templ.flags = 0;
525
526 matrix = pipe->screen->resource_create(pipe->screen, &tex_templ);
527 if (!matrix)
528 goto error_matrix;
529
530 buf_transfer = pipe->get_transfer
531 (
532 pipe, matrix,
533 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
534 &rect
535 );
536 if (!buf_transfer)
537 goto error_transfer;
538
539 pitch = buf_transfer->stride / sizeof(float);
540
541 f = pipe->transfer_map(pipe, buf_transfer);
542 if (!f)
543 goto error_map;
544
545 for(i = 0; i < BLOCK_HEIGHT; ++i)
546 for(j = 0; j < BLOCK_WIDTH; ++j)
547 // transpose and scale
548 f[i * pitch + j] = const_matrix[j][i] * scale;
549
550 pipe->transfer_unmap(pipe, buf_transfer);
551 pipe->transfer_destroy(pipe, buf_transfer);
552
553 memset(&sv_templ, 0, sizeof(sv_templ));
554 u_sampler_view_default_template(&sv_templ, matrix, matrix->format);
555 sv = pipe->create_sampler_view(pipe, matrix, &sv_templ);
556 pipe_resource_reference(&matrix, NULL);
557 if (!sv)
558 goto error_map;
559
560 return sv;
561
562 error_map:
563 pipe->transfer_destroy(pipe, buf_transfer);
564
565 error_transfer:
566 pipe_resource_reference(&matrix, NULL);
567
568 error_matrix:
569 return NULL;
570 }
571
/*
 * Initialize an IDCT instance: stores the context and dimensions,
 * takes references on the matrix and transpose sampler views, and
 * creates the shaders and state objects.  Returns false on failure
 * (note: the sampler-view references are still held in that case).
 */
bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
                  unsigned buffer_width, unsigned buffer_height,
                  unsigned nr_of_render_targets,
                  struct pipe_sampler_view *matrix,
                  struct pipe_sampler_view *transpose)
{
   assert(idct && pipe && matrix);

   idct->pipe = pipe;
   idct->buffer_width = buffer_width;
   idct->buffer_height = buffer_height;
   idct->nr_of_render_targets = nr_of_render_targets;

   pipe_sampler_view_reference(&idct->matrix, matrix);
   pipe_sampler_view_reference(&idct->transpose, transpose);

   if(!init_shaders(idct))
      return false;

   if(!init_state(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   return true;
}
598
599 void
600 vl_idct_cleanup(struct vl_idct *idct)
601 {
602 cleanup_shaders(idct);
603 cleanup_state(idct);
604
605 pipe_sampler_view_reference(&idct->matrix, NULL);
606 }
607
608 bool
609 vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer,
610 struct pipe_sampler_view *source,
611 struct pipe_sampler_view *intermediate,
612 struct pipe_surface *destination)
613 {
614 assert(buffer);
615 assert(idct);
616 assert(source);
617 assert(destination);
618
619 memset(buffer, 0, sizeof(struct vl_idct_buffer));
620
621 pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, idct->matrix);
622 pipe_sampler_view_reference(&buffer->sampler_views.individual.source, source);
623 pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, idct->transpose);
624 pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, intermediate);
625
626 if (!init_intermediate(idct, buffer))
627 return false;
628
629 buffer->viewport.scale[2] = 1;
630 buffer->viewport.scale[3] = 1;
631 buffer->viewport.translate[0] = 0;
632 buffer->viewport.translate[1] = 0;
633 buffer->viewport.translate[2] = 0;
634 buffer->viewport.translate[3] = 0;
635
636 return true;
637 }
638
/* Tear down a buffer set up by vl_idct_init_buffer().
 * NOTE(review): the cbufs are also released inside cleanup_intermediate();
 * the second release of an already-NULL reference is a no-op. */
void
vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for(i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);

   cleanup_intermediate(idct, buffer);
}
651
/*
 * Run the first IDCT stage: bind the shared state, the stage 0 sampler
 * views and the stage 1 shaders, then draw num_instances quads into
 * the intermediate framebuffer.
 */
void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer, unsigned num_instances)
{
   assert(idct);
   assert(buffer);

   idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->bind_blend_state(idct->pipe, idct->blend);
   idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);

   /* first stage */
   idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state);
   idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport);
   idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
   idct->pipe->bind_vs_state(idct->pipe, idct->vs);
   idct->pipe->bind_fs_state(idct->pipe, idct->fs);
   util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}
670
/*
 * Bind the state needed for the second IDCT stage; the actual draw is
 * issued by the caller, whose shaders include the code emitted by
 * vl_idct_stage2_vert_shader()/vl_idct_stage2_frag_shader().
 */
void
vl_idct_prepare_stage2(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct);
   assert(buffer);

   /* second stage */
   idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);
   idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
}
682