[g3dvl] make number of idct render targets configurable
[mesa.git] / src / gallium / auxiliary / vl / vl_idct.c
1 /**************************************************************************
2 *
3 * Copyright 2010 Christian König
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "vl_idct.h"
29 #include "vl_vertex_buffers.h"
30 #include "vl_defines.h"
31 #include "util/u_draw.h"
32 #include <assert.h>
33 #include <pipe/p_context.h>
34 #include <pipe/p_screen.h>
35 #include <util/u_inlines.h>
36 #include <util/u_sampler.h>
37 #include <util/u_format.h>
38 #include <tgsi/tgsi_ureg.h>
39 #include "vl_types.h"
40
41 enum VS_OUTPUT
42 {
43 VS_O_VPOS,
44 VS_O_L_ADDR0,
45 VS_O_L_ADDR1,
46 VS_O_R_ADDR0,
47 VS_O_R_ADDR1
48 };
49
/* 8x8 DCT basis matrix (row 0 is the constant basis, 1/(2*sqrt(2)) ~= 0.353553).
 * vl_idct_upload_matrix() uploads this table transposed and scaled into a
 * 2x8 RGBA32F texture (four coefficients per texel). */
static const float const_matrix[8][8] = {
   { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
   { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
   { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
   { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
   { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
   { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
   { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
};
60
61 static void
62 calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
63 struct ureg_src tc, struct ureg_src start, bool right_side,
64 bool transposed, float size)
65 {
66 unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
67 unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;
68
69 unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
70 unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;
71
72 /*
73 * addr[0..1].(start) = right_side ? start.x : tc.x
74 * addr[0..1].(tc) = right_side ? tc.y : start.y
75 * addr[0..1].z = tc.z
76 * addr[1].(start) += 1.0f / scale
77 */
78 ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
79 ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
80 ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);
81
82 ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
83 ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
84 ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
85 }
86
/* Build the vertex shader shared by both IDCT passes.
 *
 * The shader positions one quad per (sub-)block, rejects empty blocks by
 * moving their vertices off-screen, and precomputes the left/right operand
 * texture addresses consumed by the fragment shaders.  matrix_stage selects
 * whether the left operand addresses the source texture and the right
 * operand the constant matrix (first pass), or vice versa layout for the
 * transpose pass. */
static void *
create_vert_shader(struct vl_idct *idct, bool matrix_stage)
{
   struct ureg_program *shader;
   struct ureg_src vrect, vpos, vblock, eb;
   struct ureg_src scale, blocks_xy;
   struct ureg_dst t_tex, t_start;
   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_tex = ureg_DECL_temporary(shader);
   t_start = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
   /* vblock reuses the zw components of the rect input as the sub-block
    * index inside a macroblock. */
   vblock = ureg_swizzle(vrect, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /* eb carries the empty-block flags for this macroblock. */
   eb = ureg_DECL_vs_input(shader, VS_I_EB);

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    * blocks_xy = (blocks_x, blocks_y)
    *
    * if eb.(vblock.y, vblock.x)
    *    o_vpos.xy = -1
    * else
    *    t_tex = vpos * blocks_xy + vblock
    *    t_start = t_tex * scale
    *    t_tex = t_tex + vrect
    *    o_vpos.xy = t_tex * scale
    *
    *    o_l_addr = calc_addr(...)
    *    o_r_addr = calc_addr(...)
    * endif
    * o_vpos.zw = vpos
    *
    */

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   blocks_xy = ureg_imm2f(shader, idct->blocks_x, idct->blocks_y);

   if (idct->blocks_x > 1 || idct->blocks_y > 1) {
      /* Pick the empty-block flag matching this sub-block: first select a
       * component pair by vblock.y, then a single component by vblock.x.
       * (Assumes eb packs one flag per sub-block in xyzw order — verify
       * against the vertex buffer layout.) */
      ureg_CMP(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY),
               ureg_negate(ureg_scalar(vblock, TGSI_SWIZZLE_Y)),
               ureg_swizzle(eb, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W),
               ureg_swizzle(eb, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y));

      ureg_CMP(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_X),
               ureg_negate(ureg_scalar(vblock, TGSI_SWIZZLE_X)),
               ureg_scalar(ureg_src(t_tex), TGSI_SWIZZLE_Y),
               ureg_scalar(ureg_src(t_tex), TGSI_SWIZZLE_X));

      eb = ureg_src(t_tex);
   }

   ureg_IF(shader, ureg_scalar(eb, TGSI_SWIZZLE_X), &label);

      /* Empty block: push all four vertices outside the viewport so the
       * quad is culled. */
      ureg_MOV(shader, o_vpos, ureg_imm1f(shader, -1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

      ureg_MAD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, blocks_xy, vblock);
      ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);

      ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), vrect);

      ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
      /* z addresses the intermediate 3D texture layer; scaled so the block
       * spreads over nr_of_render_targets layers. */
      ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
         ureg_scalar(vrect, TGSI_SWIZZLE_X),
         ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));

      ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));

      if(matrix_stage) {
         calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
         calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
      } else {
         calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
         calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
      }

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_release_temporary(shader, t_tex);
   ureg_release_temporary(shader, t_start);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}
196
/* Emit instructions deriving a new address pair from saddr, advanced by
 * pos texels along the texcoord axis.  Used to step through successive
 * rows/columns without re-running the full address calculation. */
static void
increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
               struct ureg_src saddr[2], bool right_side, bool transposed,
               int pos, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   /*
    * daddr[0..1].(start) = saddr[0..1].(start)
    * daddr[0..1].(tc) = saddr[0..1].(tc) + pos / size
    */

   ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
   ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
   ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
   ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
}
215
216 static void
217 fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
218 {
219 ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
220 ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
221 }
222
223 static void
224 matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
225 {
226 struct ureg_dst tmp;
227
228 tmp = ureg_DECL_temporary(shader);
229
230 /*
231 * tmp.xy = dot4(m[0][0..1], m[1][0..1])
232 * dst = tmp.x + tmp.y
233 */
234 ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
235 ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
236 ureg_ADD(shader, dst,
237 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
238 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
239
240 ureg_release_temporary(shader, tmp);
241 }
242
243 static void *
244 create_matrix_frag_shader(struct vl_idct *idct)
245 {
246 struct ureg_program *shader;
247
248 struct ureg_src l_addr[2], r_addr[2];
249
250 struct ureg_dst l[4][2], r[2];
251 struct ureg_dst fragment[idct->nr_of_render_targets];
252
253 unsigned i, j;
254
255 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
256 if (!shader)
257 return NULL;
258
259 l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
260 l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
261
262 r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
263 r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
264
265 for (i = 0; i < idct->nr_of_render_targets; ++i)
266 fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
267
268 for (i = 0; i < 4; ++i) {
269 l[i][0] = ureg_DECL_temporary(shader);
270 l[i][1] = ureg_DECL_temporary(shader);
271 }
272
273 r[0] = ureg_DECL_temporary(shader);
274 r[1] = ureg_DECL_temporary(shader);
275
276 for (i = 1; i < 4; ++i) {
277 increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
278 }
279
280 for (i = 0; i < 4; ++i) {
281 struct ureg_src s_addr[2];
282 s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
283 s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
284 fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
285 }
286
287 for (i = 0; i < idct->nr_of_render_targets; ++i) {
288 if(i > 0)
289 increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);
290
291 struct ureg_src s_addr[2] = { ureg_src(r[0]), ureg_src(r[1]) };
292 s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
293 s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
294 fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));
295
296 for (j = 0; j < 4; ++j) {
297 matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
298 }
299 }
300
301 for (i = 0; i < 4; ++i) {
302 ureg_release_temporary(shader, l[i][0]);
303 ureg_release_temporary(shader, l[i][1]);
304 }
305 ureg_release_temporary(shader, r[0]);
306 ureg_release_temporary(shader, r[1]);
307
308 ureg_END(shader);
309
310 return ureg_create_shader_and_destroy(shader, idct->pipe);
311 }
312
313 static void *
314 create_transpose_frag_shader(struct vl_idct *idct)
315 {
316 struct ureg_program *shader;
317
318 struct ureg_src l_addr[2], r_addr[2];
319
320 struct ureg_dst l[2], r[2];
321 struct ureg_dst fragment;
322
323 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
324 if (!shader)
325 return NULL;
326
327 l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
328 l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
329
330 r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
331 r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
332
333 l[0] = ureg_DECL_temporary(shader);
334 l[1] = ureg_DECL_temporary(shader);
335 r[0] = ureg_DECL_temporary(shader);
336 r[1] = ureg_DECL_temporary(shader);
337
338 fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
339 fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));
340
341 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
342
343 matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);
344
345 ureg_release_temporary(shader, l[0]);
346 ureg_release_temporary(shader, l[1]);
347 ureg_release_temporary(shader, r[0]);
348 ureg_release_temporary(shader, r[1]);
349
350 ureg_END(shader);
351
352 return ureg_create_shader_and_destroy(shader, idct->pipe);
353 }
354
/* Create the vertex and fragment shaders of both IDCT passes.
 * On failure, unwinds the shaders created so far in reverse order via the
 * goto ladder and returns false. */
static bool
init_shaders(struct vl_idct *idct)
{
   idct->matrix_vs = create_vert_shader(idct, true);
   if (!idct->matrix_vs)
      goto error_matrix_vs;

   idct->matrix_fs = create_matrix_frag_shader(idct);
   if (!idct->matrix_fs)
      goto error_matrix_fs;

   idct->transpose_vs = create_vert_shader(idct, false);
   if (!idct->transpose_vs)
      goto error_transpose_vs;

   idct->transpose_fs = create_transpose_frag_shader(idct);
   if (!idct->transpose_fs)
      goto error_transpose_fs;

   return true;

/* Each label releases the shader created just before the failing step;
 * fall-through handles the rest. */
error_transpose_fs:
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);

error_transpose_vs:
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);

error_matrix_fs:
   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);

error_matrix_vs:
   return false;
}
388
389 static void
390 cleanup_shaders(struct vl_idct *idct)
391 {
392 idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
393 idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
394 idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
395 idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
396 }
397
398 static bool
399 init_state(struct vl_idct *idct)
400 {
401 struct pipe_sampler_state sampler;
402 struct pipe_rasterizer_state rs_state;
403 unsigned i;
404
405 assert(idct);
406
407 memset(&rs_state, 0, sizeof(rs_state));
408 rs_state.gl_rasterization_rules = false;
409 idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
410 if (!idct->rs_state)
411 goto error_rs_state;
412
413 for (i = 0; i < 2; ++i) {
414 memset(&sampler, 0, sizeof(sampler));
415 sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
416 sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
417 sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
418 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
419 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
420 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
421 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
422 sampler.compare_func = PIPE_FUNC_ALWAYS;
423 sampler.normalized_coords = 1;
424 idct->samplers[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
425 if (!idct->samplers[i])
426 goto error_samplers;
427 }
428
429 return true;
430
431 error_samplers:
432 for (i = 0; i < 2; ++i)
433 if (idct->samplers[i])
434 idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);
435
436 idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
437
438 error_rs_state:
439 return false;
440 }
441
442 static void
443 cleanup_state(struct vl_idct *idct)
444 {
445 unsigned i;
446
447 for (i = 0; i < 2; ++i)
448 idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);
449
450 idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
451 }
452
453 static bool
454 init_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
455 {
456 struct pipe_resource *tex;
457 struct pipe_surface surf_templ;
458 unsigned i;
459
460 assert(idct && buffer);
461
462 tex = buffer->sampler_views.individual.intermediate->texture;
463
464 buffer->fb_state[0].width = tex->width0;
465 buffer->fb_state[0].height = tex->height0;
466 buffer->fb_state[0].nr_cbufs = idct->nr_of_render_targets;
467 for(i = 0; i < idct->nr_of_render_targets; ++i) {
468 memset(&surf_templ, 0, sizeof(surf_templ));
469 surf_templ.format = tex->format;
470 surf_templ.u.tex.first_layer = i;
471 surf_templ.u.tex.last_layer = i;
472 surf_templ.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
473 buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
474 idct->pipe, tex, &surf_templ);
475
476 if (!buffer->fb_state[0].cbufs[i])
477 goto error_surfaces;
478 }
479
480 buffer->viewport[0].scale[0] = tex->width0;
481 buffer->viewport[0].scale[1] = tex->height0;
482
483 return true;
484
485 error_surfaces:
486 for(i = 0; i < idct->nr_of_render_targets; ++i)
487 pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);
488
489 return false;
490 }
491
492 static void
493 cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
494 {
495 unsigned i;
496
497 assert(idct && buffer);
498
499 for(i = 0; i < idct->nr_of_render_targets; ++i)
500 pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);
501
502 pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
503 }
504
505 struct pipe_sampler_view *
506 vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
507 {
508 struct pipe_resource tex_templ, *matrix;
509 struct pipe_sampler_view sv_templ, *sv;
510 struct pipe_transfer *buf_transfer;
511 unsigned i, j, pitch;
512 float *f;
513
514 struct pipe_box rect =
515 {
516 0, 0, 0,
517 BLOCK_WIDTH / 4,
518 BLOCK_HEIGHT,
519 1
520 };
521
522 assert(pipe);
523
524 memset(&tex_templ, 0, sizeof(tex_templ));
525 tex_templ.target = PIPE_TEXTURE_2D;
526 tex_templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
527 tex_templ.last_level = 0;
528 tex_templ.width0 = 2;
529 tex_templ.height0 = 8;
530 tex_templ.depth0 = 1;
531 tex_templ.array_size = 1;
532 tex_templ.usage = PIPE_USAGE_IMMUTABLE;
533 tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
534 tex_templ.flags = 0;
535
536 matrix = pipe->screen->resource_create(pipe->screen, &tex_templ);
537 if (!matrix)
538 goto error_matrix;
539
540 buf_transfer = pipe->get_transfer
541 (
542 pipe, matrix,
543 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
544 &rect
545 );
546 if (!buf_transfer)
547 goto error_transfer;
548
549 pitch = buf_transfer->stride / sizeof(float);
550
551 f = pipe->transfer_map(pipe, buf_transfer);
552 if (!f)
553 goto error_map;
554
555 for(i = 0; i < BLOCK_HEIGHT; ++i)
556 for(j = 0; j < BLOCK_WIDTH; ++j)
557 // transpose and scale
558 f[i * pitch + j] = const_matrix[j][i] * scale;
559
560 pipe->transfer_unmap(pipe, buf_transfer);
561 pipe->transfer_destroy(pipe, buf_transfer);
562
563 memset(&sv_templ, 0, sizeof(sv_templ));
564 u_sampler_view_default_template(&sv_templ, matrix, matrix->format);
565 sv = pipe->create_sampler_view(pipe, matrix, &sv_templ);
566 pipe_resource_reference(&matrix, NULL);
567 if (!sv)
568 goto error_map;
569
570 return sv;
571
572 error_map:
573 pipe->transfer_destroy(pipe, buf_transfer);
574
575 error_transfer:
576 pipe_resource_reference(&matrix, NULL);
577
578 error_matrix:
579 return NULL;
580 }
581
/* Initialize an IDCT context: store the configuration, take a reference
 * on the constant matrix view, then create shaders and pipeline state.
 * Returns false (with shaders cleaned up) if any step fails. */
bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
                  unsigned buffer_width, unsigned buffer_height,
                  unsigned blocks_x, unsigned blocks_y,
                  unsigned nr_of_render_targets,
                  struct pipe_sampler_view *matrix)
{
   assert(idct && pipe && matrix);

   idct->pipe = pipe;
   idct->buffer_width = buffer_width;
   idct->buffer_height = buffer_height;
   idct->blocks_x = blocks_x;
   idct->blocks_y = blocks_y;
   /* Number of color buffers the first pass renders to; made configurable
    * so drivers with fewer MRTs can still run the IDCT. */
   idct->nr_of_render_targets = nr_of_render_targets;
   pipe_sampler_view_reference(&idct->matrix, matrix);

   if(!init_shaders(idct))
      return false;

   if(!init_state(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   return true;
}
608
/* Tear down an IDCT context: destroy shaders and pipeline state, then
 * drop the reference on the constant matrix view. */
void
vl_idct_cleanup(struct vl_idct *idct)
{
   cleanup_shaders(idct);
   cleanup_state(idct);

   pipe_sampler_view_reference(&idct->matrix, NULL);
}
617
618 bool
619 vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer,
620 struct pipe_sampler_view *source,
621 struct pipe_sampler_view *intermediate,
622 struct pipe_surface *destination)
623 {
624 unsigned i;
625
626 assert(buffer);
627 assert(idct);
628 assert(source);
629 assert(destination);
630
631 pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, idct->matrix);
632 pipe_sampler_view_reference(&buffer->sampler_views.individual.source, source);
633 pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, idct->matrix);
634 pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, intermediate);
635
636 if (!init_intermediate(idct, buffer))
637 return false;
638
639 /* init state */
640 buffer->fb_state[1].width = destination->texture->width0;
641 buffer->fb_state[1].height = destination->texture->height0;
642 buffer->fb_state[1].nr_cbufs = 1;
643 pipe_surface_reference(&buffer->fb_state[1].cbufs[0], destination);
644
645 buffer->viewport[1].scale[0] = destination->texture->width0;
646 buffer->viewport[1].scale[1] = destination->texture->height0;
647
648 for(i = 0; i < 2; ++i) {
649 buffer->viewport[i].scale[2] = 1;
650 buffer->viewport[i].scale[3] = 1;
651 buffer->viewport[i].translate[0] = 0;
652 buffer->viewport[i].translate[1] = 0;
653 buffer->viewport[i].translate[2] = 0;
654 buffer->viewport[i].translate[3] = 0;
655
656 buffer->fb_state[i].zsbuf = NULL;
657 }
658
659 return true;
660 }
661
662 void
663 vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
664 {
665 unsigned i;
666
667 assert(idct && buffer);
668
669 for(i = 0; i < idct->nr_of_render_targets; ++i)
670 pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);
671
672 pipe_surface_reference(&buffer->fb_state[1].cbufs[0], NULL);
673
674 cleanup_intermediate(idct, buffer);
675 }
676
/* Run both IDCT passes over the queued blocks.
 *
 * Stage 1 multiplies the source with the constant matrix into the
 * intermediate render targets; stage 2 multiplies with the transposed
 * matrix into the destination.  Both stages draw the same instanced quads
 * (one quad per block, num_instances macroblocks). */
void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer, unsigned num_instances)
{
   unsigned num_verts;

   assert(idct);
   assert(buffer);

   if(num_instances > 0) {
      /* Four vertices (one quad) per block in the blocks_x x blocks_y grid. */
      num_verts = idct->blocks_x * idct->blocks_y * 4;

      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts, 0, num_instances);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts, 0, num_instances);
   }
}