ca3b1cb53abb9318641980c30f2c8ae95f03baec
[mesa.git] / src / gallium / auxiliary / vl / vl_idct.c
1 /**************************************************************************
2 *
3 * Copyright 2010 Christian König
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include <pipe/p_context.h>
31 #include <pipe/p_screen.h>
32
33 #include <util/u_draw.h>
34 #include <util/u_sampler.h>
35
36 #include <tgsi/tgsi_ureg.h>
37
38 #include "vl_defines.h"
39 #include "vl_types.h"
40 #include "vl_vertex_buffers.h"
41 #include "vl_idct.h"
42
/*
 * Vertex shader output slots.
 *
 * Besides the position, the vertex shaders emit two texture addresses
 * for each operand of the matrix multiplication: the left (L) and the
 * right (R) operand each get an addr0/addr1 pair, because fetch_four()
 * samples each operand at two adjacent addresses per fragment.
 */
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_L_ADDR0,
   VS_O_L_ADDR1,
   VS_O_R_ADDR0,
   VS_O_R_ADDR1
};
51
/*
 * 8x8 orthonormal DCT-II basis matrix: row k, column n is
 * s(k) * cos((2n + 1) * k * PI / 16), with s(0) = sqrt(1/8) and
 * s(k>0) = 1/2. It is transposed and scaled when uploaded as a
 * texture by vl_idct_upload_matrix().
 *
 * The previous table contained transcription typos (e.g. -0.0975458
 * and 0.277786 in the last row) that broke the matrix's symmetry;
 * all entries now use a consistent 7-digit rounding of the exact
 * coefficients.
 */
static const float const_matrix[8][8] = {
   {  0.3535534f,  0.3535534f,  0.3535534f,  0.3535534f,  0.3535534f,  0.3535534f,  0.3535534f,  0.3535534f },
   {  0.4903926f,  0.4157348f,  0.2777851f,  0.0975452f, -0.0975452f, -0.2777851f, -0.4157348f, -0.4903926f },
   {  0.4619398f,  0.1913417f, -0.1913417f, -0.4619398f, -0.4619398f, -0.1913417f,  0.1913417f,  0.4619398f },
   {  0.4157348f, -0.0975452f, -0.4903926f, -0.2777851f,  0.2777851f,  0.4903926f,  0.0975452f, -0.4157348f },
   {  0.3535534f, -0.3535534f, -0.3535534f,  0.3535534f,  0.3535534f, -0.3535534f, -0.3535534f,  0.3535534f },
   {  0.2777851f, -0.4903926f,  0.0975452f,  0.4157348f, -0.4157348f, -0.0975452f,  0.4903926f, -0.2777851f },
   {  0.1913417f, -0.4619398f,  0.4619398f, -0.1913417f, -0.1913417f,  0.4619398f, -0.4619398f,  0.1913417f },
   {  0.0975452f, -0.2777851f,  0.4157348f, -0.4903926f,  0.4903926f, -0.4157348f,  0.2777851f, -0.0975452f }
};
62
63 static void
64 calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
65 struct ureg_src tc, struct ureg_src start, bool right_side,
66 bool transposed, float size)
67 {
68 unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
69 unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;
70
71 unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
72 unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;
73
74 /*
75 * addr[0..1].(start) = right_side ? start.x : tc.x
76 * addr[0..1].(tc) = right_side ? tc.y : start.y
77 * addr[0..1].z = tc.z
78 * addr[1].(start) += 1.0f / scale
79 */
80 ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
81 ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
82 ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);
83
84 ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
85 ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
86 ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
87 }
88
/*
 * Create the vertex shader used by both IDCT passes.
 *
 * The shader scales the block position into the destination, emits the
 * final vertex position and computes the fetch addresses for the left
 * and right operand of the matrix multiplication via calc_addr().
 * With matrix_stage == true the left operand walks the source texture
 * and the right operand the (transposed) matrix; otherwise the left
 * operand walks the matrix and the right one the intermediate buffer.
 *
 * Returns the compiled shader CSO, or NULL on failure.
 */
static void *
create_vert_shader(struct vl_idct *idct, bool matrix_stage)
{
   struct ureg_program *shader;
   struct ureg_src vrect, vpos;
   struct ureg_src scale;
   struct ureg_dst t_tex, t_start;
   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_tex = ureg_DECL_temporary(shader);
   t_start = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_l_addr = calc_addr(...)
    * o_r_addr = calc_addr(...)
    *
    */

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   /* Block-relative position, scaled into the destination surface. */
   ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
   /* z selects the layer when the blocks are spread over several render
      targets (first stage renders into a layered intermediate). */
   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
      ureg_scalar(vrect, TGSI_SWIZZLE_X),
      ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));

   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   /* Start of the current block in normalized texture coordinates. */
   ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);

   if(matrix_stage) {
      calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
      calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
   } else {
      calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
      calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
   }

   ureg_release_temporary(shader, t_tex);
   ureg_release_temporary(shader, t_start);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}
158
159 static void
160 increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
161 struct ureg_src saddr[2], bool right_side, bool transposed,
162 int pos, float size)
163 {
164 unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
165 unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
166
167 /*
168 * daddr[0..1].(start) = saddr[0..1].(start)
169 * daddr[0..1].(tc) = saddr[0..1].(tc)
170 */
171
172 ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
173 ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
174 ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
175 ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
176 }
177
178 static void
179 fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
180 {
181 ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
182 ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
183 }
184
185 static void
186 matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
187 {
188 struct ureg_dst tmp;
189
190 tmp = ureg_DECL_temporary(shader);
191
192 /*
193 * tmp.xy = dot4(m[0][0..1], m[1][0..1])
194 * dst = tmp.x + tmp.y
195 */
196 ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
197 ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
198 ureg_ADD(shader, dst,
199 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
200 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
201
202 ureg_release_temporary(shader, tmp);
203 }
204
205 static void *
206 create_matrix_frag_shader(struct vl_idct *idct)
207 {
208 struct ureg_program *shader;
209
210 struct ureg_src l_addr[2], r_addr[2];
211
212 struct ureg_dst l[4][2], r[2];
213 struct ureg_dst fragment[idct->nr_of_render_targets];
214
215 unsigned i, j;
216
217 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
218 if (!shader)
219 return NULL;
220
221 l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
222 l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
223
224 r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
225 r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
226
227 for (i = 0; i < idct->nr_of_render_targets; ++i)
228 fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
229
230 for (i = 0; i < 4; ++i) {
231 l[i][0] = ureg_DECL_temporary(shader);
232 l[i][1] = ureg_DECL_temporary(shader);
233 }
234
235 r[0] = ureg_DECL_temporary(shader);
236 r[1] = ureg_DECL_temporary(shader);
237
238 for (i = 1; i < 4; ++i) {
239 increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
240 }
241
242 for (i = 0; i < 4; ++i) {
243 struct ureg_src s_addr[2];
244 s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
245 s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
246 fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
247 }
248
249 for (i = 0; i < idct->nr_of_render_targets; ++i) {
250 if(i > 0)
251 increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);
252
253 struct ureg_src s_addr[2] = { ureg_src(r[0]), ureg_src(r[1]) };
254 s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
255 s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
256 fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));
257
258 for (j = 0; j < 4; ++j) {
259 matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
260 }
261 }
262
263 for (i = 0; i < 4; ++i) {
264 ureg_release_temporary(shader, l[i][0]);
265 ureg_release_temporary(shader, l[i][1]);
266 }
267 ureg_release_temporary(shader, r[0]);
268 ureg_release_temporary(shader, r[1]);
269
270 ureg_END(shader);
271
272 return ureg_create_shader_and_destroy(shader, idct->pipe);
273 }
274
/*
 * Create the fragment shader for the second (transpose) pass.
 *
 * It fetches one operand from the intermediate buffer (sampler 0) and
 * one from the transposed matrix (sampler 1) and writes their dot
 * product into the single color output's x channel.
 *
 * Returns the compiled shader CSO, or NULL on failure.
 */
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[2], r[2];
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   l[0] = ureg_DECL_temporary(shader);
   l[1] = ureg_DECL_temporary(shader);
   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
   fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);

   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}
316
317 static bool
318 init_shaders(struct vl_idct *idct)
319 {
320 idct->matrix_vs = create_vert_shader(idct, true);
321 if (!idct->matrix_vs)
322 goto error_matrix_vs;
323
324 idct->matrix_fs = create_matrix_frag_shader(idct);
325 if (!idct->matrix_fs)
326 goto error_matrix_fs;
327
328 idct->transpose_vs = create_vert_shader(idct, false);
329 if (!idct->transpose_vs)
330 goto error_transpose_vs;
331
332 idct->transpose_fs = create_transpose_frag_shader(idct);
333 if (!idct->transpose_fs)
334 goto error_transpose_fs;
335
336 return true;
337
338 error_transpose_fs:
339 idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
340
341 error_transpose_vs:
342 idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
343
344 error_matrix_fs:
345 idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
346
347 error_matrix_vs:
348 return false;
349 }
350
351 static void
352 cleanup_shaders(struct vl_idct *idct)
353 {
354 idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
355 idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
356 idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
357 idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
358 }
359
360 static bool
361 init_state(struct vl_idct *idct)
362 {
363 struct pipe_blend_state blend;
364 struct pipe_rasterizer_state rs_state;
365 struct pipe_sampler_state sampler;
366 unsigned i;
367
368 assert(idct);
369
370 memset(&rs_state, 0, sizeof(rs_state));
371 rs_state.gl_rasterization_rules = false;
372 idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
373 if (!idct->rs_state)
374 goto error_rs_state;
375
376 memset(&blend, 0, sizeof blend);
377
378 blend.independent_blend_enable = 0;
379 blend.rt[0].blend_enable = 0;
380 blend.rt[0].rgb_func = PIPE_BLEND_ADD;
381 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
382 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
383 blend.rt[0].alpha_func = PIPE_BLEND_ADD;
384 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
385 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
386 blend.logicop_enable = 0;
387 blend.logicop_func = PIPE_LOGICOP_CLEAR;
388 /* Needed to allow color writes to FB, even if blending disabled */
389 blend.rt[0].colormask = PIPE_MASK_RGBA;
390 blend.dither = 0;
391 idct->blend = idct->pipe->create_blend_state(idct->pipe, &blend);
392 if (!idct->blend)
393 goto error_blend;
394
395 for (i = 0; i < 2; ++i) {
396 memset(&sampler, 0, sizeof(sampler));
397 sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
398 sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
399 sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
400 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
401 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
402 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
403 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
404 sampler.compare_func = PIPE_FUNC_ALWAYS;
405 sampler.normalized_coords = 1;
406 idct->samplers[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
407 if (!idct->samplers[i])
408 goto error_samplers;
409 }
410
411 return true;
412
413 error_samplers:
414 for (i = 0; i < 2; ++i)
415 if (idct->samplers[i])
416 idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);
417
418 idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
419
420 error_blend:
421 idct->pipe->delete_blend_state(idct->pipe, idct->blend);
422
423 error_rs_state:
424 return false;
425 }
426
427 static void
428 cleanup_state(struct vl_idct *idct)
429 {
430 unsigned i;
431
432 for (i = 0; i < 2; ++i)
433 idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);
434
435 idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
436 idct->pipe->delete_blend_state(idct->pipe, idct->blend);
437 }
438
439 static bool
440 init_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
441 {
442 struct pipe_resource *tex;
443 struct pipe_surface surf_templ;
444 unsigned i;
445
446 assert(idct && buffer);
447
448 tex = buffer->sampler_views.individual.intermediate->texture;
449
450 buffer->fb_state[0].width = tex->width0;
451 buffer->fb_state[0].height = tex->height0;
452 buffer->fb_state[0].nr_cbufs = idct->nr_of_render_targets;
453 for(i = 0; i < idct->nr_of_render_targets; ++i) {
454 memset(&surf_templ, 0, sizeof(surf_templ));
455 surf_templ.format = tex->format;
456 surf_templ.u.tex.first_layer = i;
457 surf_templ.u.tex.last_layer = i;
458 surf_templ.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
459 buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
460 idct->pipe, tex, &surf_templ);
461
462 if (!buffer->fb_state[0].cbufs[i])
463 goto error_surfaces;
464 }
465
466 buffer->viewport[0].scale[0] = tex->width0;
467 buffer->viewport[0].scale[1] = tex->height0;
468
469 return true;
470
471 error_surfaces:
472 for(i = 0; i < idct->nr_of_render_targets; ++i)
473 pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);
474
475 return false;
476 }
477
478 static void
479 cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
480 {
481 unsigned i;
482
483 assert(idct && buffer);
484
485 for(i = 0; i < idct->nr_of_render_targets; ++i)
486 pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);
487
488 pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
489 }
490
491 struct pipe_sampler_view *
492 vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
493 {
494 struct pipe_resource tex_templ, *matrix;
495 struct pipe_sampler_view sv_templ, *sv;
496 struct pipe_transfer *buf_transfer;
497 unsigned i, j, pitch;
498 float *f;
499
500 struct pipe_box rect =
501 {
502 0, 0, 0,
503 BLOCK_WIDTH / 4,
504 BLOCK_HEIGHT,
505 1
506 };
507
508 assert(pipe);
509
510 memset(&tex_templ, 0, sizeof(tex_templ));
511 tex_templ.target = PIPE_TEXTURE_2D;
512 tex_templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
513 tex_templ.last_level = 0;
514 tex_templ.width0 = 2;
515 tex_templ.height0 = 8;
516 tex_templ.depth0 = 1;
517 tex_templ.array_size = 1;
518 tex_templ.usage = PIPE_USAGE_IMMUTABLE;
519 tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
520 tex_templ.flags = 0;
521
522 matrix = pipe->screen->resource_create(pipe->screen, &tex_templ);
523 if (!matrix)
524 goto error_matrix;
525
526 buf_transfer = pipe->get_transfer
527 (
528 pipe, matrix,
529 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
530 &rect
531 );
532 if (!buf_transfer)
533 goto error_transfer;
534
535 pitch = buf_transfer->stride / sizeof(float);
536
537 f = pipe->transfer_map(pipe, buf_transfer);
538 if (!f)
539 goto error_map;
540
541 for(i = 0; i < BLOCK_HEIGHT; ++i)
542 for(j = 0; j < BLOCK_WIDTH; ++j)
543 // transpose and scale
544 f[i * pitch + j] = const_matrix[j][i] * scale;
545
546 pipe->transfer_unmap(pipe, buf_transfer);
547 pipe->transfer_destroy(pipe, buf_transfer);
548
549 memset(&sv_templ, 0, sizeof(sv_templ));
550 u_sampler_view_default_template(&sv_templ, matrix, matrix->format);
551 sv = pipe->create_sampler_view(pipe, matrix, &sv_templ);
552 pipe_resource_reference(&matrix, NULL);
553 if (!sv)
554 goto error_map;
555
556 return sv;
557
558 error_map:
559 pipe->transfer_destroy(pipe, buf_transfer);
560
561 error_transfer:
562 pipe_resource_reference(&matrix, NULL);
563
564 error_matrix:
565 return NULL;
566 }
567
568 bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
569 unsigned buffer_width, unsigned buffer_height,
570 unsigned nr_of_render_targets,
571 struct pipe_sampler_view *matrix,
572 struct pipe_sampler_view *transpose)
573 {
574 assert(idct && pipe && matrix);
575
576 idct->pipe = pipe;
577 idct->buffer_width = buffer_width;
578 idct->buffer_height = buffer_height;
579 idct->nr_of_render_targets = nr_of_render_targets;
580
581 pipe_sampler_view_reference(&idct->matrix, matrix);
582 pipe_sampler_view_reference(&idct->transpose, transpose);
583
584 if(!init_shaders(idct))
585 return false;
586
587 if(!init_state(idct)) {
588 cleanup_shaders(idct);
589 return false;
590 }
591
592 return true;
593 }
594
595 void
596 vl_idct_cleanup(struct vl_idct *idct)
597 {
598 cleanup_shaders(idct);
599 cleanup_state(idct);
600
601 pipe_sampler_view_reference(&idct->matrix, NULL);
602 }
603
604 bool
605 vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer,
606 struct pipe_sampler_view *source,
607 struct pipe_sampler_view *intermediate,
608 struct pipe_surface *destination)
609 {
610 unsigned i;
611
612 assert(buffer);
613 assert(idct);
614 assert(source);
615 assert(destination);
616
617 pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, idct->matrix);
618 pipe_sampler_view_reference(&buffer->sampler_views.individual.source, source);
619 pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, idct->transpose);
620 pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, intermediate);
621
622 if (!init_intermediate(idct, buffer))
623 return false;
624
625 /* init state */
626 buffer->fb_state[1].width = destination->texture->width0;
627 buffer->fb_state[1].height = destination->texture->height0;
628 buffer->fb_state[1].nr_cbufs = 1;
629 pipe_surface_reference(&buffer->fb_state[1].cbufs[0], destination);
630
631 buffer->viewport[1].scale[0] = destination->texture->width0;
632 buffer->viewport[1].scale[1] = destination->texture->height0;
633
634 for(i = 0; i < 2; ++i) {
635 buffer->viewport[i].scale[2] = 1;
636 buffer->viewport[i].scale[3] = 1;
637 buffer->viewport[i].translate[0] = 0;
638 buffer->viewport[i].translate[1] = 0;
639 buffer->viewport[i].translate[2] = 0;
640 buffer->viewport[i].translate[3] = 0;
641
642 buffer->fb_state[i].zsbuf = NULL;
643 }
644
645 return true;
646 }
647
648 void
649 vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
650 {
651 unsigned i;
652
653 assert(idct && buffer);
654
655 for(i = 0; i < idct->nr_of_render_targets; ++i)
656 pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);
657
658 pipe_surface_reference(&buffer->fb_state[1].cbufs[0], NULL);
659
660 cleanup_intermediate(idct, buffer);
661 }
662
/*
 * Render the two IDCT passes for 'num_instances' blocks.
 *
 * Pass 1 multiplies the source by the matrix into the layered
 * intermediate buffer; pass 2 multiplies by the transposed matrix into
 * the destination surface. Both passes draw one instanced quad using
 * the shared rasterizer, blend and sampler state. A zero instance
 * count is a no-op.
 */
void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer, unsigned num_instances)
{
   assert(idct);
   assert(buffer);

   if(num_instances > 0) {
      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
      idct->pipe->bind_blend_state(idct->pipe, idct->blend);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
   }
}