move to four component calculation for idct code
mesa.git: src/gallium/auxiliary/vl/vl_idct.c
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "vl_vertex_buffers.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

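/*
 * The first pass multiplies the coefficients by STAGE1_SCALE and the second
 * pass by STAGE2_SCALE; together they apply SCALE_FACTOR_16_TO_9
 * (32768 / 256), which, going by its name, maps the 16 bit coefficient
 * range of the SNORM source onto the 9 bit result range.
 */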
#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)

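/* number of color buffers the first pass renders to; the shaders below only handle 1 or 8 */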
#define NR_RENDER_TARGETS 1

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START
};

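/*
 * 8x8 DCT basis matrix, one row per frequency band. The entries match
 * c(k)/2 * cos((2n + 1) * k * PI / 16) with c(0) = 1/sqrt(2) and c(k) = 1
 * otherwise; vl_idct_upload_matrix() uploads it transposed.
 */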
static const float const_matrix[8][8] = {
   { 0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   { 0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   { 0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   { 0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   { 0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   { 0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   { 0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};

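/*
 * Vertex shader shared by all passes: it positions the per-block quad on the
 * render target and, when requested, also emits the block relative, texture
 * and start coordinates the fragment shaders use for sampling.
 */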
static void *
create_vert_shader(struct vl_idct *idct, bool calc_src_cords)
{
   struct ureg_program *shader;
   struct ureg_src scale;
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    *
    */
   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->destination->width0,
      (float)BLOCK_HEIGHT / idct->destination->height0);

   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   if(calc_src_cords) {
      o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
      o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
      o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);

      ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
      ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
      ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);
   }

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

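/*
 * Fetch one row (or column, when transposed) of eight values as two adjacent
 * four component texels from a 3D texture, starting at the given start
 * coordinate.
 */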
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2],
           struct ureg_src tc, struct ureg_src sampler,
           struct ureg_src start, struct ureg_src block,
           bool right_side, bool transposed, float size)
{
   struct ureg_dst t_tc;
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   t_tc = ureg_DECL_temporary(shader);
   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc.x = right_side ? tc.x : start.x
    * t_tc.y = right_side ? start.y : tc.y
    * (x and y swap roles when transposed)
    * t_tc.z = layer to sample from (block.x with eight render targets, 0 otherwise)
    * m[0..1] = tex(t_tc++, sampler)
    */
   if(!right_side) {
      ureg_MOV(shader, ureg_writemask(t_tc, wm_start), ureg_scalar(start, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, wm_tc), ureg_scalar(tc, TGSI_SWIZZLE_Y));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc, wm_start), ureg_scalar(start, TGSI_SWIZZLE_Y));
      ureg_MOV(shader, ureg_writemask(t_tc, wm_tc), ureg_scalar(tc, TGSI_SWIZZLE_X));
   }

#if NR_RENDER_TARGETS == 8
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(block, TGSI_SWIZZLE_X));
#else
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_imm1f(shader, 0.0f));
#endif

   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);
   ureg_ADD(shader, ureg_writemask(t_tc, wm_start), ureg_src(t_tc), ureg_imm1f(shader, 1.0f / size));
   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);

   ureg_release_temporary(shader, t_tc);
}

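/*
 * Multiply two rows of eight values (each given as a pair of four component
 * registers) and write the resulting dot product to dst.
 */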
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp[2];
   unsigned i;

   for(i = 0; i < 2; ++i) {
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * tmp[0..1] = dot4(l[0..1], r[0..1])
    * dst = tmp[0] + tmp[1]
    */
   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
      ureg_scalar(ureg_src(tmp[0]), TGSI_SWIZZLE_X),
      ureg_scalar(ureg_src(tmp[1]), TGSI_SWIZZLE_X));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, tmp[i]);
   }
}

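/*
 * Fragment shader for the second pass: multiplies the intermediate result
 * with the matrix once more (the second 1D transform) and scales the final
 * value back with STAGE2_SCALE.
 */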
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct pipe_resource *transpose = idct->textures.individual.transpose;
   struct pipe_resource *intermediate = idct->textures.individual.intermediate;

   struct ureg_program *shader;

   struct ureg_src block, tex, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst m[2][2];
   struct ureg_dst tmp, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_CONSTANT);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   fetch_four(shader, m[0], block, sampler[0], start[0], block, false, false, transpose->width0);
   fetch_four(shader, m[1], tex, sampler[1], start[1], block, true, false, intermediate->height0);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = ureg_DECL_temporary(shader);
   matrix_mul(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), m[0], m[1]);
   ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE));

   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, m[0][0]);
   ureg_release_temporary(shader, m[0][1]);
   ureg_release_temporary(shader, m[1][0]);
   ureg_release_temporary(shader, m[1][1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

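/*
 * Fragment shader for the first pass: fetches four rows of source
 * coefficients, multiplies each of them with a column of the transposed
 * matrix and packs the four dot products into the components of each
 * render target's pixel.
 */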
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct pipe_resource *matrix = idct->textures.individual.matrix;
   struct pipe_resource *source = idct->textures.individual.source;

   struct ureg_program *shader;

   struct ureg_src tex, block, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst l[4][2], r[2];
   struct ureg_dst t_tc, tmp, fragment[NR_RENDER_TARGETS];

   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   t_tc = ureg_DECL_temporary(shader);
   tmp = ureg_DECL_temporary(shader);

   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   for (i = 0; i < NR_RENDER_TARGETS; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   /* pixel center is at 0.5 not 0.0 !!! */
   ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y),
      tex, ureg_imm1f(shader, -2.0f / source->height0));

   for (i = 0; i < 4; ++i) {
      fetch_four(shader, l[i], ureg_src(t_tc), sampler[0], start[0], block, false, false, source->width0);
      ureg_MUL(shader, l[i][0], ureg_src(l[i][0]), ureg_imm1f(shader, STAGE1_SCALE));
      ureg_MUL(shader, l[i][1], ureg_src(l[i][1]), ureg_imm1f(shader, STAGE1_SCALE));
      if(i != 3)
         ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y),
            ureg_src(t_tc), ureg_imm1f(shader, 1.0f / source->height0));
   }

   for (i = 0; i < NR_RENDER_TARGETS; ++i) {

#if NR_RENDER_TARGETS == 8
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 1.0f / BLOCK_WIDTH * i));
      fetch_four(shader, r, ureg_src(t_tc), sampler[1], start[1], block, true, true, matrix->width0);
#elif NR_RENDER_TARGETS == 1
      fetch_four(shader, r, block, sampler[1], start[1], block, true, true, matrix->width0);
#else
#error invalid number of render targets
#endif

      for (j = 0; j < 4; ++j) {
         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
      }
      ureg_release_temporary(shader, r[0]);
      ureg_release_temporary(shader, r[1]);
   }

   ureg_release_temporary(shader, t_tc);
   ureg_release_temporary(shader, tmp);

   for (i = 0; i < 4; ++i) {
      ureg_release_temporary(shader, l[i][0]);
      ureg_release_temporary(shader, l[i][1]);
   }

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

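/* Fragment shader used for empty (all zero) blocks: just clears the destination. */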
static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->matrix_vs = create_vert_shader(idct, true);
   idct->matrix_fs = create_matrix_frag_shader(idct);

   idct->transpose_vs = create_vert_shader(idct, true);
   idct->transpose_fs = create_transpose_frag_shader(idct);

   idct->eb_vs = create_vert_shader(idct, false);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   return
      idct->transpose_vs != NULL && idct->transpose_fs != NULL &&
      idct->matrix_vs != NULL && idct->matrix_fs != NULL &&
      idct->eb_vs != NULL && idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);

   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);

   idct->pipe->delete_vs_state(idct->pipe, idct->eb_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

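/*
 * Create the source and intermediate textures plus the vertex buffers.
 * The source texture packs four coefficients per texel (hence width / 4);
 * the intermediate texture is a 3D texture with one layer per render
 * target of the first pass.
 */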
static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

   memset(&template, 0, sizeof(struct pipe_resource));
   template.last_level = 0;
   template.depth0 = 1;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->destination->width0 / 4;
   template.height0 = idct->destination->height0;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_STREAM;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.target = PIPE_TEXTURE_3D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->destination->width0 / NR_RENDER_TARGETS;
   template.height0 = idct->destination->height0 / 4;
   template.depth0 = NR_RENDER_TARGETS;
   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad = vl_vb_upload_quads(idct->pipe, idct->max_blocks);

   if(idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

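/*
 * Set up the viewports, framebuffer states and samplers for both passes:
 * index 0 renders into the intermediate texture, index 1 into the
 * destination.
 */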
static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   unsigned i;

   idct->viewport[0].scale[0] = idct->textures.individual.intermediate->width0;
   idct->viewport[0].scale[1] = idct->textures.individual.intermediate->height0;

   idct->viewport[1].scale[0] = idct->destination->width0;
   idct->viewport[1].scale[1] = idct->destination->height0;

   idct->fb_state[0].width = idct->textures.individual.intermediate->width0;
   idct->fb_state[0].height = idct->textures.individual.intermediate->height0;

   idct->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->fb_state[0].cbufs[i] = idct->pipe->screen->get_tex_surface(
         idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, i,
         PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
   }

   idct->fb_state[1].width = idct->destination->width0;
   idct->fb_state[1].height = idct->destination->height0;

   idct->fb_state[1].nr_cbufs = 1;
   idct->fb_state[1].cbufs[0] = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   for(i = 0; i < 2; ++i) {
      idct->viewport[i].scale[2] = 1;
      idct->viewport[i].scale[3] = 1;
      idct->viewport[i].translate[0] = 0;
      idct->viewport[i].translate[1] = 0;
      idct->viewport[i].translate[2] = 0;
      idct->viewport[i].translate[3] = 0;

      idct->fb_state[i].zsbuf = NULL;
   }

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->pipe->screen->tex_surface_destroy(idct->fb_state[0].cbufs[i]);
   }

   idct->pipe->screen->tex_surface_destroy(idct->fb_state[1].cbufs[0]);

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}

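/*
 * Upload the DCT matrix transposed into a 2x8 RGBA32F texture, so each
 * texture row holds one column of the matrix and every texel packs four
 * consecutive coefficients of it.
 */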
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH / 4, /* the matrix texture is only two texels wide */
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[j][i]; // transpose

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

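/*
 * Map the source coefficient texture so vl_idct_add_block() can copy blocks
 * into it directly; it stays mapped until the next flush.
 */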
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->textures.individual.source->width0,
      idct->textures.individual.source->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->textures.individual.matrix, matrix);
   pipe_resource_reference(&idct->textures.individual.transpose, matrix);
   pipe_resource_reference(&idct->destination, dst);

   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;

   if(!init_buffers(idct))
      return false;

   if(!init_shaders(idct)) {
      cleanup_buffers(idct);
      return false;
   }

   if(!vl_vb_init(&idct->blocks, idct->max_blocks)) {
      cleanup_shaders(idct);
      cleanup_buffers(idct);
      return false;
   }

   if(!vl_vb_init(&idct->empty_blocks, idct->max_blocks)) {
      vl_vb_cleanup(&idct->blocks);
      cleanup_shaders(idct);
      cleanup_buffers(idct);
      return false;
   }

   init_state(idct);

   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   vl_vb_cleanup(&idct->blocks);
   vl_vb_cleanup(&idct->empty_blocks);
   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

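/*
 * Queue one 8x8 block of coefficients: a non-NULL block is copied into the
 * mapped source texture at its block position, a NULL block is empty
 * (all zero) and only needs to be cleared.
 */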
void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   if(block) {
      tex_pitch = idct->tex_transfer->stride / sizeof(short);
      texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

      for (i = 0; i < BLOCK_HEIGHT; ++i)
         memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));

      vl_vb_add_block(&idct->blocks, false, x, y);
   } else {
      vl_vb_add_block(&idct->empty_blocks, true, x, y);
   }
}

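/*
 * Upload the gathered vertex data, run both render passes over all queued
 * blocks, clear the empty ones, then remap the source texture for the next
 * batch of blocks.
 */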
void
vl_idct_flush(struct vl_idct *idct)
{
   struct pipe_transfer *vec_transfer;
   struct quadf *vectors;
   unsigned num_blocks, num_empty_blocks;

   assert(idct);

   vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &vec_transfer
   );

   num_blocks = vl_vb_upload(&idct->blocks, vectors);
   num_empty_blocks = vl_vb_upload(&idct->empty_blocks, vectors + num_blocks);

   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, vec_transfer);

   xfer_buffers_unmap(idct);

   if(num_blocks > 0) {

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[0]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_blocks * 4);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_blocks * 4);
   }

   if(num_empty_blocks > 0) {

      /* empty block handling */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->bind_vs_state(idct->pipe, idct->eb_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, num_blocks * 4, num_empty_blocks * 4);
   }

   xfer_buffers_map(idct);
}