[g3dvl] switch to r32 float for idct matrix
[mesa.git] src/gallium/auxiliary/vl/vl_idct.c
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

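/*
 * 32768.0f / 256.0f = 128.0f. This scale is applied in the first (transpose)
 * pass to the values sampled from the source texture; as the macro name
 * suggests, it presumably remaps coefficients stored in a 16 bit normalized
 * texture back toward a 9 bit signed range (assumption: the source format is
 * inherited from the destination and is not fixed in this file).
 */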
#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START,
   VS_O_STEP
};

static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};

/* vertices for a quad covering a block */
static const struct vertex2f const_quad[4] = {
   {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
};

static void *
create_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src norm, bs;
   struct ureg_src vrect, vpos;
   struct ureg_dst scale, t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start, o_step;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   norm = ureg_DECL_constant(shader, 0);
   bs = ureg_imm2f(shader, BLOCK_WIDTH, BLOCK_HEIGHT);

   scale = ureg_DECL_temporary(shader);
   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
   o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
   o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);
   o_step = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP);

   /*
    * scale = norm * bs
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    * o_step = norm
    */
   ureg_MUL(shader, ureg_writemask(scale, TGSI_WRITEMASK_XY), norm, bs);

   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), ureg_src(scale));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
   ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, ureg_src(scale));
   ureg_MOV(shader, ureg_writemask(o_step, TGSI_WRITEMASK_XY), norm);

   ureg_release_temporary(shader, t_vpos);
   ureg_release_temporary(shader, scale);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

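/*
 * Emit code that computes one output value of an 8x8 matrix multiplication.
 * Eight texels are fetched from sampler[0], stepping along X from start[0],
 * and eight from sampler[1], stepping along Y from start[1]. Each set of
 * eight is packed into two vec4 registers, optionally multiplied by the
 * given scale, and reduced with two DP4s plus an ADD into dst.x.
 */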
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst,
           struct ureg_src tc[2], struct ureg_src sampler[2],
           struct ureg_src start[2], struct ureg_src step[2],
           float scale[2])
{
   struct ureg_dst t_tc[2], m[2][2], tmp[2];
   unsigned i, j;

   for (i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      for (j = 0; j < 2; ++j)
         m[i][j] = ureg_DECL_temporary(shader);
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * m[0..1][0] = texels from sampler[0], m[0..1][1] = texels from sampler[1]
    * tmp[0..1] = dot4(m[0..1][0], m[0..1][1])
    * dst = tmp[0] + tmp[1]
    */
   ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), start[0]);
   ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_Y), tc[0]);

   ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_X), tc[1]);
   ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), start[1]);

   for (i = 0; i < 2; ++i) {
      for (j = 0; j < 4; ++j) {
         /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
         ureg_TEX(shader, tmp[0], TGSI_TEXTURE_2D, ureg_src(t_tc[0]), sampler[0]);
         ureg_MOV(shader, ureg_writemask(m[i][0], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[0]), TGSI_SWIZZLE_X));

         ureg_TEX(shader, tmp[1], TGSI_TEXTURE_2D, ureg_src(t_tc[1]), sampler[1]);
         ureg_MOV(shader, ureg_writemask(m[i][1], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[1]), TGSI_SWIZZLE_X));

         ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), ureg_src(t_tc[0]), step[0]);
         ureg_ADD(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), ureg_src(t_tc[1]), step[1]);
      }

      if (scale[0] != 1.0f)
         ureg_MUL(shader, m[i][0], ureg_src(m[i][0]), ureg_scalar(ureg_imm1f(shader, scale[0]), TGSI_SWIZZLE_X));

      if (scale[1] != 1.0f)
         ureg_MUL(shader, m[i][1], ureg_src(m[i][1]), ureg_scalar(ureg_imm1f(shader, scale[1]), TGSI_SWIZZLE_X));
   }

   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(m[0][0]), ureg_src(m[0][1]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(m[1][0]), ureg_src(m[1][1]));
   ureg_ADD(shader, ureg_writemask(dst, TGSI_WRITEMASK_X), ureg_src(tmp[0]), ureg_src(tmp[1]));

   for (i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_tc[i]);
      for (j = 0; j < 2; ++j)
         ureg_release_temporary(shader, m[i][j]);
      ureg_release_temporary(shader, tmp[i]);
   }
}

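/*
 * The 8x8 IDCT is separable, so it is run as two render passes (see
 * vl_idct_flush): the "transpose" fragment shader below renders into the
 * intermediate texture, and the "matrix" fragment shader then renders into
 * the destination. Each pass does one half of the transform via matrix_mul;
 * which sampler slot holds the matrix, its transpose, the source or the
 * intermediate data depends on the layout of the texture union in vl_idct.h.
 */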
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2], step[2];
   struct ureg_dst fragment;
   float scale[2];

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   step[0] = ureg_imm1f(shader, 1.0f / BLOCK_HEIGHT);
   step[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP, TGSI_INTERPOLATE_CONSTANT);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 2);

   scale[0] = 1.0f;
   scale[1] = SCALE_FACTOR_16_TO_9;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, fragment, tc, sampler, start, step, scale);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2], step[2];
   struct ureg_dst fragment;
   float scale[2];

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   step[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP, TGSI_INTERPOLATE_CONSTANT);
   step[1] = ureg_imm1f(shader, 1.0f / BLOCK_WIDTH);

   sampler[0] = ureg_DECL_sampler(shader, 3);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   scale[0] = 1.0f;
   scale[1] = 1.0f;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, fragment, tc, sampler, start, step, scale);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

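/*
 * The source texture and the position vertex buffer stay mapped between
 * flushes: they are mapped at the end of vl_idct_init and of vl_idct_flush,
 * so vl_idct_add_block can write coefficients and block positions directly,
 * and are unmapped again at the start of the next flush.
 */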
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->destination->width0,
      idct->destination->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
#if 1
      idct->pipe, idct->textures.individual.source,
#else
      idct->pipe, idct->destination,
#endif
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);

   idct->vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &idct->vec_transfer
   );
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, idct->vec_transfer);

   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

static bool
init_shaders(struct vl_idct *idct)
{
   /* don't create the shaders inside assert(), that would be skipped with NDEBUG */
   idct->vs = create_vert_shader(idct);
   idct->transpose_fs = create_transpose_frag_shader(idct);
   idct->matrix_fs = create_matrix_frag_shader(idct);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   assert(idct->vs && idct->transpose_fs && idct->matrix_fs && idct->eb_fs);

   return
      idct->vs != NULL &&
      idct->transpose_fs != NULL &&
      idct->matrix_fs != NULL &&
      idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;

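   /*
    * Four textures are used: two 8x8 PIPE_FORMAT_R32_FLOAT textures holding
    * the IDCT matrix and its transpose (this is what the switch to an r32
    * float format is about), a dynamic source texture in the destination's
    * format that receives the uploaded coefficients, and a static
    * intermediate texture that holds the result of the first pass.
    */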
   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32_FLOAT;
   template.last_level = 0;
   template.width0 = 8;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   idct->textures.individual.matrix = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
   idct->textures.individual.transpose = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.format = idct->destination->format;
   template.width0 = idct->destination->width0;
   template.height0 = idct->destination->height0;
   template.depth0 = idct->destination->depth0;
   template.usage = PIPE_USAGE_DYNAMIC;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if (idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.quad.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.quad.buffer_offset = 0;
   idct->vertex_bufs.individual.quad.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if (idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if (idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   idct->vs_const_buf = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_CONSTANT_BUFFER,
      sizeof(struct vertex_shader_consts)
   );

   if (idct->vs_const_buf == NULL)
      return false;

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   pipe_resource_reference(&idct->vs_const_buf, NULL);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

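/*
 * Upload the data that never changes: the quad vertices (replicated once per
 * block), the IDCT matrix and its transposed copy into the two 8x8 float
 * textures, and the normalisation constants used by the vertex shader.
 */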
static void
init_constants(struct vl_idct *idct)
{
   struct pipe_transfer *buf_transfer;
   struct vertex_shader_consts *vs_consts;
   struct vertex2f *v;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   unsigned i, j, pitch;

   /* quad vectors */
   v = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.quad.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );
   for (i = 0; i < idct->max_blocks; ++i)
      memcpy(v + i * 4, &const_quad, sizeof(const_quad));
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer);

   /* transposed matrix */
   buf_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.transpose,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);

   f = idct->pipe->transfer_map(idct->pipe, buf_transfer);
   for (i = 0; i < BLOCK_HEIGHT; ++i)
      for (j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[j][i]; // transpose

   idct->pipe->transfer_unmap(idct->pipe, buf_transfer);
   idct->pipe->transfer_destroy(idct->pipe, buf_transfer);

   /* matrix */
   buf_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);

   f = idct->pipe->transfer_map(idct->pipe, buf_transfer);
   for (i = 0; i < BLOCK_HEIGHT; ++i)
      for (j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[i][j];

   idct->pipe->transfer_unmap(idct->pipe, buf_transfer);
   idct->pipe->transfer_destroy(idct->pipe, buf_transfer);

   /* normalisation constants */
   vs_consts = pipe_buffer_map
   (
      idct->pipe, idct->vs_const_buf,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );

   vs_consts->norm.x = 1.0f / idct->destination->width0;
   vs_consts->norm.y = 1.0f / idct->destination->height0;

   pipe_buffer_unmap(idct->pipe, idct->vs_const_buf, buf_transfer);
}

static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   unsigned i;

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;

   idct->viewport.scale[0] = idct->destination->width0;
   idct->viewport.scale[1] = idct->destination->height0;
   idct->viewport.scale[2] = 1;
   idct->viewport.scale[3] = 1;
   idct->viewport.translate[0] = 0;
   idct->viewport.translate[1] = 0;
   idct->viewport.translate[2] = 0;
   idct->viewport.translate[3] = 0;

   idct->fb_state.width = idct->destination->width0;
   idct->fb_state.height = idct->destination->height0;
   idct->fb_state.nr_cbufs = 1;
   idct->fb_state.zsbuf = NULL;

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}

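/*
 * Public entry points. vl_idct_init sets up state, shaders, buffers and the
 * render surfaces, uploads the constants and leaves the transfer buffers
 * mapped so that vl_idct_add_block can be called right away.
 */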
bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->destination, dst);

   init_state(idct);

   if (!init_shaders(idct))
      return false;

   if (!init_buffers(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   idct->surfaces.intermediate = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   idct->surfaces.destination = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   init_constants(idct);
   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.destination);
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.intermediate);

   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

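/*
 * Queue one 8x8 block at block position (x, y). If coefficient data is
 * given, the 16 bit values are copied row by row into the mapped source
 * texture; otherwise the block is only recorded as empty. The four block
 * position vertices are written into the mapped position buffer, non-empty
 * blocks from the front and empty blocks from the back, so vl_idct_flush
 * can draw each group as one contiguous range.
 */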
void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   struct vertex2f v, *v_dst;

   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   if (block) {
      tex_pitch = idct->tex_transfer->stride / util_format_get_blocksize(idct->tex_transfer->resource->format);
      texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

      for (i = 0; i < BLOCK_HEIGHT; ++i)
         memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * 2);

      /* non-empty blocks fill the vector buffer from left to right */
      v_dst = idct->vectors + idct->num_blocks * 4;

      idct->num_blocks++;

   } else {

      /* while empty blocks fill it from right to left */
      v_dst = idct->vectors + (idct->max_blocks - idct->num_empty_blocks) * 4 - 4;

      idct->num_empty_blocks++;
   }

   v.x = x;
   v.y = y;

   for (i = 0; i < 4; ++i) {
      v_dst[i] = v;
   }
}

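/*
 * Render all queued blocks. Non-empty blocks are drawn twice: first with the
 * transpose fragment shader into the intermediate surface, then with the
 * matrix fragment shader into the destination. Empty blocks are cleared to
 * zero in the destination with the empty-block shader, drawn from the back
 * of the position buffer. Afterwards the counters are reset and the transfer
 * buffers are mapped again for the next frame.
 */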
void
vl_idct_flush(struct vl_idct *idct)
{
   xfer_buffers_unmap(idct);

   idct->pipe->set_constant_buffer(idct->pipe, PIPE_SHADER_VERTEX, 0, idct->vs_const_buf);

   if (idct->num_blocks > 0) {

      /* first stage */
      idct->fb_state.cbufs[0] = idct->surfaces.intermediate;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);

      idct->pipe->flush(idct->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);

      /* second stage */
      idct->fb_state.cbufs[0] = idct->surfaces.destination;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
   }

   if (idct->num_empty_blocks > 0) {

      /* empty block handling */
      idct->fb_state.cbufs[0] = idct->surfaces.destination;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS,
                       (idct->max_blocks - idct->num_empty_blocks) * 4,
                       idct->num_empty_blocks * 4);
   }

   idct->pipe->flush(idct->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;
   xfer_buffers_map(idct);
}