make nr of render targets configurable for testing
[mesa.git] / src / gallium / auxiliary / vl / vl_idct.c
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

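/* The per-stage scales multiply out to SCALE_FACTOR_16_TO_9
 * (STAGE1_SCALE * STAGE2_SCALE == 32768.0f / 256.0f); judging by the name,
 * this is the overall scaling between the 16 bit SNORM textures and the
 * 9 bit signed IDCT result, split so the intermediate values presumably
 * stay within the range of the R16_SNORM intermediate texture. */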
#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)

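/* Number of color buffers the first pass renders to. The matrix fragment
 * shader below only handles 1 or 8 (see the #error check); this define just
 * makes the count easy to change for testing. */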
#define NR_RENDER_TARGETS 1

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START
};

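/* 8x8 orthonormal (I)DCT constant matrix, uploaded in transposed form by
 * vl_idct_upload_matrix() */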
static const float const_matrix[8][8] = {
   { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
   { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
   { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
   { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
   { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
   { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
   { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
};

/* vertices for a quad covering a block */
static const struct vertex2f const_quad[4] = {
   {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
};

static void *
create_vert_shader(struct vl_idct *idct, bool calc_src_cords)
{
   struct ureg_program *shader;
   struct ureg_src scale;
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    *
    */
   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->destination->width0,
      (float)BLOCK_HEIGHT / idct->destination->height0);

   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   if(calc_src_cords) {
      o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
      o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
      o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);

      ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
      ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
      ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);
   }

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

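/* fetch eight values, one texel at a time while stepping in y, from a 3D
 * texture into the two vec4 temporaries m[0..1] */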
static void
fetch_one(struct ureg_program *shader, struct ureg_dst m[2],
          struct ureg_src tc, struct ureg_src sampler,
          struct ureg_src start, struct ureg_src block, float height)
{
   struct ureg_dst t_tc, tmp;
   unsigned i, j;

   t_tc = ureg_DECL_temporary(shader);
   tmp = ureg_DECL_temporary(shader);

   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc.x = tc.x
    * t_tc.y = start.y
    * t_tc.z = block.x if NR_RENDER_TARGETS == 8, else 0
    * m[0..1].xyzw = tex(t_tc++, sampler)
    */
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(tc, TGSI_SWIZZLE_X));
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(start, TGSI_SWIZZLE_Y));

#if NR_RENDER_TARGETS == 8
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(block, TGSI_SWIZZLE_X));
#else
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_imm1f(shader, 0.0f));
#endif

   for(i = 0; i < 2; ++i) {
      for(j = 0; j < 4; ++j) {
         /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
         ureg_TEX(shader, tmp, TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);
         ureg_MOV(shader, ureg_writemask(m[i], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));

         if(i != 1 || j != 3) /* skip the last add */
            ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y),
               ureg_src(t_tc), ureg_imm1f(shader, 1.0f / height));
      }
   }

   ureg_release_temporary(shader, t_tc);
   ureg_release_temporary(shader, tmp);
}

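/* fetch eight values as two horizontally adjacent vec4 texels from a 2D
 * texture into m[0..1]; right_side swaps how start/tc form the coordinate
 * and is set when fetching the right-hand operand of the multiplication */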
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2],
           struct ureg_src tc, struct ureg_src sampler,
           struct ureg_src start, bool right_side, float width)
{
   struct ureg_dst t_tc;

   t_tc = ureg_DECL_temporary(shader);
   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc.x = right_side ? start.y : start.x
    * t_tc.y = right_side ? tc.x : tc.y
    * m[0..1] = tex(t_tc++, sampler)
    */
   if(right_side) {
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_Y));
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_X));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_Y));
   }

   ureg_TEX(shader, m[0], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);
   ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_src(t_tc), ureg_imm1f(shader, 1.0f / width));
   ureg_TEX(shader, m[1], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);

   ureg_release_temporary(shader, t_tc);
}

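/* dst = dot product of the two eight-element vectors held in l[0..1] and
 * r[0..1] */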
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp[2];
   unsigned i;

   for(i = 0; i < 2; ++i) {
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * tmp[0..1] = dot4(l[0..1], r[0..1])
    * dst = tmp[0] + tmp[1]
    */
   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst, ureg_src(tmp[0]), ureg_src(tmp[1]));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, tmp[i]);
   }
}

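/* fragment shader for the second pass: multiplies the intermediate result
 * with the transposed matrix and writes the final value, scaled by
 * STAGE2_SCALE, to the destination */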
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct pipe_resource *transpose = idct->textures.individual.transpose;
   struct pipe_resource *intermediate = idct->textures.individual.intermediate;

   struct ureg_program *shader;

   struct ureg_src block, tex, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst m[2][2];
   struct ureg_dst tmp, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_CONSTANT);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   fetch_four(shader, m[0], block, sampler[0], start[0], false, transpose->width0);
   fetch_one(shader, m[1], tex, sampler[1], start[1], block, intermediate->height0);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = ureg_DECL_temporary(shader);
   matrix_mul(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), m[0], m[1]);
   ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE));

   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, m[0][0]);
   ureg_release_temporary(shader, m[0][1]);
   ureg_release_temporary(shader, m[1][0]);
   ureg_release_temporary(shader, m[1][1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

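/* fragment shader for the first pass: multiplies the source coefficients
 * with the matrix and distributes the intermediate result over
 * NR_RENDER_TARGETS color buffers */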
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct pipe_resource *matrix = idct->textures.individual.matrix;
   struct pipe_resource *source = idct->textures.individual.source;

   struct ureg_program *shader;

   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2];

   struct ureg_dst l[2], r[2];
   struct ureg_dst t_tc, tmp, fragment[NR_RENDER_TARGETS];

   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   t_tc = ureg_DECL_temporary(shader);
   tmp = ureg_DECL_temporary(shader);

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   for (i = 0; i < NR_RENDER_TARGETS; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   fetch_four(shader, l, tc[0], sampler[0], start[0], false, source->width0);
   ureg_MUL(shader, l[0], ureg_src(l[0]), ureg_scalar(ureg_imm1f(shader, STAGE1_SCALE), TGSI_SWIZZLE_X));
   ureg_MUL(shader, l[1], ureg_src(l[1]), ureg_scalar(ureg_imm1f(shader, STAGE1_SCALE), TGSI_SWIZZLE_X));

   for (i = 0; i < NR_RENDER_TARGETS; ++i) {

#if NR_RENDER_TARGETS == 8
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 1.0f / BLOCK_WIDTH * i));
      fetch_four(shader, r, ureg_src(t_tc), sampler[1], start[1], true, matrix->width0);
#elif NR_RENDER_TARGETS == 1
      fetch_four(shader, r, tc[1], sampler[1], start[1], true, matrix->width0);
#else
#error invalid number of render targets
#endif

      matrix_mul(shader, fragment[i], l, r);
      ureg_release_temporary(shader, r[0]);
      ureg_release_temporary(shader, r[1]);
   }

   ureg_release_temporary(shader, t_tc);
   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->matrix_vs = create_vert_shader(idct, true);
   idct->matrix_fs = create_matrix_frag_shader(idct);

   idct->transpose_vs = create_vert_shader(idct, true);
   idct->transpose_fs = create_transpose_frag_shader(idct);

   idct->eb_vs = create_vert_shader(idct, false);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   return
      idct->transpose_vs != NULL && idct->transpose_fs != NULL &&
      idct->matrix_vs != NULL && idct->matrix_fs != NULL &&
      idct->eb_vs != NULL && idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);

   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);

   idct->pipe->delete_vs_state(idct->pipe, idct->eb_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;

   memset(&template, 0, sizeof(struct pipe_resource));
   template.last_level = 0;
   template.depth0 = 1;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

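   /* source texture: packs four 16 bit coefficients per RGBA texel,
      hence width0 / 4 */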
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->destination->width0 / 4;
   template.height0 = idct->destination->height0;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_STREAM;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

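   /* intermediate texture: a 3D texture holding the first pass result,
      one layer per render target */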
   template.target = PIPE_TEXTURE_3D;
   template.format = PIPE_FORMAT_R16_SNORM;
   template.width0 = idct->destination->width0 / NR_RENDER_TARGETS;
   template.depth0 = NR_RENDER_TARGETS;
   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.quad.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.quad.buffer_offset = 0;
   idct->vertex_bufs.individual.quad.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

static void
init_constants(struct vl_idct *idct)
{
   struct pipe_transfer *buf_transfer;
   struct vertex2f *v;

   unsigned i;

   /* quad vectors */
   v = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.quad.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );
   for (i = 0; i < idct->max_blocks; ++i)
      memcpy(v + i * 4, &const_quad, sizeof(const_quad));
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer);
}

static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   unsigned i;

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;

   idct->viewport[0].scale[0] = idct->textures.individual.intermediate->width0;
   idct->viewport[0].scale[1] = idct->textures.individual.intermediate->height0;

   idct->viewport[1].scale[0] = idct->destination->width0;
   idct->viewport[1].scale[1] = idct->destination->height0;

   idct->fb_state[0].width = idct->textures.individual.intermediate->width0;
   idct->fb_state[0].height = idct->textures.individual.intermediate->height0;

   idct->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->fb_state[0].cbufs[i] = idct->pipe->screen->get_tex_surface(
         idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, i,
         PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
   }

   idct->fb_state[1].width = idct->destination->width0;
   idct->fb_state[1].height = idct->destination->height0;

   idct->fb_state[1].nr_cbufs = 1;
   idct->fb_state[1].cbufs[0] = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   for(i = 0; i < 2; ++i) {
      idct->viewport[i].scale[2] = 1;
      idct->viewport[i].scale[3] = 1;
      idct->viewport[i].translate[0] = 0;
      idct->viewport[i].translate[1] = 0;
      idct->viewport[i].translate[2] = 0;
      idct->viewport[i].translate[3] = 0;

      idct->fb_state[i].zsbuf = NULL;
   }

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->pipe->screen->tex_surface_destroy(idct->fb_state[0].cbufs[i]);
   }

   idct->pipe->screen->tex_surface_destroy(idct->fb_state[1].cbufs[0]);

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}

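/* upload the transposed (I)DCT matrix as a 2x8 RGBA32F texture
   (two vec4 texels per row of eight floats) */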
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[j][i]; // transpose

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

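/* map the source texture and the position vertex buffer for CPU writes and
   reset the tracking of the current run of empty blocks */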
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->textures.individual.source->width0,
      idct->textures.individual.source->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);

   idct->vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &idct->vec_transfer
   );

   idct->next_empty_block.l_x = ~1;
   idct->next_empty_block.l_y = ~1;
   idct->next_empty_block.r_x = ~1;
   idct->next_empty_block.r_y = ~1;
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, idct->vec_transfer);

   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->textures.individual.matrix, matrix);
   pipe_resource_reference(&idct->textures.individual.transpose, matrix);
   pipe_resource_reference(&idct->destination, dst);

   if(!init_buffers(idct))
      return false;

   if(!init_shaders(idct)) {
      cleanup_buffers(idct);
      return false;
   }

   init_state(idct);

   init_constants(idct);
   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

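/* merge horizontally adjacent empty blocks into a single quad; finished runs
   are written to the end of the position buffer, growing from right to left */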
static void
flush_empty_block(struct vl_idct *idct, unsigned new_x, unsigned new_y)
{
   if (idct->next_empty_block.l_x == ~1 ||
       idct->next_empty_block.l_y == ~1) {

      idct->next_empty_block.l_x = new_x;
      idct->next_empty_block.l_y = new_y;

   } else if (idct->next_empty_block.r_x != (new_x - 1) ||
              idct->next_empty_block.r_y != new_y) {

      struct vertex2f l, r, *v_dst;

      v_dst = idct->vectors + (idct->max_blocks - idct->num_empty_blocks) * 4 - 4;

      l.x = idct->next_empty_block.l_x;
      l.y = idct->next_empty_block.l_y;
      r.x = idct->next_empty_block.r_x;
      r.y = idct->next_empty_block.r_y;
      v_dst[0] = v_dst[3] = l;
      v_dst[1] = v_dst[2] = r;

      idct->next_empty_block.l_x = new_x;
      idct->next_empty_block.l_y = new_y;
      idct->num_empty_blocks++;
   }

   idct->next_empty_block.r_x = new_x;
   idct->next_empty_block.r_y = new_y;
}

void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   struct vertex2f v, *v_dst;

   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   if(block) {
      tex_pitch = idct->tex_transfer->stride / sizeof(short);
      texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

      for (i = 0; i < BLOCK_HEIGHT; ++i)
         memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));

      /* non-empty blocks fill the vector buffer from left to right */
      v_dst = idct->vectors + idct->num_blocks * 4;

      idct->num_blocks++;

      v.x = x;
      v.y = y;

      for (i = 0; i < 4; ++i) {
         v_dst[i] = v;
      }

   } else {

      /* while empty blocks fill the vector buffer from right to left */
      flush_empty_block(idct, x, y);
   }
}

void
vl_idct_flush(struct vl_idct *idct)
{
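   /* write out any still pending run of empty blocks */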
   flush_empty_block(idct, ~1, ~1);
   xfer_buffers_unmap(idct);

   if(idct->num_blocks > 0) {

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[0]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
   }

   if(idct->num_empty_blocks > 0) {

      /* empty block handling */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->bind_vs_state(idct->pipe, idct->eb_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS,
         (idct->max_blocks - idct->num_empty_blocks) * 4,
         idct->num_empty_blocks * 4);
   }

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;
   xfer_buffers_map(idct);
}