move empty block handling back into mc for testing
[mesa.git] / src / gallium / auxiliary / vl / vl_idct.c
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "vl_vertex_buffers.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)

#define NR_RENDER_TARGETS 1

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START
};

static const float const_matrix[8][8] = {
   { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
   { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
   { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
   { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
   { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
   { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
   { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
};

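/*
 * Build the vertex shader shared by all passes: it expands each block's quad
 * from the rect/position vertex streams and scales it to the destination
 * size. When calc_src_cords is set it additionally emits the block-relative
 * coordinates, texture coordinates and block start used by the fragment
 * shaders.
 */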
static void *
create_vert_shader(struct vl_idct *idct, bool calc_src_cords)
{
   struct ureg_program *shader;
   struct ureg_src scale;
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    *
    */
   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->destination->width0,
      (float)BLOCK_HEIGHT / idct->destination->height0);

   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   if(calc_src_cords) {
      o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
      o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
      o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);

      ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
      ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
      ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);
   }

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

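/*
 * Emit code that fetches one packed row (or column, when transposed) of an
 * 8x8 block: two neighbouring texels of four components each are sampled
 * from a 3D texture and returned in m[0] and m[1]. The z slice is taken from
 * the block coordinate when eight render targets are used, otherwise 0.
 */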
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2],
           struct ureg_src tc, struct ureg_src sampler,
           struct ureg_src start, struct ureg_src block,
           bool right_side, bool transposed, float size)
{
   struct ureg_dst t_tc;
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   t_tc = ureg_DECL_temporary(shader);
   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc.x = right_side ? tc.x : start.x
    * t_tc.y = right_side ? start.y : tc.y
    * (x and y swap roles when transposed)
    * m[0..1] = tex(t_tc++, sampler)
    */
   if(!right_side) {
      ureg_MOV(shader, ureg_writemask(t_tc, wm_start), ureg_scalar(start, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, wm_tc), ureg_scalar(tc, TGSI_SWIZZLE_Y));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc, wm_start), ureg_scalar(start, TGSI_SWIZZLE_Y));
      ureg_MOV(shader, ureg_writemask(t_tc, wm_tc), ureg_scalar(tc, TGSI_SWIZZLE_X));
   }

#if NR_RENDER_TARGETS == 8
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(block, TGSI_SWIZZLE_X));
#else
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_imm1f(shader, 0.0f));
#endif

   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);
   ureg_ADD(shader, ureg_writemask(t_tc, wm_start), ureg_src(t_tc), ureg_imm1f(shader, 1.0f / size));
   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);

   ureg_release_temporary(shader, t_tc);
}

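/*
 * Emit an eight-element dot product between the values fetched into l[0..1]
 * and r[0..1]; used by both fragment shaders below.
 */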
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp[2];
   unsigned i;

   for(i = 0; i < 2; ++i) {
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * tmp[0..1] = dot4(l[0..1], r[0..1])
    * dst = tmp[0] + tmp[1]
    */
   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
      ureg_scalar(ureg_src(tmp[0]), TGSI_SWIZZLE_X),
      ureg_scalar(ureg_src(tmp[1]), TGSI_SWIZZLE_X));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, tmp[i]);
   }
}

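/*
 * Build the fragment shader for the second pass: it multiplies a row of the
 * transpose matrix texture with a column of the intermediate result and
 * writes one destination texel, scaled by STAGE2_SCALE.
 */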
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct pipe_resource *transpose = idct->textures.individual.transpose;
   struct pipe_resource *intermediate = idct->textures.individual.intermediate;

   struct ureg_program *shader;

   struct ureg_src block, tex, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst m[2][2];
   struct ureg_dst tmp, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_CONSTANT);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   fetch_four(shader, m[0], block, sampler[0], start[0], block, false, false, transpose->width0);
   fetch_four(shader, m[1], tex, sampler[1], start[1], block, true, false, intermediate->height0);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = ureg_DECL_temporary(shader);
   matrix_mul(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), m[0], m[1]);
   ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE));

   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, m[0][0]);
   ureg_release_temporary(shader, m[0][1]);
   ureg_release_temporary(shader, m[1][0]);
   ureg_release_temporary(shader, m[1][1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

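/*
 * Build the fragment shader for the first pass: it multiplies four rows of
 * source coefficients (scaled by STAGE1_SCALE) with a column of the matrix
 * texture and packs the four results into one texel of the intermediate
 * render target(s).
 */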
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct pipe_resource *matrix = idct->textures.individual.matrix;
   struct pipe_resource *source = idct->textures.individual.source;

   struct ureg_program *shader;

   struct ureg_src tex, block, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst l[4][2], r[2];
   struct ureg_dst t_tc, tmp, fragment[NR_RENDER_TARGETS];

   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   t_tc = ureg_DECL_temporary(shader);
   tmp = ureg_DECL_temporary(shader);

   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   for (i = 0; i < NR_RENDER_TARGETS; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), tex);
   for (i = 0; i < 4; ++i) {
      fetch_four(shader, l[i], ureg_src(t_tc), sampler[0], start[0], block, false, false, source->width0);
      ureg_MUL(shader, l[i][0], ureg_src(l[i][0]), ureg_imm1f(shader, STAGE1_SCALE));
      ureg_MUL(shader, l[i][1], ureg_src(l[i][1]), ureg_imm1f(shader, STAGE1_SCALE));
      if(i != 3)
         ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y),
            ureg_src(t_tc), ureg_imm1f(shader, 1.0f / source->height0));
   }

   for (i = 0; i < NR_RENDER_TARGETS; ++i) {

#if NR_RENDER_TARGETS == 8
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 1.0f / BLOCK_WIDTH * i));
      fetch_four(shader, r, ureg_src(t_tc), sampler[1], start[1], block, true, true, matrix->width0);
#elif NR_RENDER_TARGETS == 1
      fetch_four(shader, r, block, sampler[1], start[1], block, true, true, matrix->width0);
#else
#error invalid number of render targets
#endif

      for (j = 0; j < 4; ++j) {
         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
      }
      ureg_release_temporary(shader, r[0]);
      ureg_release_temporary(shader, r[1]);
   }

   ureg_release_temporary(shader, t_tc);
   ureg_release_temporary(shader, tmp);

   for (i = 0; i < 4; ++i) {
      ureg_release_temporary(shader, l[i][0]);
      ureg_release_temporary(shader, l[i][1]);
   }

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

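/*
 * Build the trivial fragment shader used for empty blocks: it just writes
 * zero to the output.
 */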
static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->matrix_vs = create_vert_shader(idct, true);
   idct->matrix_fs = create_matrix_frag_shader(idct);

   idct->transpose_vs = create_vert_shader(idct, true);
   idct->transpose_fs = create_transpose_frag_shader(idct);

   idct->eb_vs = create_vert_shader(idct, false);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   return
      idct->transpose_vs != NULL && idct->transpose_fs != NULL &&
      idct->matrix_vs != NULL && idct->matrix_fs != NULL &&
      idct->eb_vs != NULL && idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);

   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);

   idct->pipe->delete_vs_state(idct->pipe, idct->eb_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

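/*
 * Allocate the per-instance resources: the streamed source coefficient
 * texture (four 16 bit coefficients per texel), the intermediate render
 * target, sampler views for all textures, the quad vertex buffer, the
 * per-block position vertex buffer and the vertex element state.
 */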
static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

   memset(&template, 0, sizeof(struct pipe_resource));
   template.last_level = 0;
   template.depth0 = 1;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->destination->width0 / 4;
   template.height0 = idct->destination->height0;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_STREAM;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.target = PIPE_TEXTURE_3D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->destination->width0 / NR_RENDER_TARGETS;
   template.height0 = idct->destination->height0 / 4;
   template.depth0 = NR_RENDER_TARGETS;
   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad = vl_vb_upload_quads(idct->pipe, idct->max_blocks);

   if(idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

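/*
 * Set up the fixed pipeline state: one framebuffer/viewport pair per pass
 * (intermediate texture for the first pass, destination for the second),
 * nearest-filtering samplers for all textures and the rasterizer state.
 */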
static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   struct pipe_rasterizer_state rs_state;
   unsigned i;

   idct->viewport[0].scale[0] = idct->textures.individual.intermediate->width0;
   idct->viewport[0].scale[1] = idct->textures.individual.intermediate->height0;

   idct->viewport[1].scale[0] = idct->destination->width0;
   idct->viewport[1].scale[1] = idct->destination->height0;

   idct->fb_state[0].width = idct->textures.individual.intermediate->width0;
   idct->fb_state[0].height = idct->textures.individual.intermediate->height0;

   idct->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->fb_state[0].cbufs[i] = idct->pipe->screen->get_tex_surface(
         idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, i,
         PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
   }

   idct->fb_state[1].width = idct->destination->width0;
   idct->fb_state[1].height = idct->destination->height0;

   idct->fb_state[1].nr_cbufs = 1;
   idct->fb_state[1].cbufs[0] = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   for(i = 0; i < 2; ++i) {
      idct->viewport[i].scale[2] = 1;
      idct->viewport[i].scale[3] = 1;
      idct->viewport[i].translate[0] = 0;
      idct->viewport[i].translate[1] = 0;
      idct->viewport[i].translate[2] = 0;
      idct->viewport[i].translate[3] = 0;

      idct->fb_state[i].zsbuf = NULL;
   }

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }

   memset(&rs_state, 0, sizeof(rs_state));
   /*rs_state.sprite_coord_enable */
   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
   rs_state.point_quad_rasterization = true;
   rs_state.point_size = BLOCK_WIDTH;
   rs_state.gl_rasterization_rules = false;
   idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->pipe->screen->tex_surface_destroy(idct->fb_state[0].cbufs[i]);
   }

   idct->pipe->screen->tex_surface_destroy(idct->fb_state[1].cbufs[0]);

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
}

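/*
 * Create a 2x8 RGBA32F texture holding the 8x8 DCT coefficient matrix, four
 * values per texel, and upload it transposed; the caller passes the result
 * to vl_idct_init.
 */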
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[j][i]; // transpose

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

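/*
 * Map and unmap the source coefficient texture so that vl_idct_add_block can
 * write coefficients directly into it between flushes.
 */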
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->textures.individual.source->width0,
      idct->textures.individual.source->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

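/*
 * Initialize an IDCT instance for the given destination texture, reusing the
 * caller-supplied matrix texture for both the matrix and the transpose
 * stage, and leave the source texture mapped, ready for incoming blocks.
 */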
bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->textures.individual.matrix, matrix);
   pipe_resource_reference(&idct->textures.individual.transpose, matrix);
   pipe_resource_reference(&idct->destination, dst);

   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;

   if(!init_buffers(idct))
      return false;

   if(!init_shaders(idct)) {
      cleanup_buffers(idct);
      return false;
   }

   if(!vl_vb_init(&idct->blocks, idct->max_blocks)) {
      cleanup_shaders(idct);
      cleanup_buffers(idct);
      return false;
   }

   init_state(idct);

   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   vl_vb_cleanup(&idct->blocks);
   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

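/*
 * Copy one 8x8 block of short coefficients into the mapped source texture at
 * block position (x, y) and remember the position for the next flush.
 */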
void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   tex_pitch = idct->tex_transfer->stride / sizeof(short);
   texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

   for (i = 0; i < BLOCK_HEIGHT; ++i)
      memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));

   vl_vb_add_block(&idct->blocks, x, y);
}

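/*
 * Upload the collected block positions, unmap the coefficient texture and,
 * if any blocks were added, run both render passes: the matrix multiply into
 * the intermediate target followed by the transpose multiply into the
 * destination. The transfer buffers are mapped again for the next batch.
 */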
void
vl_idct_flush(struct vl_idct *idct)
{
   struct pipe_transfer *vec_transfer;
   struct quadf *vectors;
   unsigned num_blocks;

   assert(idct);

   vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &vec_transfer
   );

   num_blocks = vl_vb_upload(&idct->blocks, vectors);

   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, vec_transfer);

   xfer_buffers_unmap(idct);

   if(num_blocks > 0) {

      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[0]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_blocks * 4);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_blocks * 4);
   }

   xfer_buffers_map(idct);
}