[g3dvl] give idct stage 1 & 2 its own sb_state and viewport
[mesa.git] src/gallium/auxiliary/vl/vl_idct.c
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

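/*
 * The scaling below is split across the two render passes: the product of
 * STAGE1_SCALE and STAGE2_SCALE is SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f,
 * i.e. 128), which presumably rescales values stored as 16 bit SNORM into
 * the 9 bit signed range of the final samples; splitting the factor keeps
 * the intermediate results representable in the SNORM16 intermediate
 * render target.
 */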
#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START
};

static const float const_matrix[8][8] = {
   { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
   { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
   { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
   { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
   { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
   { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
   { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
};
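
/*
 * The table above appears to be the orthonormal 8x8 DCT-II basis,
 *
 *    c[i][j] = s(i) * cos((2 * j + 1) * i * PI / 16)
 *
 * with s(0) = sqrt(1/8) (about 0.353553) and s(i > 0) = 0.5. An illustrative
 * sketch that would regenerate the values, not used anywhere in this file:
 *
 *    for (i = 0; i < 8; ++i)
 *       for (j = 0; j < 8; ++j)
 *          c[i][j] = (i == 0 ? sqrtf(1.0f / 8.0f) : 0.5f) *
 *                    cosf((2 * j + 1) * i * M_PI / 16.0f);
 */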

/* vertices for a quad covering a block */
static const struct vertex2f const_quad[4] = {
   {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
};

static void *
create_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src scale;
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->destination->width0,
      (float)BLOCK_HEIGHT / idct->destination->height0);

   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
   o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
   o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);

   /*
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    *
    */
   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
   ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

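/*
 * Fetch eight coefficients one texel at a time, keeping only the x channel
 * of each fetch and packing the results into m[0].xyzw and m[1].xyzw. One
 * texture coordinate component is taken from tc and held fixed, the other
 * starts at start and is stepped by 1.0f / size between fetches; right_side
 * selects which component is which.
 */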
static void
fetch_one(struct ureg_program *shader, struct ureg_dst m[2],
          struct ureg_src tc, struct ureg_src sampler,
          struct ureg_src start, bool right_side, float size)
{
   struct ureg_dst t_tc, tmp;
   unsigned i, j;

   t_tc = ureg_DECL_temporary(shader);
   tmp = ureg_DECL_temporary(shader);

   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc.x = right_side ? tc.x : start.x
    * t_tc.y = right_side ? start.y : tc.y
    * m[0..1].xyzw = tex(t_tc++, sampler)
    */
   if(right_side) {
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(tc, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(start, TGSI_SWIZZLE_Y));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_Y));
   }
   for(i = 0; i < 2; ++i) {
      for(j = 0; j < 4; ++j) {
         /* Nouveau and r600g can't writemask tex dst regs (yet?), do it in two steps */
         ureg_TEX(shader, tmp, TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);
         ureg_MOV(shader, ureg_writemask(m[i], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));

         if(i != 1 || j != 3) /* skip the last add */
            ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X << right_side),
                     ureg_src(t_tc), ureg_imm1f(shader, 1.0f / size));
      }
   }

   ureg_release_temporary(shader, t_tc);
   ureg_release_temporary(shader, tmp);
}

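/*
 * Fetch eight coefficients with just two texture reads, relying on four
 * consecutive values being packed into the rgba channels of each texel;
 * t_tc.x is replaced with 4.0f / size before the second fetch.
 */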
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2],
           struct ureg_src tc, struct ureg_src sampler,
           struct ureg_src start, bool right_side, float size)
{
   struct ureg_dst t_tc;

   t_tc = ureg_DECL_temporary(shader);
   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc.x = right_side ? start.x : tc.x
    * t_tc.y = right_side ? tc.y : start.y
    * m[0..1] = tex(t_tc++, sampler)
    */
   if(right_side) {
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_Y));
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_X));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_Y));
   }

   ureg_TEX(shader, m[0], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 4.0f / size));
   ureg_TEX(shader, m[1], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);

   ureg_release_temporary(shader, t_tc);
}

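/*
 * Dot product of two eight-element vectors, each split over two
 * four-component registers: two DP4s plus an ADD leave the scalar result
 * in dst.x.
 */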
static struct ureg_dst
matrix_mul(struct ureg_program *shader, struct ureg_dst m[2][2])
{
   struct ureg_dst dst, tmp[2];
   unsigned i;

   dst = ureg_DECL_temporary(shader);
   for(i = 0; i < 2; ++i) {
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * tmp[0..1] = dot4(m[0][0..1], m[1][0..1])
    * dst = tmp[0] + tmp[1]
    */
   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(m[0][0]), ureg_src(m[1][0]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(m[0][1]), ureg_src(m[1][1]));
   ureg_ADD(shader, ureg_writemask(dst, TGSI_WRITEMASK_X), ureg_src(tmp[0]), ureg_src(tmp[1]));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, tmp[i]);
   }

   return dst;
}

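/*
 * Fragment shader for the second IDCT stage: one operand is gathered with
 * fetch_four from sampler unit 0, the other with fetch_one from sampler
 * unit 1, the two are combined with matrix_mul and the result is written
 * out scaled by STAGE2_SCALE. Together with the first stage this presumably
 * completes the row/column factorization of the 2D IDCT.
 */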
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2];

   struct ureg_dst m[2][2];
   struct ureg_dst tmp, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   fetch_four(shader, m[0], tc[0], sampler[0], start[0], false, BLOCK_WIDTH);
   fetch_one(shader, m[1], tc[1], sampler[1], start[1], true, idct->destination->height0);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = matrix_mul(shader, m);
   ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE));

   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, m[0][0]);
   ureg_release_temporary(shader, m[0][1]);
   ureg_release_temporary(shader, m[1][0]);
   ureg_release_temporary(shader, m[1][1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

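/*
 * Fragment shader for the first IDCT stage: both operands are gathered with
 * fetch_four (note the swapped sampler indices, sampler unit 1 feeds the
 * first operand), multiplied with matrix_mul and written out scaled by
 * STAGE1_SCALE. vl_idct_flush binds this shader for the first pass, which
 * renders into the intermediate texture.
 */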
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2];

   struct ureg_dst m[2][2];
   struct ureg_dst tmp, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   fetch_four(shader, m[0], tc[0], sampler[0], start[0], false, idct->destination->width0);
   fetch_four(shader, m[1], tc[1], sampler[1], start[1], true, BLOCK_HEIGHT);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = matrix_mul(shader, m);
   ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE1_SCALE));

   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, m[0][0]);
   ureg_release_temporary(shader, m[0][1]);
   ureg_release_temporary(shader, m[1][0]);
   ureg_release_temporary(shader, m[1][1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

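/*
 * Trivial fragment shader used for blocks without coefficient data: it just
 * writes 0.0f instead of running the IDCT math.
 */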
static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

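/*
 * The source coefficient texture and the position vertex buffer are kept
 * mapped between vl_idct_flush calls, so vl_idct_add_block can write texels
 * and vertices directly; xfer_buffers_map/unmap bracket that window.
 */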
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->destination->width0,
      idct->destination->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);

   idct->vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &idct->vec_transfer
   );
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, idct->vec_transfer);

   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->vs = create_vert_shader(idct);
   idct->transpose_fs = create_transpose_frag_shader(idct);
   idct->matrix_fs = create_matrix_frag_shader(idct);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   return
      idct->vs != NULL &&
      idct->transpose_fs != NULL &&
      idct->matrix_fs != NULL &&
      idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->destination->width0;
   template.height0 = idct->destination->height0;
   template.depth0 = idct->destination->depth0;
   template.usage = PIPE_USAGE_STREAM;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.quad.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.quad.buffer_offset = 0;
   idct->vertex_bufs.individual.quad.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

static void
init_constants(struct vl_idct *idct)
{
   struct pipe_transfer *buf_transfer;
   struct vertex2f *v;

   unsigned i;

   /* quad vectors */
   v = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.quad.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );
   for (i = 0; i < idct->max_blocks; ++i)
      memcpy(v + i * 4, &const_quad, sizeof(const_quad));
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer);
}

static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   unsigned i;

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;

   idct->viewport[0].scale[0] = idct->destination->width0;
   idct->viewport[1].scale[0] = idct->destination->width0;

   idct->fb_state[0].width = idct->destination->width0;
   idct->fb_state[1].width = idct->destination->width0;

   for(i = 0; i < 2; ++i) {
      idct->viewport[i].scale[1] = idct->destination->height0;
      idct->viewport[i].scale[2] = 1;
      idct->viewport[i].scale[3] = 1;
      idct->viewport[i].translate[0] = 0;
      idct->viewport[i].translate[1] = 0;
      idct->viewport[i].translate[2] = 0;
      idct->viewport[i].translate[3] = 0;

      idct->fb_state[i].height = idct->destination->height0;
      idct->fb_state[i].nr_cbufs = 1;
      idct->fb_state[i].zsbuf = NULL;
   }

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}

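/*
 * Upload the 8x8 IDCT matrix into a 2x8 RGBA32F texture: each row of eight
 * floats is stored as two four-channel texels, and the constants are
 * written transposed (const_matrix[j][i]).
 */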
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch * 4 + j] = const_matrix[j][i]; // transpose

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

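/*
 * Set up an IDCT instance rendering into dst, using the matrix texture
 * created by vl_idct_upload_matrix. A typical caller (a rough sketch, not
 * taken from this tree) would look like:
 *
 *    struct vl_idct idct;
 *    struct pipe_resource *matrix = vl_idct_upload_matrix(pipe);
 *    vl_idct_init(&idct, pipe, dst, matrix);
 *    ...
 *    vl_idct_add_block(&idct, x, y, coefficients); // per decoded block
 *    vl_idct_add_block(&idct, x, y, NULL);         // per empty block
 *    ...
 *    vl_idct_flush(&idct);                         // once all blocks are added
 *    ...
 *    vl_idct_cleanup(&idct);
 */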
bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->textures.individual.matrix, matrix);
   pipe_resource_reference(&idct->textures.individual.transpose, matrix);
   pipe_resource_reference(&idct->destination, dst);

   init_state(idct);

   if(!init_shaders(idct))
      return false;

   if(!init_buffers(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   idct->surfaces.intermediate = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
   idct->fb_state[0].cbufs[0] = idct->surfaces.intermediate;

   idct->surfaces.destination = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
   idct->fb_state[1].cbufs[0] = idct->surfaces.destination;

   init_constants(idct);
   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.destination);
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.intermediate);

   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

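/*
 * Store one 8x8 block of short coefficients into the mapped source texture
 * and record its position (once per quad vertex) in the mapped position
 * vertex buffer; passing a NULL block only records the position, so the
 * block can be zeroed during vl_idct_flush.
 */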
void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   struct vertex2f v, *v_dst;

   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   if(block) {
      tex_pitch = idct->tex_transfer->stride / util_format_get_blocksize(idct->tex_transfer->resource->format);
      texels = idct->texels + (y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH) * 4;

      for (i = 0; i < BLOCK_HEIGHT; ++i)
         memcpy(texels + i * tex_pitch * 4, block + i * BLOCK_WIDTH, BLOCK_WIDTH * 2);

      /* non-empty blocks fill the vector buffer from left to right */
      v_dst = idct->vectors + idct->num_blocks * 4;

      idct->num_blocks++;

   } else {

      /* while empty blocks fill the vector buffer from right to left */
      v_dst = idct->vectors + (idct->max_blocks - idct->num_empty_blocks) * 4 - 4;

      idct->num_empty_blocks++;
   }

   v.x = x;
   v.y = y;

   for (i = 0; i < 4; ++i) {
      v_dst[i] = v;
   }
}

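/*
 * Render the queued blocks: pass one draws them with matrix_fs into
 * fb_state[0] / viewport[0] (the intermediate texture), pass two draws the
 * same vertices with transpose_fs into fb_state[1] / viewport[1] (the
 * destination), and a final draw with eb_fs zeroes the empty blocks stored
 * at the tail of the position buffer. Afterwards the transfer buffers are
 * mapped again for the next batch.
 */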
void
vl_idct_flush(struct vl_idct *idct)
{
   xfer_buffers_unmap(idct);

   if(idct->num_blocks > 0) {

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[0]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
   }

   if(idct->num_empty_blocks > 0) {

      /* empty block handling */
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS,
         (idct->max_blocks - idct->num_empty_blocks) * 4,
         idct->num_empty_blocks * 4);
   }

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;
   xfer_buffers_map(idct);
}