1 /**************************************************************************
3 * Copyright 2010 Christian König
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "vl_vertex_buffers.h"
30 #include "util/u_draw.h"
32 #include <pipe/p_context.h>
33 #include <pipe/p_screen.h>
34 #include <util/u_inlines.h>
35 #include <util/u_sampler.h>
36 #include <util/u_format.h>
37 #include <tgsi/tgsi_ureg.h>
41 #define BLOCK_HEIGHT 8
43 #define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
45 #define STAGE1_SCALE 4.0f
46 #define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)
48 #define NR_RENDER_TARGETS 1
/*
 * 8x8 DCT basis matrix; each row is one cosine basis vector.
 * Uploaded (transposed) to the GPU by vl_idct_upload_matrix().
 * Values are kept exactly as generated; a few entries differ in the
 * last digit from their symmetric counterparts.
 */
static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,   0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f,  -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,   0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f,  -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,   0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f,  -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,   0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f,  -0.0975458f }
};
78 create_vert_shader(struct vl_idct
*idct
)
80 struct ureg_program
*shader
;
81 struct ureg_src scale
;
82 struct ureg_src vrect
, vpos
;
83 struct ureg_dst t_vpos
;
84 struct ureg_dst o_vpos
, o_block
, o_tex
, o_start
;
86 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
90 t_vpos
= ureg_DECL_temporary(shader
);
92 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
93 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
95 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
96 o_block
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_BLOCK
);
97 o_tex
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_TEX
);
98 o_start
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_START
);
101 * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
103 * t_vpos = vpos + vrect
104 * o_vpos.xy = t_vpos * scale
109 * o_start = vpos * scale
112 scale
= ureg_imm2f(shader
,
113 (float)BLOCK_WIDTH
/ idct
->destination
->width0
,
114 (float)BLOCK_HEIGHT
/ idct
->destination
->height0
);
116 ureg_ADD(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), vpos
, vrect
);
117 ureg_MUL(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
), scale
);
118 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
));
119 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_ZW
), vpos
);
121 ureg_MOV(shader
, ureg_writemask(o_block
, TGSI_WRITEMASK_XY
), vrect
);
122 ureg_MOV(shader
, ureg_writemask(o_tex
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
));
123 ureg_MUL(shader
, ureg_writemask(o_start
, TGSI_WRITEMASK_XY
), vpos
, scale
);
125 ureg_release_temporary(shader
, t_vpos
);
129 return ureg_create_shader_and_destroy(shader
, idct
->pipe
);
133 fetch_four(struct ureg_program
*shader
, struct ureg_dst m
[2],
134 struct ureg_src tc
, struct ureg_src sampler
,
135 struct ureg_src start
, struct ureg_src block
,
136 bool right_side
, bool transposed
, float size
)
138 struct ureg_dst t_tc
;
139 unsigned wm_start
= (right_side
== transposed
) ? TGSI_WRITEMASK_X
: TGSI_WRITEMASK_Y
;
140 unsigned wm_tc
= (right_side
== transposed
) ? TGSI_WRITEMASK_Y
: TGSI_WRITEMASK_X
;
142 t_tc
= ureg_DECL_temporary(shader
);
143 m
[0] = ureg_DECL_temporary(shader
);
144 m
[1] = ureg_DECL_temporary(shader
);
147 * t_tc.x = right_side ? start.x : tc.x
148 * t_tc.y = right_side ? tc.y : start.y
149 * m[0..1] = tex(t_tc++, sampler)
152 ureg_MOV(shader
, ureg_writemask(t_tc
, wm_start
), ureg_scalar(start
, TGSI_SWIZZLE_X
));
153 ureg_MOV(shader
, ureg_writemask(t_tc
, wm_tc
), ureg_scalar(tc
, TGSI_SWIZZLE_Y
));
155 ureg_MOV(shader
, ureg_writemask(t_tc
, wm_start
), ureg_scalar(start
, TGSI_SWIZZLE_Y
));
156 ureg_MOV(shader
, ureg_writemask(t_tc
, wm_tc
), ureg_scalar(tc
, TGSI_SWIZZLE_X
));
159 #if NR_RENDER_TARGETS == 8
160 ureg_MOV(shader
, ureg_writemask(t_tc
, TGSI_WRITEMASK_Z
), ureg_scalar(block
, TGSI_SWIZZLE_X
));
162 ureg_MOV(shader
, ureg_writemask(t_tc
, TGSI_WRITEMASK_Z
), ureg_imm1f(shader
, 0.0f
));
165 ureg_TEX(shader
, m
[0], TGSI_TEXTURE_3D
, ureg_src(t_tc
), sampler
);
166 ureg_ADD(shader
, ureg_writemask(t_tc
, wm_start
), ureg_src(t_tc
), ureg_imm1f(shader
, 1.0f
/ size
));
167 ureg_TEX(shader
, m
[1], TGSI_TEXTURE_3D
, ureg_src(t_tc
), sampler
);
169 ureg_release_temporary(shader
, t_tc
);
173 matrix_mul(struct ureg_program
*shader
, struct ureg_dst dst
, struct ureg_dst l
[2], struct ureg_dst r
[2])
175 struct ureg_dst tmp
[2];
178 for(i
= 0; i
< 2; ++i
) {
179 tmp
[i
] = ureg_DECL_temporary(shader
);
183 * tmp[0..1] = dot4(m[0][0..1], m[1][0..1])
184 * dst = tmp[0] + tmp[1]
186 ureg_DP4(shader
, ureg_writemask(tmp
[0], TGSI_WRITEMASK_X
), ureg_src(l
[0]), ureg_src(r
[0]));
187 ureg_DP4(shader
, ureg_writemask(tmp
[1], TGSI_WRITEMASK_X
), ureg_src(l
[1]), ureg_src(r
[1]));
188 ureg_ADD(shader
, dst
,
189 ureg_scalar(ureg_src(tmp
[0]), TGSI_SWIZZLE_X
),
190 ureg_scalar(ureg_src(tmp
[1]), TGSI_SWIZZLE_X
));
192 for(i
= 0; i
< 2; ++i
) {
193 ureg_release_temporary(shader
, tmp
[i
]);
198 create_transpose_frag_shader(struct vl_idct
*idct
)
200 struct pipe_resource
*transpose
= idct
->textures
.individual
.transpose
;
201 struct pipe_resource
*intermediate
= idct
->textures
.individual
.intermediate
;
203 struct ureg_program
*shader
;
205 struct ureg_src block
, tex
, sampler
[2];
206 struct ureg_src start
[2];
208 struct ureg_dst l
[2], r
[2];
209 struct ureg_dst tmp
, fragment
;
211 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
215 block
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_BLOCK
, TGSI_INTERPOLATE_LINEAR
);
216 tex
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_TEX
, TGSI_INTERPOLATE_CONSTANT
);
218 sampler
[0] = ureg_DECL_sampler(shader
, 0);
219 sampler
[1] = ureg_DECL_sampler(shader
, 1);
221 start
[0] = ureg_imm1f(shader
, 0.0f
);
222 start
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_START
, TGSI_INTERPOLATE_CONSTANT
);
224 fetch_four(shader
, l
, block
, sampler
[0], start
[0], block
, false, false, transpose
->width0
);
225 fetch_four(shader
, r
, tex
, sampler
[1], start
[1], block
, true, false, intermediate
->height0
);
227 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
229 tmp
= ureg_DECL_temporary(shader
);
230 matrix_mul(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_X
), l
, r
);
231 ureg_MUL(shader
, fragment
, ureg_src(tmp
), ureg_imm1f(shader
, STAGE2_SCALE
));
233 ureg_release_temporary(shader
, tmp
);
234 ureg_release_temporary(shader
, l
[0]);
235 ureg_release_temporary(shader
, l
[1]);
236 ureg_release_temporary(shader
, r
[0]);
237 ureg_release_temporary(shader
, r
[1]);
241 return ureg_create_shader_and_destroy(shader
, idct
->pipe
);
245 create_matrix_frag_shader(struct vl_idct
*idct
)
247 struct pipe_resource
*matrix
= idct
->textures
.individual
.matrix
;
248 struct pipe_resource
*source
= idct
->textures
.individual
.source
;
250 struct ureg_program
*shader
;
252 struct ureg_src tex
, block
, sampler
[2];
253 struct ureg_src start
[2];
255 struct ureg_dst l
[4][2], r
[2];
256 struct ureg_dst t_tc
, tmp
, fragment
[NR_RENDER_TARGETS
];
260 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
264 t_tc
= ureg_DECL_temporary(shader
);
265 tmp
= ureg_DECL_temporary(shader
);
267 tex
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_TEX
, TGSI_INTERPOLATE_LINEAR
);
268 block
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_BLOCK
, TGSI_INTERPOLATE_LINEAR
);
270 sampler
[0] = ureg_DECL_sampler(shader
, 1);
271 sampler
[1] = ureg_DECL_sampler(shader
, 0);
273 start
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_START
, TGSI_INTERPOLATE_CONSTANT
);
274 start
[1] = ureg_imm1f(shader
, 0.0f
);
276 for (i
= 0; i
< NR_RENDER_TARGETS
; ++i
)
277 fragment
[i
] = ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, i
);
279 ureg_MOV(shader
, ureg_writemask(t_tc
, TGSI_WRITEMASK_Y
), tex
);
280 for (i
= 0; i
< 4; ++i
) {
281 fetch_four(shader
, l
[i
], ureg_src(t_tc
), sampler
[0], start
[0], block
, false, false, source
->width0
);
282 ureg_MUL(shader
, l
[i
][0], ureg_src(l
[i
][0]), ureg_imm1f(shader
, STAGE1_SCALE
));
283 ureg_MUL(shader
, l
[i
][1], ureg_src(l
[i
][1]), ureg_imm1f(shader
, STAGE1_SCALE
));
285 ureg_ADD(shader
, ureg_writemask(t_tc
, TGSI_WRITEMASK_Y
),
286 ureg_src(t_tc
), ureg_imm1f(shader
, 1.0f
/ source
->height0
));
289 for (i
= 0; i
< NR_RENDER_TARGETS
; ++i
) {
291 #if NR_RENDER_TARGETS == 8
292 ureg_MOV(shader
, ureg_writemask(t_tc
, TGSI_WRITEMASK_X
), ureg_imm1f(shader
, 1.0f
/ BLOCK_WIDTH
* i
));
293 fetch_four(shader
, r
, ureg_src(t_tc
), sampler
[1], start
[1], block
, true, true, matrix
->width0
);
294 #elif NR_RENDER_TARGETS == 1
295 fetch_four(shader
, r
, block
, sampler
[1], start
[1], block
, true, true, matrix
->width0
);
297 #error invalid number of render targets
300 for (j
= 0; j
< 4; ++j
) {
301 matrix_mul(shader
, ureg_writemask(fragment
[i
], TGSI_WRITEMASK_X
<< j
), l
[j
], r
);
303 ureg_release_temporary(shader
, r
[0]);
304 ureg_release_temporary(shader
, r
[1]);
307 ureg_release_temporary(shader
, t_tc
);
308 ureg_release_temporary(shader
, tmp
);
310 for (i
= 0; i
< 4; ++i
) {
311 ureg_release_temporary(shader
, l
[i
][0]);
312 ureg_release_temporary(shader
, l
[i
][1]);
317 return ureg_create_shader_and_destroy(shader
, idct
->pipe
);
321 init_shaders(struct vl_idct
*idct
)
323 idct
->vs
= create_vert_shader(idct
);
324 idct
->matrix_fs
= create_matrix_frag_shader(idct
);
325 idct
->transpose_fs
= create_transpose_frag_shader(idct
);
329 idct
->transpose_fs
!= NULL
&&
330 idct
->matrix_fs
!= NULL
;
334 cleanup_shaders(struct vl_idct
*idct
)
336 idct
->pipe
->delete_vs_state(idct
->pipe
, idct
->vs
);
337 idct
->pipe
->delete_fs_state(idct
->pipe
, idct
->matrix_fs
);
338 idct
->pipe
->delete_fs_state(idct
->pipe
, idct
->transpose_fs
);
342 init_buffers(struct vl_idct
*idct
)
344 struct pipe_resource
template;
345 struct pipe_sampler_view sampler_view
;
346 struct pipe_vertex_element vertex_elems
[2];
349 memset(&template, 0, sizeof(struct pipe_resource
));
350 template.last_level
= 0;
352 template.bind
= PIPE_BIND_SAMPLER_VIEW
;
355 template.target
= PIPE_TEXTURE_2D
;
356 template.format
= PIPE_FORMAT_R16G16B16A16_SNORM
;
357 template.width0
= idct
->destination
->width0
/ 4;
358 template.height0
= idct
->destination
->height0
;
360 template.usage
= PIPE_USAGE_STREAM
;
361 idct
->textures
.individual
.source
= idct
->pipe
->screen
->resource_create(idct
->pipe
->screen
, &template);
363 template.target
= PIPE_TEXTURE_3D
;
364 template.format
= PIPE_FORMAT_R16G16B16A16_SNORM
;
365 template.width0
= idct
->destination
->width0
/ NR_RENDER_TARGETS
;
366 template.height0
= idct
->destination
->height0
/ 4;
367 template.depth0
= NR_RENDER_TARGETS
;
368 template.usage
= PIPE_USAGE_STATIC
;
369 idct
->textures
.individual
.intermediate
= idct
->pipe
->screen
->resource_create(idct
->pipe
->screen
, &template);
371 for (i
= 0; i
< 4; ++i
) {
372 if(idct
->textures
.all
[i
] == NULL
)
373 return false; /* a texture failed to allocate */
375 u_sampler_view_default_template(&sampler_view
, idct
->textures
.all
[i
], idct
->textures
.all
[i
]->format
);
376 idct
->sampler_views
.all
[i
] = idct
->pipe
->create_sampler_view(idct
->pipe
, idct
->textures
.all
[i
], &sampler_view
);
379 idct
->vertex_bufs
.individual
.quad
= vl_vb_upload_quads(idct
->pipe
, idct
->max_blocks
);
381 if(idct
->vertex_bufs
.individual
.quad
.buffer
== NULL
)
384 idct
->vertex_bufs
.individual
.pos
.stride
= sizeof(struct vertex2f
);
385 idct
->vertex_bufs
.individual
.pos
.max_index
= 4 * idct
->max_blocks
- 1;
386 idct
->vertex_bufs
.individual
.pos
.buffer_offset
= 0;
387 idct
->vertex_bufs
.individual
.pos
.buffer
= pipe_buffer_create
390 PIPE_BIND_VERTEX_BUFFER
,
391 sizeof(struct vertex2f
) * 4 * idct
->max_blocks
394 if(idct
->vertex_bufs
.individual
.pos
.buffer
== NULL
)
398 vertex_elems
[0].src_offset
= 0;
399 vertex_elems
[0].instance_divisor
= 0;
400 vertex_elems
[0].vertex_buffer_index
= 0;
401 vertex_elems
[0].src_format
= PIPE_FORMAT_R32G32_FLOAT
;
404 vertex_elems
[1].src_offset
= 0;
405 vertex_elems
[1].instance_divisor
= 0;
406 vertex_elems
[1].vertex_buffer_index
= 1;
407 vertex_elems
[1].src_format
= PIPE_FORMAT_R32G32_FLOAT
;
409 idct
->vertex_elems_state
= idct
->pipe
->create_vertex_elements_state(idct
->pipe
, 2, vertex_elems
);
415 cleanup_buffers(struct vl_idct
*idct
)
421 for (i
= 0; i
< 4; ++i
) {
422 pipe_sampler_view_reference(&idct
->sampler_views
.all
[i
], NULL
);
423 pipe_resource_reference(&idct
->textures
.all
[i
], NULL
);
426 idct
->pipe
->delete_vertex_elements_state(idct
->pipe
, idct
->vertex_elems_state
);
427 pipe_resource_reference(&idct
->vertex_bufs
.individual
.quad
.buffer
, NULL
);
428 pipe_resource_reference(&idct
->vertex_bufs
.individual
.pos
.buffer
, NULL
);
432 init_state(struct vl_idct
*idct
)
434 struct pipe_sampler_state sampler
;
435 struct pipe_rasterizer_state rs_state
;
438 idct
->viewport
[0].scale
[0] = idct
->textures
.individual
.intermediate
->width0
;
439 idct
->viewport
[0].scale
[1] = idct
->textures
.individual
.intermediate
->height0
;
441 idct
->viewport
[1].scale
[0] = idct
->destination
->width0
;
442 idct
->viewport
[1].scale
[1] = idct
->destination
->height0
;
444 idct
->fb_state
[0].width
= idct
->textures
.individual
.intermediate
->width0
;
445 idct
->fb_state
[0].height
= idct
->textures
.individual
.intermediate
->height0
;
447 idct
->fb_state
[0].nr_cbufs
= NR_RENDER_TARGETS
;
448 for(i
= 0; i
< NR_RENDER_TARGETS
; ++i
) {
449 idct
->fb_state
[0].cbufs
[i
] = idct
->pipe
->screen
->get_tex_surface(
450 idct
->pipe
->screen
, idct
->textures
.individual
.intermediate
, 0, 0, i
,
451 PIPE_BIND_SAMPLER_VIEW
| PIPE_BIND_RENDER_TARGET
);
454 idct
->fb_state
[1].width
= idct
->destination
->width0
;
455 idct
->fb_state
[1].height
= idct
->destination
->height0
;
457 idct
->fb_state
[1].nr_cbufs
= 1;
458 idct
->fb_state
[1].cbufs
[0] = idct
->pipe
->screen
->get_tex_surface(
459 idct
->pipe
->screen
, idct
->destination
, 0, 0, 0,
460 PIPE_BIND_SAMPLER_VIEW
| PIPE_BIND_RENDER_TARGET
);
462 for(i
= 0; i
< 2; ++i
) {
463 idct
->viewport
[i
].scale
[2] = 1;
464 idct
->viewport
[i
].scale
[3] = 1;
465 idct
->viewport
[i
].translate
[0] = 0;
466 idct
->viewport
[i
].translate
[1] = 0;
467 idct
->viewport
[i
].translate
[2] = 0;
468 idct
->viewport
[i
].translate
[3] = 0;
470 idct
->fb_state
[i
].zsbuf
= NULL
;
473 for (i
= 0; i
< 4; ++i
) {
474 memset(&sampler
, 0, sizeof(sampler
));
475 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
476 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
477 sampler
.wrap_r
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
478 sampler
.min_img_filter
= PIPE_TEX_FILTER_NEAREST
;
479 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
480 sampler
.mag_img_filter
= PIPE_TEX_FILTER_NEAREST
;
481 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
482 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
483 sampler
.normalized_coords
= 1;
484 /*sampler.shadow_ambient = ; */
485 /*sampler.lod_bias = ; */
487 /*sampler.max_lod = ; */
488 /*sampler.border_color[0] = ; */
489 /*sampler.max_anisotropy = ; */
490 idct
->samplers
.all
[i
] = idct
->pipe
->create_sampler_state(idct
->pipe
, &sampler
);
493 memset(&rs_state
, 0, sizeof(rs_state
));
494 /*rs_state.sprite_coord_enable */
495 rs_state
.sprite_coord_mode
= PIPE_SPRITE_COORD_UPPER_LEFT
;
496 rs_state
.point_quad_rasterization
= true;
497 rs_state
.point_size
= BLOCK_WIDTH
;
498 rs_state
.gl_rasterization_rules
= false;
499 idct
->rs_state
= idct
->pipe
->create_rasterizer_state(idct
->pipe
, &rs_state
);
503 cleanup_state(struct vl_idct
*idct
)
507 for(i
= 0; i
< NR_RENDER_TARGETS
; ++i
) {
508 idct
->pipe
->screen
->tex_surface_destroy(idct
->fb_state
[0].cbufs
[i
]);
511 idct
->pipe
->screen
->tex_surface_destroy(idct
->fb_state
[1].cbufs
[0]);
513 for (i
= 0; i
< 4; ++i
)
514 idct
->pipe
->delete_sampler_state(idct
->pipe
, idct
->samplers
.all
[i
]);
516 idct
->pipe
->delete_rasterizer_state(idct
->pipe
, idct
->rs_state
);
519 struct pipe_resource
*
520 vl_idct_upload_matrix(struct pipe_context
*pipe
)
522 struct pipe_resource
template, *matrix
;
523 struct pipe_transfer
*buf_transfer
;
524 unsigned i
, j
, pitch
;
527 struct pipe_box rect
=
535 memset(&template, 0, sizeof(struct pipe_resource
));
536 template.target
= PIPE_TEXTURE_2D
;
537 template.format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
538 template.last_level
= 0;
540 template.height0
= 8;
542 template.usage
= PIPE_USAGE_IMMUTABLE
;
543 template.bind
= PIPE_BIND_SAMPLER_VIEW
;
546 matrix
= pipe
->screen
->resource_create(pipe
->screen
, &template);
549 buf_transfer
= pipe
->get_transfer
553 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD
,
556 pitch
= buf_transfer
->stride
/ sizeof(float);
558 f
= pipe
->transfer_map(pipe
, buf_transfer
);
559 for(i
= 0; i
< BLOCK_HEIGHT
; ++i
)
560 for(j
= 0; j
< BLOCK_WIDTH
; ++j
)
561 f
[i
* pitch
+ j
] = const_matrix
[j
][i
]; // transpose
563 pipe
->transfer_unmap(pipe
, buf_transfer
);
564 pipe
->transfer_destroy(pipe
, buf_transfer
);
570 xfer_buffers_map(struct vl_idct
*idct
)
572 struct pipe_box rect
=
575 idct
->textures
.individual
.source
->width0
,
576 idct
->textures
.individual
.source
->height0
,
580 idct
->tex_transfer
= idct
->pipe
->get_transfer
582 idct
->pipe
, idct
->textures
.individual
.source
,
584 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD
,
588 idct
->texels
= idct
->pipe
->transfer_map(idct
->pipe
, idct
->tex_transfer
);
592 xfer_buffers_unmap(struct vl_idct
*idct
)
594 idct
->pipe
->transfer_unmap(idct
->pipe
, idct
->tex_transfer
);
595 idct
->pipe
->transfer_destroy(idct
->pipe
, idct
->tex_transfer
);
599 vl_idct_init(struct vl_idct
*idct
, struct pipe_context
*pipe
, struct pipe_resource
*dst
, struct pipe_resource
*matrix
)
601 assert(idct
&& pipe
&& dst
);
604 pipe_resource_reference(&idct
->textures
.individual
.matrix
, matrix
);
605 pipe_resource_reference(&idct
->textures
.individual
.transpose
, matrix
);
606 pipe_resource_reference(&idct
->destination
, dst
);
609 align(idct
->destination
->width0
, BLOCK_WIDTH
) / BLOCK_WIDTH
*
610 align(idct
->destination
->height0
, BLOCK_HEIGHT
) / BLOCK_HEIGHT
*
611 idct
->destination
->depth0
;
613 if(!init_buffers(idct
))
616 if(!init_shaders(idct
)) {
617 cleanup_buffers(idct
);
621 if(!vl_vb_init(&idct
->blocks
, idct
->max_blocks
)) {
622 cleanup_shaders(idct
);
623 cleanup_buffers(idct
);
629 xfer_buffers_map(idct
);
635 vl_idct_cleanup(struct vl_idct
*idct
)
637 vl_vb_cleanup(&idct
->blocks
);
638 cleanup_shaders(idct
);
639 cleanup_buffers(idct
);
643 pipe_resource_reference(&idct
->destination
, NULL
);
647 vl_idct_add_block(struct vl_idct
*idct
, unsigned x
, unsigned y
, short *block
)
656 tex_pitch
= idct
->tex_transfer
->stride
/ sizeof(short);
657 texels
= idct
->texels
+ y
* tex_pitch
* BLOCK_HEIGHT
+ x
* BLOCK_WIDTH
;
659 for (i
= 0; i
< BLOCK_HEIGHT
; ++i
)
660 memcpy(texels
+ i
* tex_pitch
, block
+ i
* BLOCK_WIDTH
, BLOCK_WIDTH
* sizeof(short));
662 vl_vb_add_block(&idct
->blocks
, x
, y
);
666 vl_idct_flush(struct vl_idct
*idct
)
668 struct pipe_transfer
*vec_transfer
;
669 struct quadf
*vectors
;
674 vectors
= pipe_buffer_map
677 idct
->vertex_bufs
.individual
.pos
.buffer
,
678 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD
,
682 num_blocks
= vl_vb_upload(&idct
->blocks
, vectors
);
684 pipe_buffer_unmap(idct
->pipe
, idct
->vertex_bufs
.individual
.pos
.buffer
, vec_transfer
);
686 xfer_buffers_unmap(idct
);
690 idct
->pipe
->bind_rasterizer_state(idct
->pipe
, idct
->rs_state
);
691 idct
->pipe
->set_vertex_buffers(idct
->pipe
, 2, idct
->vertex_bufs
.all
);
692 idct
->pipe
->bind_vertex_elements_state(idct
->pipe
, idct
->vertex_elems_state
);
693 idct
->pipe
->bind_vs_state(idct
->pipe
, idct
->vs
);
696 idct
->pipe
->set_framebuffer_state(idct
->pipe
, &idct
->fb_state
[0]);
697 idct
->pipe
->set_viewport_state(idct
->pipe
, &idct
->viewport
[0]);
698 idct
->pipe
->set_fragment_sampler_views(idct
->pipe
, 2, idct
->sampler_views
.stage
[0]);
699 idct
->pipe
->bind_fragment_sampler_states(idct
->pipe
, 2, idct
->samplers
.stage
[0]);
700 idct
->pipe
->bind_fs_state(idct
->pipe
, idct
->matrix_fs
);
701 util_draw_arrays(idct
->pipe
, PIPE_PRIM_QUADS
, 0, num_blocks
* 4);
704 idct
->pipe
->set_framebuffer_state(idct
->pipe
, &idct
->fb_state
[1]);
705 idct
->pipe
->set_viewport_state(idct
->pipe
, &idct
->viewport
[1]);
706 idct
->pipe
->set_fragment_sampler_views(idct
->pipe
, 2, idct
->sampler_views
.stage
[1]);
707 idct
->pipe
->bind_fragment_sampler_states(idct
->pipe
, 2, idct
->samplers
.stage
[1]);
708 idct
->pipe
->bind_fs_state(idct
->pipe
, idct
->transpose_fs
);
709 util_draw_arrays(idct
->pipe
, PIPE_PRIM_QUADS
, 0, num_blocks
* 4);
712 xfer_buffers_map(idct
);