/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <assert.h>
#include <string.h>

#include <pipe/p_context.h>
#include <pipe/p_screen.h>

#include <util/u_draw.h>
#include <util/u_sampler.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_defines.h"
#include "vl_vertex_buffers.h"
52 static const float const_matrix
[8][8] = {
53 { 0.3535530f
, 0.3535530f
, 0.3535530f
, 0.3535530f
, 0.3535530f
, 0.3535530f
, 0.353553f
, 0.3535530f
},
54 { 0.4903930f
, 0.4157350f
, 0.2777850f
, 0.0975451f
, -0.0975452f
, -0.2777850f
, -0.415735f
, -0.4903930f
},
55 { 0.4619400f
, 0.1913420f
, -0.1913420f
, -0.4619400f
, -0.4619400f
, -0.1913420f
, 0.191342f
, 0.4619400f
},
56 { 0.4157350f
, -0.0975452f
, -0.4903930f
, -0.2777850f
, 0.2777850f
, 0.4903930f
, 0.097545f
, -0.4157350f
},
57 { 0.3535530f
, -0.3535530f
, -0.3535530f
, 0.3535540f
, 0.3535530f
, -0.3535540f
, -0.353553f
, 0.3535530f
},
58 { 0.2777850f
, -0.4903930f
, 0.0975452f
, 0.4157350f
, -0.4157350f
, -0.0975451f
, 0.490393f
, -0.2777850f
},
59 { 0.1913420f
, -0.4619400f
, 0.4619400f
, -0.1913420f
, -0.1913410f
, 0.4619400f
, -0.461940f
, 0.1913420f
},
60 { 0.0975451f
, -0.2777850f
, 0.4157350f
, -0.4903930f
, 0.4903930f
, -0.4157350f
, 0.277786f
, -0.0975458f
}
64 calc_addr(struct ureg_program
*shader
, struct ureg_dst addr
[2],
65 struct ureg_src tc
, struct ureg_src start
, bool right_side
,
66 bool transposed
, float size
)
68 unsigned wm_start
= (right_side
== transposed
) ? TGSI_WRITEMASK_X
: TGSI_WRITEMASK_Y
;
69 unsigned sw_start
= right_side
? TGSI_SWIZZLE_Y
: TGSI_SWIZZLE_X
;
71 unsigned wm_tc
= (right_side
== transposed
) ? TGSI_WRITEMASK_Y
: TGSI_WRITEMASK_X
;
72 unsigned sw_tc
= right_side
? TGSI_SWIZZLE_X
: TGSI_SWIZZLE_Y
;
75 * addr[0..1].(start) = right_side ? start.x : tc.x
76 * addr[0..1].(tc) = right_side ? tc.y : start.y
78 * addr[1].(start) += 1.0f / scale
80 ureg_MOV(shader
, ureg_writemask(addr
[0], wm_start
), ureg_scalar(start
, sw_start
));
81 ureg_MOV(shader
, ureg_writemask(addr
[0], wm_tc
), ureg_scalar(tc
, sw_tc
));
82 ureg_MOV(shader
, ureg_writemask(addr
[0], TGSI_WRITEMASK_Z
), tc
);
84 ureg_ADD(shader
, ureg_writemask(addr
[1], wm_start
), ureg_scalar(start
, sw_start
), ureg_imm1f(shader
, 1.0f
/ size
));
85 ureg_MOV(shader
, ureg_writemask(addr
[1], wm_tc
), ureg_scalar(tc
, sw_tc
));
86 ureg_MOV(shader
, ureg_writemask(addr
[1], TGSI_WRITEMASK_Z
), tc
);
90 increment_addr(struct ureg_program
*shader
, struct ureg_dst daddr
[2],
91 struct ureg_src saddr
[2], bool right_side
, bool transposed
,
94 unsigned wm_start
= (right_side
== transposed
) ? TGSI_WRITEMASK_X
: TGSI_WRITEMASK_Y
;
95 unsigned wm_tc
= (right_side
== transposed
) ? TGSI_WRITEMASK_Y
: TGSI_WRITEMASK_X
;
98 * daddr[0..1].(start) = saddr[0..1].(start)
99 * daddr[0..1].(tc) = saddr[0..1].(tc)
102 ureg_MOV(shader
, ureg_writemask(daddr
[0], wm_start
), saddr
[0]);
103 ureg_ADD(shader
, ureg_writemask(daddr
[0], wm_tc
), saddr
[0], ureg_imm1f(shader
, pos
/ size
));
104 ureg_MOV(shader
, ureg_writemask(daddr
[1], wm_start
), saddr
[1]);
105 ureg_ADD(shader
, ureg_writemask(daddr
[1], wm_tc
), saddr
[1], ureg_imm1f(shader
, pos
/ size
));
109 fetch_four(struct ureg_program
*shader
, struct ureg_dst m
[2], struct ureg_src addr
[2], struct ureg_src sampler
)
111 ureg_TEX(shader
, m
[0], TGSI_TEXTURE_3D
, addr
[0], sampler
);
112 ureg_TEX(shader
, m
[1], TGSI_TEXTURE_3D
, addr
[1], sampler
);
116 matrix_mul(struct ureg_program
*shader
, struct ureg_dst dst
, struct ureg_dst l
[2], struct ureg_dst r
[2])
120 tmp
= ureg_DECL_temporary(shader
);
123 * tmp.xy = dot4(m[0][0..1], m[1][0..1])
124 * dst = tmp.x + tmp.y
126 ureg_DP4(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_X
), ureg_src(l
[0]), ureg_src(r
[0]));
127 ureg_DP4(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(l
[1]), ureg_src(r
[1]));
128 ureg_ADD(shader
, dst
,
129 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
),
130 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
));
132 ureg_release_temporary(shader
, tmp
);
136 create_stage1_vert_shader(struct vl_idct
*idct
)
138 struct ureg_program
*shader
;
139 struct ureg_src vrect
, vpos
;
140 struct ureg_src scale
;
141 struct ureg_dst t_tex
, t_start
;
142 struct ureg_dst o_vpos
, o_l_addr
[2], o_r_addr
[2];
144 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
148 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
149 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
151 t_tex
= ureg_DECL_temporary(shader
);
152 t_start
= ureg_DECL_temporary(shader
);
154 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
156 o_l_addr
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_L_ADDR0
);
157 o_l_addr
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_L_ADDR1
);
159 o_r_addr
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_R_ADDR0
);
160 o_r_addr
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_R_ADDR1
);
163 * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
165 * t_vpos = vpos + vrect
166 * o_vpos.xy = t_vpos * scale
169 * o_l_addr = calc_addr(...)
170 * o_r_addr = calc_addr(...)
174 scale
= ureg_imm2f(shader
,
175 (float)BLOCK_WIDTH
/ idct
->buffer_width
,
176 (float)BLOCK_HEIGHT
/ idct
->buffer_height
);
178 ureg_ADD(shader
, ureg_writemask(t_tex
, TGSI_WRITEMASK_XY
), vpos
, vrect
);
179 ureg_MUL(shader
, ureg_writemask(t_tex
, TGSI_WRITEMASK_XY
), ureg_src(t_tex
), scale
);
181 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_tex
));
182 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_ZW
), ureg_imm1f(shader
, 1.0f
));
184 ureg_MUL(shader
, ureg_writemask(t_tex
, TGSI_WRITEMASK_Z
),
185 ureg_scalar(vrect
, TGSI_SWIZZLE_X
),
186 ureg_imm1f(shader
, BLOCK_WIDTH
/ idct
->nr_of_render_targets
));
187 ureg_MUL(shader
, ureg_writemask(t_start
, TGSI_WRITEMASK_XY
), vpos
, scale
);
189 calc_addr(shader
, o_l_addr
, ureg_src(t_tex
), ureg_src(t_start
), false, false, idct
->buffer_width
/ 4);
190 calc_addr(shader
, o_r_addr
, vrect
, ureg_imm1f(shader
, 0.0f
), true, true, BLOCK_WIDTH
/ 4);
192 ureg_release_temporary(shader
, t_tex
);
193 ureg_release_temporary(shader
, t_start
);
197 return ureg_create_shader_and_destroy(shader
, idct
->pipe
);
201 create_stage1_frag_shader(struct vl_idct
*idct
)
203 struct ureg_program
*shader
;
205 struct ureg_src l_addr
[2], r_addr
[2];
207 struct ureg_dst l
[4][2], r
[2];
208 struct ureg_dst fragment
[idct
->nr_of_render_targets
];
212 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
216 l_addr
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_L_ADDR0
, TGSI_INTERPOLATE_LINEAR
);
217 l_addr
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_L_ADDR1
, TGSI_INTERPOLATE_LINEAR
);
219 r_addr
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_R_ADDR0
, TGSI_INTERPOLATE_LINEAR
);
220 r_addr
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_R_ADDR1
, TGSI_INTERPOLATE_LINEAR
);
222 for (i
= 0; i
< idct
->nr_of_render_targets
; ++i
)
223 fragment
[i
] = ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, i
);
225 for (i
= 0; i
< 4; ++i
) {
226 l
[i
][0] = ureg_DECL_temporary(shader
);
227 l
[i
][1] = ureg_DECL_temporary(shader
);
230 r
[0] = ureg_DECL_temporary(shader
);
231 r
[1] = ureg_DECL_temporary(shader
);
233 for (i
= 1; i
< 4; ++i
) {
234 increment_addr(shader
, l
[i
], l_addr
, false, false, i
, idct
->buffer_height
);
237 for (i
= 0; i
< 4; ++i
) {
238 struct ureg_src s_addr
[2];
239 s_addr
[0] = i
== 0 ? l_addr
[0] : ureg_src(l
[i
][0]);
240 s_addr
[1] = i
== 0 ? l_addr
[1] : ureg_src(l
[i
][1]);
241 fetch_four(shader
, l
[i
], s_addr
, ureg_DECL_sampler(shader
, 1));
244 for (i
= 0; i
< idct
->nr_of_render_targets
; ++i
) {
246 increment_addr(shader
, r
, r_addr
, true, true, i
, BLOCK_HEIGHT
);
248 struct ureg_src s_addr
[2] = { ureg_src(r
[0]), ureg_src(r
[1]) };
249 s_addr
[0] = i
== 0 ? r_addr
[0] : ureg_src(r
[0]);
250 s_addr
[1] = i
== 0 ? r_addr
[1] : ureg_src(r
[1]);
251 fetch_four(shader
, r
, s_addr
, ureg_DECL_sampler(shader
, 0));
253 for (j
= 0; j
< 4; ++j
) {
254 matrix_mul(shader
, ureg_writemask(fragment
[i
], TGSI_WRITEMASK_X
<< j
), l
[j
], r
);
258 for (i
= 0; i
< 4; ++i
) {
259 ureg_release_temporary(shader
, l
[i
][0]);
260 ureg_release_temporary(shader
, l
[i
][1]);
262 ureg_release_temporary(shader
, r
[0]);
263 ureg_release_temporary(shader
, r
[1]);
267 return ureg_create_shader_and_destroy(shader
, idct
->pipe
);
271 vl_idct_stage2_vert_shader(struct vl_idct
*idct
, struct ureg_program
*shader
,
272 unsigned first_output
, struct ureg_dst tex
)
274 struct ureg_src vrect
, vpos
;
275 struct ureg_src scale
;
276 struct ureg_dst t_start
;
277 struct ureg_dst o_l_addr
[2], o_r_addr
[2];
279 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
280 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
282 t_start
= ureg_DECL_temporary(shader
);
286 o_l_addr
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, first_output
+ VS_O_L_ADDR0
);
287 o_l_addr
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, first_output
+ VS_O_L_ADDR1
);
289 o_r_addr
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, first_output
+ VS_O_R_ADDR0
);
290 o_r_addr
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, first_output
+ VS_O_R_ADDR1
);
292 scale
= ureg_imm2f(shader
,
293 (float)BLOCK_WIDTH
/ idct
->buffer_width
,
294 (float)BLOCK_HEIGHT
/ idct
->buffer_height
);
296 ureg_MUL(shader
, ureg_writemask(tex
, TGSI_WRITEMASK_Z
),
297 ureg_scalar(vrect
, TGSI_SWIZZLE_X
),
298 ureg_imm1f(shader
, BLOCK_WIDTH
/ idct
->nr_of_render_targets
));
299 ureg_MUL(shader
, ureg_writemask(t_start
, TGSI_WRITEMASK_XY
), vpos
, scale
);
301 calc_addr(shader
, o_l_addr
, vrect
, ureg_imm1f(shader
, 0.0f
), false, false, BLOCK_WIDTH
/ 4);
302 calc_addr(shader
, o_r_addr
, ureg_src(tex
), ureg_src(t_start
), true, false, idct
->buffer_height
/ 4);
306 vl_idct_stage2_frag_shader(struct vl_idct
*idct
, struct ureg_program
*shader
,
307 unsigned first_input
, struct ureg_dst fragment
)
309 struct ureg_src l_addr
[2], r_addr
[2];
311 struct ureg_dst l
[2], r
[2];
315 l_addr
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, first_input
+ VS_O_L_ADDR0
, TGSI_INTERPOLATE_LINEAR
);
316 l_addr
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, first_input
+ VS_O_L_ADDR1
, TGSI_INTERPOLATE_LINEAR
);
318 r_addr
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, first_input
+ VS_O_R_ADDR0
, TGSI_INTERPOLATE_LINEAR
);
319 r_addr
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, first_input
+ VS_O_R_ADDR1
, TGSI_INTERPOLATE_LINEAR
);
321 l
[0] = ureg_DECL_temporary(shader
);
322 l
[1] = ureg_DECL_temporary(shader
);
323 r
[0] = ureg_DECL_temporary(shader
);
324 r
[1] = ureg_DECL_temporary(shader
);
326 fetch_four(shader
, l
, l_addr
, ureg_DECL_sampler(shader
, 0));
327 fetch_four(shader
, r
, r_addr
, ureg_DECL_sampler(shader
, 1));
329 matrix_mul(shader
, fragment
, l
, r
);
331 ureg_release_temporary(shader
, l
[0]);
332 ureg_release_temporary(shader
, l
[1]);
333 ureg_release_temporary(shader
, r
[0]);
334 ureg_release_temporary(shader
, r
[1]);
338 init_shaders(struct vl_idct
*idct
)
340 idct
->vs
= create_stage1_vert_shader(idct
);
344 idct
->fs
= create_stage1_frag_shader(idct
);
351 idct
->pipe
->delete_vs_state(idct
->pipe
, idct
->vs
);
358 cleanup_shaders(struct vl_idct
*idct
)
360 idct
->pipe
->delete_vs_state(idct
->pipe
, idct
->vs
);
361 idct
->pipe
->delete_fs_state(idct
->pipe
, idct
->fs
);
365 init_state(struct vl_idct
*idct
)
367 struct pipe_blend_state blend
;
368 struct pipe_rasterizer_state rs_state
;
369 struct pipe_sampler_state sampler
;
374 memset(&rs_state
, 0, sizeof(rs_state
));
375 rs_state
.gl_rasterization_rules
= false;
376 idct
->rs_state
= idct
->pipe
->create_rasterizer_state(idct
->pipe
, &rs_state
);
380 memset(&blend
, 0, sizeof blend
);
382 blend
.independent_blend_enable
= 0;
383 blend
.rt
[0].blend_enable
= 0;
384 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
385 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_ONE
;
386 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ONE
;
387 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
388 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_ONE
;
389 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
390 blend
.logicop_enable
= 0;
391 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
392 /* Needed to allow color writes to FB, even if blending disabled */
393 blend
.rt
[0].colormask
= PIPE_MASK_RGBA
;
395 idct
->blend
= idct
->pipe
->create_blend_state(idct
->pipe
, &blend
);
399 for (i
= 0; i
< 2; ++i
) {
400 memset(&sampler
, 0, sizeof(sampler
));
401 sampler
.wrap_s
= PIPE_TEX_WRAP_REPEAT
;
402 sampler
.wrap_t
= PIPE_TEX_WRAP_REPEAT
;
403 sampler
.wrap_r
= PIPE_TEX_WRAP_REPEAT
;
404 sampler
.min_img_filter
= PIPE_TEX_FILTER_NEAREST
;
405 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
406 sampler
.mag_img_filter
= PIPE_TEX_FILTER_NEAREST
;
407 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
408 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
409 sampler
.normalized_coords
= 1;
410 idct
->samplers
[i
] = idct
->pipe
->create_sampler_state(idct
->pipe
, &sampler
);
411 if (!idct
->samplers
[i
])
418 for (i
= 0; i
< 2; ++i
)
419 if (idct
->samplers
[i
])
420 idct
->pipe
->delete_sampler_state(idct
->pipe
, idct
->samplers
[i
]);
422 idct
->pipe
->delete_rasterizer_state(idct
->pipe
, idct
->rs_state
);
425 idct
->pipe
->delete_blend_state(idct
->pipe
, idct
->blend
);
432 cleanup_state(struct vl_idct
*idct
)
436 for (i
= 0; i
< 2; ++i
)
437 idct
->pipe
->delete_sampler_state(idct
->pipe
, idct
->samplers
[i
]);
439 idct
->pipe
->delete_rasterizer_state(idct
->pipe
, idct
->rs_state
);
440 idct
->pipe
->delete_blend_state(idct
->pipe
, idct
->blend
);
444 init_intermediate(struct vl_idct
*idct
, struct vl_idct_buffer
*buffer
)
446 struct pipe_resource
*tex
;
447 struct pipe_surface surf_templ
;
450 assert(idct
&& buffer
);
452 tex
= buffer
->sampler_views
.individual
.intermediate
->texture
;
454 buffer
->fb_state
.width
= tex
->width0
;
455 buffer
->fb_state
.height
= tex
->height0
;
456 buffer
->fb_state
.nr_cbufs
= idct
->nr_of_render_targets
;
457 for(i
= 0; i
< idct
->nr_of_render_targets
; ++i
) {
458 memset(&surf_templ
, 0, sizeof(surf_templ
));
459 surf_templ
.format
= tex
->format
;
460 surf_templ
.u
.tex
.first_layer
= i
;
461 surf_templ
.u
.tex
.last_layer
= i
;
462 surf_templ
.usage
= PIPE_BIND_SAMPLER_VIEW
| PIPE_BIND_RENDER_TARGET
;
463 buffer
->fb_state
.cbufs
[i
] = idct
->pipe
->create_surface(
464 idct
->pipe
, tex
, &surf_templ
);
466 if (!buffer
->fb_state
.cbufs
[i
])
470 buffer
->viewport
.scale
[0] = tex
->width0
;
471 buffer
->viewport
.scale
[1] = tex
->height0
;
476 for(i
= 0; i
< idct
->nr_of_render_targets
; ++i
)
477 pipe_surface_reference(&buffer
->fb_state
.cbufs
[i
], NULL
);
483 cleanup_intermediate(struct vl_idct
*idct
, struct vl_idct_buffer
*buffer
)
487 assert(idct
&& buffer
);
489 for(i
= 0; i
< idct
->nr_of_render_targets
; ++i
)
490 pipe_surface_reference(&buffer
->fb_state
.cbufs
[i
], NULL
);
492 pipe_sampler_view_reference(&buffer
->sampler_views
.individual
.intermediate
, NULL
);
495 struct pipe_sampler_view
*
496 vl_idct_upload_matrix(struct pipe_context
*pipe
, float scale
)
498 struct pipe_resource tex_templ
, *matrix
;
499 struct pipe_sampler_view sv_templ
, *sv
;
500 struct pipe_transfer
*buf_transfer
;
501 unsigned i
, j
, pitch
;
504 struct pipe_box rect
=
514 memset(&tex_templ
, 0, sizeof(tex_templ
));
515 tex_templ
.target
= PIPE_TEXTURE_2D
;
516 tex_templ
.format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
517 tex_templ
.last_level
= 0;
518 tex_templ
.width0
= 2;
519 tex_templ
.height0
= 8;
520 tex_templ
.depth0
= 1;
521 tex_templ
.array_size
= 1;
522 tex_templ
.usage
= PIPE_USAGE_IMMUTABLE
;
523 tex_templ
.bind
= PIPE_BIND_SAMPLER_VIEW
;
526 matrix
= pipe
->screen
->resource_create(pipe
->screen
, &tex_templ
);
530 buf_transfer
= pipe
->get_transfer
533 0, PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD
,
539 pitch
= buf_transfer
->stride
/ sizeof(float);
541 f
= pipe
->transfer_map(pipe
, buf_transfer
);
545 for(i
= 0; i
< BLOCK_HEIGHT
; ++i
)
546 for(j
= 0; j
< BLOCK_WIDTH
; ++j
)
547 // transpose and scale
548 f
[i
* pitch
+ j
] = const_matrix
[j
][i
] * scale
;
550 pipe
->transfer_unmap(pipe
, buf_transfer
);
551 pipe
->transfer_destroy(pipe
, buf_transfer
);
553 memset(&sv_templ
, 0, sizeof(sv_templ
));
554 u_sampler_view_default_template(&sv_templ
, matrix
, matrix
->format
);
555 sv
= pipe
->create_sampler_view(pipe
, matrix
, &sv_templ
);
556 pipe_resource_reference(&matrix
, NULL
);
563 pipe
->transfer_destroy(pipe
, buf_transfer
);
566 pipe_resource_reference(&matrix
, NULL
);
572 bool vl_idct_init(struct vl_idct
*idct
, struct pipe_context
*pipe
,
573 unsigned buffer_width
, unsigned buffer_height
,
574 unsigned nr_of_render_targets
,
575 struct pipe_sampler_view
*matrix
,
576 struct pipe_sampler_view
*transpose
)
578 assert(idct
&& pipe
&& matrix
);
581 idct
->buffer_width
= buffer_width
;
582 idct
->buffer_height
= buffer_height
;
583 idct
->nr_of_render_targets
= nr_of_render_targets
;
585 pipe_sampler_view_reference(&idct
->matrix
, matrix
);
586 pipe_sampler_view_reference(&idct
->transpose
, transpose
);
588 if(!init_shaders(idct
))
591 if(!init_state(idct
)) {
592 cleanup_shaders(idct
);
600 vl_idct_cleanup(struct vl_idct
*idct
)
602 cleanup_shaders(idct
);
605 pipe_sampler_view_reference(&idct
->matrix
, NULL
);
609 vl_idct_init_buffer(struct vl_idct
*idct
, struct vl_idct_buffer
*buffer
,
610 struct pipe_sampler_view
*source
,
611 struct pipe_sampler_view
*intermediate
,
612 struct pipe_surface
*destination
)
619 memset(buffer
, 0, sizeof(struct vl_idct_buffer
));
621 pipe_sampler_view_reference(&buffer
->sampler_views
.individual
.matrix
, idct
->matrix
);
622 pipe_sampler_view_reference(&buffer
->sampler_views
.individual
.source
, source
);
623 pipe_sampler_view_reference(&buffer
->sampler_views
.individual
.transpose
, idct
->transpose
);
624 pipe_sampler_view_reference(&buffer
->sampler_views
.individual
.intermediate
, intermediate
);
626 if (!init_intermediate(idct
, buffer
))
629 buffer
->viewport
.scale
[2] = 1;
630 buffer
->viewport
.scale
[3] = 1;
631 buffer
->viewport
.translate
[0] = 0;
632 buffer
->viewport
.translate
[1] = 0;
633 buffer
->viewport
.translate
[2] = 0;
634 buffer
->viewport
.translate
[3] = 0;
640 vl_idct_cleanup_buffer(struct vl_idct
*idct
, struct vl_idct_buffer
*buffer
)
644 assert(idct
&& buffer
);
646 for(i
= 0; i
< idct
->nr_of_render_targets
; ++i
)
647 pipe_surface_reference(&buffer
->fb_state
.cbufs
[i
], NULL
);
649 cleanup_intermediate(idct
, buffer
);
653 vl_idct_flush(struct vl_idct
*idct
, struct vl_idct_buffer
*buffer
, unsigned num_instances
)
658 idct
->pipe
->bind_rasterizer_state(idct
->pipe
, idct
->rs_state
);
659 idct
->pipe
->bind_blend_state(idct
->pipe
, idct
->blend
);
660 idct
->pipe
->bind_fragment_sampler_states(idct
->pipe
, 2, idct
->samplers
);
663 idct
->pipe
->set_framebuffer_state(idct
->pipe
, &buffer
->fb_state
);
664 idct
->pipe
->set_viewport_state(idct
->pipe
, &buffer
->viewport
);
665 idct
->pipe
->set_fragment_sampler_views(idct
->pipe
, 2, buffer
->sampler_views
.stage
[0]);
666 idct
->pipe
->bind_vs_state(idct
->pipe
, idct
->vs
);
667 idct
->pipe
->bind_fs_state(idct
->pipe
, idct
->fs
);
668 util_draw_arrays_instanced(idct
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);
672 vl_idct_prepare_stage2(struct vl_idct
*idct
, struct vl_idct_buffer
*buffer
)
678 idct
->pipe
->bind_rasterizer_state(idct
->pipe
, idct
->rs_state
);
679 idct
->pipe
->bind_fragment_sampler_states(idct
->pipe
, 2, idct
->samplers
);
680 idct
->pipe
->set_fragment_sampler_views(idct
->pipe
, 2, buffer
->sampler_views
.stage
[1]);