1 #include "xorg_exa_tgsi.h"
3 /*### stupidity defined in X11/extensions/XI.h */
6 #include "pipe/p_format.h"
7 #include "pipe/p_context.h"
8 #include "pipe/p_state.h"
9 #include "pipe/p_inlines.h"
10 #include "pipe/p_shader_tokens.h"
12 #include "util/u_memory.h"
13 #include "util/u_simple_shaders.h"
15 #include "tgsi/tgsi_ureg.h"
17 #include "cso_cache/cso_context.h"
18 #include "cso_cache/cso_hash.h"
/* Vertex shader:
 * IN[0] = vertex pos
 * IN[1] = src tex coord | solid fill color
 * IN[2] = mask tex coord
 * IN[3] = dst tex coord
 * CONST[0] = (2/dst_width, 2/dst_height, 1, 1)
 * CONST[1] = (-1, -1, 0, 0)
 *
 * OUT[0] = vertex pos
 * OUT[1] = src tex coord | solid fill color
 * OUT[2] = mask tex coord
 * OUT[3] = dst tex coord
 */

/* Fragment shader:
 * SAMP[0] = src
 * SAMP[1] = mask
 * SAMP[2] = dst
 * IN[0] = pos src | solid fill color
 * IN[1] = pos mask
 * IN[2] = pos dst
 * CONST[0] = (0, 0, 0, 1)
 *
 * OUT[0] = color
 */
/* Per-renderer cache of compiled shaders, keyed by their trait bits
 * (VS_xxx for vs_hash, FS_xxx for fs_hash).  Entries are CSO handles
 * owned by this cache until cache_destroy() runs. */
struct xorg_shaders {
   struct xorg_renderer *r;

   struct cso_hash *vs_hash;
   struct cso_hash *fs_hash;
};
54 src_in_mask(struct ureg_program
*ureg
,
60 if (component_alpha
== FS_CA_FULL
) {
61 ureg_MUL(ureg
, dst
, src
, mask
);
62 } else if (component_alpha
== FS_CA_SRCALPHA
) {
64 ureg_scalar(src
, TGSI_SWIZZLE_W
), mask
);
67 ureg_MUL(ureg
, dst
, src
,
68 ureg_scalar(mask
, TGSI_SWIZZLE_X
));
72 static struct ureg_src
73 vs_normalize_coords(struct ureg_program
*ureg
, struct ureg_src coords
,
74 struct ureg_src const0
, struct ureg_src const1
)
76 struct ureg_dst tmp
= ureg_DECL_temporary(ureg
);
78 ureg_MAD(ureg
, tmp
, coords
, const0
, const1
);
80 ureg_release_temporary(ureg
, tmp
);
85 linear_gradient(struct ureg_program
*ureg
,
88 struct ureg_src sampler
,
89 struct ureg_src coords
,
90 struct ureg_src const0124
,
91 struct ureg_src matrow0
,
92 struct ureg_src matrow1
,
93 struct ureg_src matrow2
)
95 struct ureg_dst temp0
= ureg_DECL_temporary(ureg
);
96 struct ureg_dst temp1
= ureg_DECL_temporary(ureg
);
97 struct ureg_dst temp2
= ureg_DECL_temporary(ureg
);
98 struct ureg_dst temp3
= ureg_DECL_temporary(ureg
);
99 struct ureg_dst temp4
= ureg_DECL_temporary(ureg
);
100 struct ureg_dst temp5
= ureg_DECL_temporary(ureg
);
103 ureg_writemask(temp0
, TGSI_WRITEMASK_XY
), pos
);
105 ureg_writemask(temp0
, TGSI_WRITEMASK_Z
),
106 ureg_scalar(const0124
, TGSI_SWIZZLE_Y
));
108 ureg_DP3(ureg
, temp1
, matrow0
, ureg_src(temp0
));
109 ureg_DP3(ureg
, temp2
, matrow1
, ureg_src(temp0
));
110 ureg_DP3(ureg
, temp3
, matrow2
, ureg_src(temp0
));
111 ureg_RCP(ureg
, temp3
, ureg_src(temp3
));
112 ureg_MUL(ureg
, temp1
, ureg_src(temp1
), ureg_src(temp3
));
113 ureg_MUL(ureg
, temp2
, ureg_src(temp2
), ureg_src(temp3
));
115 ureg_MOV(ureg
, ureg_writemask(temp4
, TGSI_WRITEMASK_X
),
117 ureg_MOV(ureg
, ureg_writemask(temp4
, TGSI_WRITEMASK_Y
),
120 ureg_MUL(ureg
, temp0
,
121 ureg_scalar(coords
, TGSI_SWIZZLE_Y
),
122 ureg_scalar(ureg_src(temp4
), TGSI_SWIZZLE_Y
));
123 ureg_MAD(ureg
, temp1
,
124 ureg_scalar(coords
, TGSI_SWIZZLE_X
),
125 ureg_scalar(ureg_src(temp4
), TGSI_SWIZZLE_X
),
128 ureg_MUL(ureg
, temp2
,
130 ureg_scalar(coords
, TGSI_SWIZZLE_Z
));
133 TGSI_TEXTURE_1D
, ureg_src(temp2
), sampler
);
135 ureg_release_temporary(ureg
, temp0
);
136 ureg_release_temporary(ureg
, temp1
);
137 ureg_release_temporary(ureg
, temp2
);
138 ureg_release_temporary(ureg
, temp3
);
139 ureg_release_temporary(ureg
, temp4
);
140 ureg_release_temporary(ureg
, temp5
);
145 radial_gradient(struct ureg_program
*ureg
,
148 struct ureg_src sampler
,
149 struct ureg_src coords
,
150 struct ureg_src const0124
,
151 struct ureg_src matrow0
,
152 struct ureg_src matrow1
,
153 struct ureg_src matrow2
)
155 struct ureg_dst temp0
= ureg_DECL_temporary(ureg
);
156 struct ureg_dst temp1
= ureg_DECL_temporary(ureg
);
157 struct ureg_dst temp2
= ureg_DECL_temporary(ureg
);
158 struct ureg_dst temp3
= ureg_DECL_temporary(ureg
);
159 struct ureg_dst temp4
= ureg_DECL_temporary(ureg
);
160 struct ureg_dst temp5
= ureg_DECL_temporary(ureg
);
163 ureg_writemask(temp0
, TGSI_WRITEMASK_XY
),
166 ureg_writemask(temp0
, TGSI_WRITEMASK_Z
),
167 ureg_scalar(const0124
, TGSI_SWIZZLE_Y
));
169 ureg_DP3(ureg
, temp1
, matrow0
, ureg_src(temp0
));
170 ureg_DP3(ureg
, temp2
, matrow1
, ureg_src(temp0
));
171 ureg_DP3(ureg
, temp3
, matrow2
, ureg_src(temp0
));
172 ureg_RCP(ureg
, temp3
, ureg_src(temp3
));
173 ureg_MUL(ureg
, temp1
, ureg_src(temp1
), ureg_src(temp3
));
174 ureg_MUL(ureg
, temp2
, ureg_src(temp2
), ureg_src(temp3
));
176 ureg_MOV(ureg
, ureg_writemask(temp5
, TGSI_WRITEMASK_X
),
178 ureg_MOV(ureg
, ureg_writemask(temp5
, TGSI_WRITEMASK_Y
),
181 ureg_MUL(ureg
, temp0
, ureg_scalar(coords
, TGSI_SWIZZLE_Y
),
182 ureg_scalar(ureg_src(temp5
), TGSI_SWIZZLE_Y
));
183 ureg_MAD(ureg
, temp1
,
184 ureg_scalar(coords
, TGSI_SWIZZLE_X
),
185 ureg_scalar(ureg_src(temp5
), TGSI_SWIZZLE_X
),
187 ureg_ADD(ureg
, temp1
,
188 ureg_src(temp1
), ureg_src(temp1
));
189 ureg_MUL(ureg
, temp3
,
190 ureg_scalar(ureg_src(temp5
), TGSI_SWIZZLE_Y
),
191 ureg_scalar(ureg_src(temp5
), TGSI_SWIZZLE_Y
));
192 ureg_MAD(ureg
, temp4
,
193 ureg_scalar(ureg_src(temp5
), TGSI_SWIZZLE_X
),
194 ureg_scalar(ureg_src(temp5
), TGSI_SWIZZLE_X
),
196 ureg_MOV(ureg
, temp4
, ureg_negate(ureg_src(temp4
)));
197 ureg_MUL(ureg
, temp2
,
198 ureg_scalar(coords
, TGSI_SWIZZLE_Z
),
200 ureg_MUL(ureg
, temp0
,
201 ureg_scalar(const0124
, TGSI_SWIZZLE_W
),
203 ureg_MUL(ureg
, temp3
,
204 ureg_src(temp1
), ureg_src(temp1
));
205 ureg_SUB(ureg
, temp2
,
206 ureg_src(temp3
), ureg_src(temp0
));
207 ureg_RSQ(ureg
, temp2
, ureg_abs(ureg_src(temp2
)));
208 ureg_RCP(ureg
, temp2
, ureg_src(temp2
));
209 ureg_SUB(ureg
, temp1
,
210 ureg_src(temp2
), ureg_src(temp1
));
211 ureg_ADD(ureg
, temp0
,
212 ureg_scalar(coords
, TGSI_SWIZZLE_Z
),
213 ureg_scalar(coords
, TGSI_SWIZZLE_Z
));
214 ureg_RCP(ureg
, temp0
, ureg_src(temp0
));
215 ureg_MUL(ureg
, temp2
,
216 ureg_src(temp1
), ureg_src(temp0
));
217 ureg_TEX(ureg
, out
, TGSI_TEXTURE_1D
,
218 ureg_src(temp2
), sampler
);
220 ureg_release_temporary(ureg
, temp0
);
221 ureg_release_temporary(ureg
, temp1
);
222 ureg_release_temporary(ureg
, temp2
);
223 ureg_release_temporary(ureg
, temp3
);
224 ureg_release_temporary(ureg
, temp4
);
225 ureg_release_temporary(ureg
, temp5
);
229 create_vs(struct pipe_context
*pipe
,
232 struct ureg_program
*ureg
;
235 struct ureg_src const0
, const1
;
236 boolean is_fill
= vs_traits
& VS_FILL
;
237 boolean is_composite
= vs_traits
& VS_COMPOSITE
;
238 boolean has_mask
= vs_traits
& VS_MASK
;
239 boolean is_yuv
= vs_traits
& VS_YUV
;
240 unsigned input_slot
= 0;
242 ureg
= ureg_create(TGSI_PROCESSOR_VERTEX
);
246 const0
= ureg_DECL_constant(ureg
, 0);
247 const1
= ureg_DECL_constant(ureg
, 1);
249 /* it has to be either a fill or a composite op */
250 debug_assert(is_fill
^ is_composite
);
252 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
253 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_POSITION
, 0);
254 src
= vs_normalize_coords(ureg
, src
,
256 ureg_MOV(ureg
, dst
, src
);
259 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
260 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_GENERIC
, 0);
261 ureg_MOV(ureg
, dst
, src
);
263 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
264 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_GENERIC
, 1);
265 ureg_MOV(ureg
, dst
, src
);
267 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
268 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_GENERIC
, 2);
269 ureg_MOV(ureg
, dst
, src
);
273 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
274 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_GENERIC
, 0);
275 ureg_MOV(ureg
, dst
, src
);
279 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
280 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_COLOR
, 0);
281 ureg_MOV(ureg
, dst
, src
);
285 src
= ureg_DECL_vs_input(ureg
, input_slot
++);
286 dst
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_GENERIC
, 1);
287 ureg_MOV(ureg
, dst
, src
);
292 return ureg_create_shader_and_destroy(ureg
, pipe
);
296 create_yuv_shader(struct pipe_context
*pipe
, struct ureg_program
*ureg
)
298 struct ureg_src y_sampler
, u_sampler
, v_sampler
;
300 struct ureg_src matrow0
, matrow1
, matrow2
;
301 struct ureg_dst y
, u
, v
, rgb
;
302 struct ureg_dst out
= ureg_DECL_output(ureg
,
306 pos
= ureg_DECL_fs_input(ureg
,
307 TGSI_SEMANTIC_GENERIC
,
309 TGSI_INTERPOLATE_PERSPECTIVE
);
311 rgb
= ureg_DECL_temporary(ureg
);
312 y
= ureg_DECL_temporary(ureg
);
313 u
= ureg_DECL_temporary(ureg
);
314 v
= ureg_DECL_temporary(ureg
);
316 y_sampler
= ureg_DECL_sampler(ureg
, 0);
317 u_sampler
= ureg_DECL_sampler(ureg
, 1);
318 v_sampler
= ureg_DECL_sampler(ureg
, 2);
320 matrow0
= ureg_DECL_constant(ureg
, 0);
321 matrow1
= ureg_DECL_constant(ureg
, 1);
322 matrow2
= ureg_DECL_constant(ureg
, 2);
325 TGSI_TEXTURE_2D
, pos
, y_sampler
);
327 TGSI_TEXTURE_2D
, pos
, u_sampler
);
329 TGSI_TEXTURE_2D
, pos
, v_sampler
);
332 ureg_scalar(ureg_src(y
), TGSI_SWIZZLE_X
),
335 ureg_scalar(ureg_src(u
), TGSI_SWIZZLE_X
),
339 ureg_scalar(ureg_src(v
), TGSI_SWIZZLE_X
),
344 ureg_MOV(ureg
, ureg_writemask(rgb
, TGSI_WRITEMASK_W
),
345 ureg_scalar(matrow0
, TGSI_SWIZZLE_X
));
347 ureg_MOV(ureg
, out
, ureg_src(rgb
));
349 ureg_release_temporary(ureg
, rgb
);
350 ureg_release_temporary(ureg
, y
);
351 ureg_release_temporary(ureg
, u
);
352 ureg_release_temporary(ureg
, v
);
356 return ureg_create_shader_and_destroy(ureg
, pipe
);
360 create_fs(struct pipe_context
*pipe
,
363 struct ureg_program
*ureg
;
364 struct ureg_src
/*dst_sampler,*/ src_sampler
, mask_sampler
;
365 struct ureg_src
/*dst_pos,*/ src_input
, mask_pos
;
366 struct ureg_dst src
, mask
;
368 boolean has_mask
= fs_traits
& FS_MASK
;
369 boolean is_fill
= fs_traits
& FS_FILL
;
370 boolean is_composite
= fs_traits
& FS_COMPOSITE
;
371 boolean is_solid
= fs_traits
& FS_SOLID_FILL
;
372 boolean is_lingrad
= fs_traits
& FS_LINGRAD_FILL
;
373 boolean is_radgrad
= fs_traits
& FS_RADGRAD_FILL
;
374 unsigned comp_alpha
= fs_traits
& FS_COMPONENT_ALPHA
;
375 boolean is_yuv
= fs_traits
& FS_YUV
;
377 ureg
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
381 /* it has to be either a fill, a composite op or a yuv conversion */
382 debug_assert((is_fill
^ is_composite
) ^ is_yuv
);
384 out
= ureg_DECL_output(ureg
,
389 src_sampler
= ureg_DECL_sampler(ureg
, 0);
390 src_input
= ureg_DECL_fs_input(ureg
,
391 TGSI_SEMANTIC_GENERIC
,
393 TGSI_INTERPOLATE_PERSPECTIVE
);
394 } else if (is_fill
) {
396 src_input
= ureg_DECL_fs_input(ureg
,
399 TGSI_INTERPOLATE_PERSPECTIVE
);
401 src_input
= ureg_DECL_fs_input(ureg
,
402 TGSI_SEMANTIC_POSITION
,
404 TGSI_INTERPOLATE_PERSPECTIVE
);
406 debug_assert(is_yuv
);
407 return create_yuv_shader(pipe
, ureg
);
411 mask_sampler
= ureg_DECL_sampler(ureg
, 1);
412 mask_pos
= ureg_DECL_fs_input(ureg
,
413 TGSI_SEMANTIC_GENERIC
,
415 TGSI_INTERPOLATE_PERSPECTIVE
);
418 #if 0 /* unused right now */
419 dst_sampler
= ureg_DECL_sampler(ureg
, 2);
420 dst_pos
= ureg_DECL_fs_input(ureg
,
421 TGSI_SEMANTIC_POSITION
,
423 TGSI_INTERPOLATE_PERSPECTIVE
);
428 src
= ureg_DECL_temporary(ureg
);
432 TGSI_TEXTURE_2D
, src_input
, src_sampler
);
433 } else if (is_fill
) {
436 src
= ureg_dst(src_input
);
438 ureg_MOV(ureg
, out
, src_input
);
439 } else if (is_lingrad
|| is_radgrad
) {
440 struct ureg_src coords
, const0124
,
441 matrow0
, matrow1
, matrow2
;
444 src
= ureg_DECL_temporary(ureg
);
448 coords
= ureg_DECL_constant(ureg
, 0);
449 const0124
= ureg_DECL_constant(ureg
, 1);
450 matrow0
= ureg_DECL_constant(ureg
, 2);
451 matrow1
= ureg_DECL_constant(ureg
, 3);
452 matrow2
= ureg_DECL_constant(ureg
, 4);
455 linear_gradient(ureg
, src
,
456 src_input
, src_sampler
,
458 matrow0
, matrow1
, matrow2
);
459 } else if (is_radgrad
) {
460 radial_gradient(ureg
, src
,
461 src_input
, src_sampler
,
463 matrow0
, matrow1
, matrow2
);
466 debug_assert(!"Unknown fill type!");
470 mask
= ureg_DECL_temporary(ureg
);
472 TGSI_TEXTURE_2D
, mask_pos
, mask_sampler
);
474 src_in_mask(ureg
, out
, ureg_src(src
), ureg_src(mask
), comp_alpha
);
475 ureg_release_temporary(ureg
, mask
);
480 return ureg_create_shader_and_destroy(ureg
, pipe
);
483 struct xorg_shaders
* xorg_shaders_create(struct xorg_renderer
*r
)
485 struct xorg_shaders
*sc
= CALLOC_STRUCT(xorg_shaders
);
488 sc
->vs_hash
= cso_hash_create();
489 sc
->fs_hash
= cso_hash_create();
495 cache_destroy(struct cso_context
*cso
,
496 struct cso_hash
*hash
,
499 struct cso_hash_iter iter
= cso_hash_first_node(hash
);
500 while (!cso_hash_iter_is_null(iter
)) {
501 void *shader
= (void *)cso_hash_iter_data(iter
);
502 if (processor
== PIPE_SHADER_FRAGMENT
) {
503 cso_delete_fragment_shader(cso
, shader
);
504 } else if (processor
== PIPE_SHADER_VERTEX
) {
505 cso_delete_vertex_shader(cso
, shader
);
507 iter
= cso_hash_erase(hash
, iter
);
509 cso_hash_delete(hash
);
512 void xorg_shaders_destroy(struct xorg_shaders
*sc
)
514 cache_destroy(sc
->r
->cso
, sc
->vs_hash
,
516 cache_destroy(sc
->r
->cso
, sc
->fs_hash
,
517 PIPE_SHADER_FRAGMENT
);
523 shader_from_cache(struct pipe_context
*pipe
,
525 struct cso_hash
*hash
,
530 struct cso_hash_iter iter
= cso_hash_find(hash
, key
);
532 if (cso_hash_iter_is_null(iter
)) {
533 if (type
== PIPE_SHADER_VERTEX
)
534 shader
= create_vs(pipe
, key
);
536 shader
= create_fs(pipe
, key
);
537 cso_hash_insert(hash
, key
, shader
);
539 shader
= (void *)cso_hash_iter_data(iter
);
544 struct xorg_shader
xorg_shaders_get(struct xorg_shaders
*sc
,
548 struct xorg_shader shader
= { NULL
, NULL
};
551 vs
= shader_from_cache(sc
->r
->pipe
, PIPE_SHADER_VERTEX
,
552 sc
->vs_hash
, vs_traits
);
553 fs
= shader_from_cache(sc
->r
->pipe
, PIPE_SHADER_FRAGMENT
,
554 sc
->fs_hash
, fs_traits
);
556 debug_assert(vs
&& fs
);