/*
 * Copyright (C) 2005-2007  Brian Paul   All Rights Reserved.
 * Copyright (C) 2008  VMware, Inc.   All Rights Reserved.
 * Copyright © 2010 Intel Corporation
 * Copyright © 2011 Bryan Cain
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file glsl_to_tgsi.cpp
 *
 * Translate GLSL IR to TGSI.
 */
#include "st_glsl_to_tgsi.h"

#include "compiler/glsl/glsl_parser_extras.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/program.h"

#include "main/errors.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "main/shaderapi.h"
#include "main/shaderimage.h"
#include "program/prog_instruction.h"

#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "tgsi/tgsi_ureg.h"
#include "tgsi/tgsi_info.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "st_glsl_types.h"
#include "st_program.h"
#include "st_mesa_to_tgsi.h"
#include "st_format.h"
#include "st_shader_cache.h"
#include "st_glsl_to_tgsi_temprename.h"

#include "util/hash_table.h"
#define PROGRAM_ANY_CONST ((1 << PROGRAM_STATE_VAR) |    \
                           (1 << PROGRAM_CONSTANT) |     \
                           (1 << PROGRAM_UNIFORM))

#define MAX_GLSL_TEXTURE_OFFSET 4
static unsigned is_precise(const ir_variable *ir)
{
   return ir->data.precise || ir->data.invariant;
}
class variable_storage
{
   DECLARE_RZALLOC_CXX_OPERATORS(variable_storage)

public:
   variable_storage(ir_variable *var, gl_register_file file, int index,
                    unsigned array_id = 0)
      : file(file), index(index), component(0), var(var), array_id(array_id)
   {
      assert(file != PROGRAM_ARRAY || array_id != 0);
   }

   gl_register_file file;
   int index;

   /* Explicit component location. This is given in terms of the GLSL-style
    * swizzles where each double is a single component, i.e. for 64-bit types
    * it can only be 0 or 1.
    */
   unsigned component;
   ir_variable *var; /* variable that maps to this, if any */
   unsigned array_id;
};
class immediate_storage : public exec_node
{
public:
   immediate_storage(gl_constant_value *values, int size32, GLenum type)
   {
      memcpy(this->values, values, size32 * sizeof(gl_constant_value));
      this->size32 = size32;
      this->type = type;
   }

   /* doubles are stored across 2 gl_constant_values */
   gl_constant_value values[4];
   int size32; /**< Number of 32-bit components (1-4) */
   GLenum type; /**< GL_DOUBLE, GL_FLOAT, GL_INT, GL_BOOL, or GL_UNSIGNED_INT */
};
static const st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
static const st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
struct inout_decl {
   unsigned array_id; /* TGSI ArrayID; 1-based: 0 means not an array */
   unsigned gs_out_streams;
   enum glsl_interp_mode interp;
   enum glsl_base_type base_type;
   ubyte usage_mask; /* GLSL-style usage-mask, i.e. single bit per double */
};
static struct inout_decl *
find_inout_array(struct inout_decl *decls, unsigned count, unsigned array_id)
{
   assert(array_id != 0);

   for (unsigned i = 0; i < count; i++) {
      struct inout_decl *decl = &decls[i];

      if (array_id == decl->array_id) {
         return decl;
      }
   }

   return NULL;
}
static enum glsl_base_type
find_array_type(struct inout_decl *decls, unsigned count, unsigned array_id)
{
   if (!array_id)
      return GLSL_TYPE_ERROR;
   struct inout_decl *decl = find_inout_array(decls, count, array_id);
   if (decl)
      return decl->base_type;
   return GLSL_TYPE_ERROR;
}
struct hwatomic_decl {
   unsigned location;
   unsigned binding;
   unsigned size;
   unsigned array_id;
};
struct glsl_to_tgsi_visitor : public ir_visitor {
public:
   glsl_to_tgsi_visitor();
   ~glsl_to_tgsi_visitor();

   struct gl_context *ctx;
   struct gl_program *prog;
   struct gl_shader_program *shader_program;
   struct gl_linked_shader *shader;
   struct gl_shader_compiler_options *options;

   unsigned *array_sizes;
   unsigned max_num_arrays;

   struct inout_decl inputs[4 * PIPE_MAX_SHADER_INPUTS];
   unsigned num_input_arrays;
   struct inout_decl outputs[4 * PIPE_MAX_SHADER_OUTPUTS];
   unsigned num_outputs;
   unsigned num_output_arrays;

   struct hwatomic_decl atomic_info[PIPE_MAX_HW_ATOMIC_BUFFERS];
   unsigned num_atomics;
   unsigned num_atomic_arrays;
   int num_address_regs;
   uint32_t samplers_used;
   glsl_base_type sampler_types[PIPE_MAX_SAMPLERS];
   enum tgsi_texture_type sampler_targets[PIPE_MAX_SAMPLERS];
   enum tgsi_texture_type image_targets[PIPE_MAX_SHADER_IMAGES];
   enum pipe_format image_formats[PIPE_MAX_SHADER_IMAGES];
   bool indirect_addr_consts;
   int wpos_transform_const;

   bool native_integers;
   bool use_shared_memory;

   variable_storage *find_variable_storage(ir_variable *var);

   int add_constant(gl_register_file file, gl_constant_value values[8],
                    int size, GLenum datatype, uint16_t *swizzle_out);

   st_src_reg get_temp(const glsl_type *type);
   void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);

   st_src_reg st_src_reg_for_double(double val);
   st_src_reg st_src_reg_for_float(float val);
   st_src_reg st_src_reg_for_int(int val);
   st_src_reg st_src_reg_for_int64(int64_t val);
   st_src_reg st_src_reg_for_type(enum glsl_base_type type, int val);

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction.  Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   virtual void visit(ir_barrier *);
   /*@}*/

   void visit_expression(ir_expression *, st_src_reg *) ATTRIBUTE_NOINLINE;

   void visit_atomic_counter_intrinsic(ir_call *);
   void visit_ssbo_intrinsic(ir_call *);
   void visit_membar_intrinsic(ir_call *);
   void visit_shared_intrinsic(ir_call *);
   void visit_image_intrinsic(ir_call *);
   void visit_generic_intrinsic(ir_call *, enum tgsi_opcode op);

   /** List of variable_storage */
   struct hash_table *variables;

   /** List of immediate_storage */
   exec_list immediates;
   unsigned num_immediates;

   /** List of glsl_to_tgsi_instruction */
   exec_list instructions;

   glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, enum tgsi_opcode op,
                                      st_dst_reg dst = undef_dst,
                                      st_src_reg src0 = undef_src,
                                      st_src_reg src1 = undef_src,
                                      st_src_reg src2 = undef_src,
                                      st_src_reg src3 = undef_src);

   glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, enum tgsi_opcode op,
                                      st_dst_reg dst, st_dst_reg dst1,
                                      st_src_reg src0 = undef_src,
                                      st_src_reg src1 = undef_src,
                                      st_src_reg src2 = undef_src,
                                      st_src_reg src3 = undef_src);

   enum tgsi_opcode get_opcode(enum tgsi_opcode op,
                               st_dst_reg dst,
                               st_src_reg src0, st_src_reg src1);

   /**
    * Emit the correct dot-product instruction for the type of arguments
    */
   glsl_to_tgsi_instruction *emit_dp(ir_instruction *ir,
                                     st_dst_reg dst, st_src_reg src0,
                                     st_src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
                    st_dst_reg dst, st_src_reg src0);

   void emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
                    st_dst_reg dst, st_src_reg src0, st_src_reg src1);

   void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);

   void get_deref_offsets(ir_dereference *ir,
                          unsigned *array_size,
                          unsigned *base,
                          uint16_t *index,
                          st_src_reg *reladdr,
                          bool opaque);
   void calc_deref_offsets(ir_dereference *tail,
                           unsigned *array_elements,
                           uint16_t *index,
                           st_src_reg *indirect,
                           unsigned *location);
   st_src_reg canonicalize_gather_offset(st_src_reg offset);
   bool try_emit_mad(ir_expression *ir,
                     int mul_operand);
   bool try_emit_mad_for_and_not(ir_expression *ir,
                                 int try_operand);

   void emit_swz(ir_expression *ir);

   bool process_move_condition(ir_rvalue *ir);

   void simplify_cmp(void);

   void rename_temp_registers(struct rename_reg_pair *renames);
   void get_first_temp_read(int *first_reads);
   void get_first_temp_write(int *first_writes);
   void get_last_temp_read_first_temp_write(int *last_reads, int *first_writes);
   void get_last_temp_write(int *last_writes);

   void copy_propagate(void);
   int eliminate_dead_code(void);

   void merge_two_dsts(void);
   void merge_registers(void);
   void renumber_registers(void);

   void emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
                       st_dst_reg *l, st_src_reg *r,
                       st_src_reg *cond, bool cond_swap);
};
static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
                                           GLSL_TYPE_FLOAT, 0);

static st_dst_reg address_reg2 = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
                                            GLSL_TYPE_FLOAT, 1);

static st_dst_reg sampler_reladdr = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
                                               GLSL_TYPE_FLOAT, 2);
static void
fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3);

static void
fail_link(struct gl_shader_program *prog, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);
   ralloc_vasprintf_append(&prog->data->InfoLog, fmt, args);
   va_end(args);

   prog->data->LinkStatus = LINKING_FAILURE;
}
static int
swizzle_for_size(int size)
{
   static const int size_swizzles[4] = {
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
   };

   assert((size >= 1) && (size <= 4));
   return size_swizzles[size - 1];
}
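/* Example (illustration only, not from the original file): swizzle_for_size(2)
 * returns MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y), so a
 * two-component value is read as .xyyy and the unused Z/W channels simply
 * repeat .y instead of reading undefined components.
 */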
glsl_to_tgsi_instruction *
glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, enum tgsi_opcode op,
                               st_dst_reg dst, st_dst_reg dst1,
                               st_src_reg src0, st_src_reg src1,
                               st_src_reg src2, st_src_reg src3)
{
   glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
   int num_reladdr = 0, i, j;
   bool dst_is_64bit[2];

   op = get_opcode(op, dst, src0, src1);

   /* If we have to do relative addressing, we want to load the ARL
    * reg directly for one of the regs, and preload the other reladdr
    * sources into temps.
    */
   num_reladdr += dst.reladdr != NULL || dst.reladdr2;
   assert(!dst1.reladdr); /* should be lowered in earlier passes */
   num_reladdr += src0.reladdr != NULL || src0.reladdr2 != NULL;
   num_reladdr += src1.reladdr != NULL || src1.reladdr2 != NULL;
   num_reladdr += src2.reladdr != NULL || src2.reladdr2 != NULL;
   num_reladdr += src3.reladdr != NULL || src3.reladdr2 != NULL;

   reladdr_to_temp(ir, &src3, &num_reladdr);
   reladdr_to_temp(ir, &src2, &num_reladdr);
   reladdr_to_temp(ir, &src1, &num_reladdr);
   reladdr_to_temp(ir, &src0, &num_reladdr);

   if (dst.reladdr || dst.reladdr2) {
      if (dst.reladdr)
         emit_arl(ir, address_reg, *dst.reladdr);
      if (dst.reladdr2)
         emit_arl(ir, address_reg2, *dst.reladdr2);
      num_reladdr--;
   }

   assert(num_reladdr == 0);

   /* inst->op has only 8 bits. */
   STATIC_ASSERT(TGSI_OPCODE_LAST <= 255);

   inst->op = op;
   inst->precise = this->precise;
   inst->info = tgsi_get_opcode_info(op);
   inst->dst[0] = dst;
   inst->dst[1] = dst1;
   inst->src[0] = src0;
   inst->src[1] = src1;
   inst->src[2] = src2;
   inst->src[3] = src3;
   inst->is_64bit_expanded = false;

   inst->tex_offsets = NULL;
   inst->tex_offset_num_offset = 0;

   inst->tex_shadow = 0;
   /* default to float, for paths where this is not initialized
    * (since 0==UINT which is likely wrong):
    */
   inst->tex_type = GLSL_TYPE_FLOAT;

   /* Update indirect addressing status used by TGSI */
   if (dst.reladdr || dst.reladdr2) {
      switch (dst.file) {
      case PROGRAM_STATE_VAR:
      case PROGRAM_CONSTANT:
      case PROGRAM_UNIFORM:
         this->indirect_addr_consts = true;
         break;
      case PROGRAM_IMMEDIATE:
         assert(!"immediates should not have indirect addressing");
         break;
      default:
         break;
      }
   } else {
      for (i = 0; i < 4; i++) {
         if (inst->src[i].reladdr) {
            switch (inst->src[i].file) {
            case PROGRAM_STATE_VAR:
            case PROGRAM_CONSTANT:
            case PROGRAM_UNIFORM:
               this->indirect_addr_consts = true;
               break;
            case PROGRAM_IMMEDIATE:
               assert(!"immediates should not have indirect addressing");
               break;
            default:
               break;
            }
         }
      }
   }

   /*
    * This section contains the double processing.
    * GLSL just represents doubles as single channel values,
    * however most HW and TGSI represent doubles as pairs of register channels.
    *
    * so we have to fixup destination writemask/index and src swizzle/indexes.
    * dest writemasks need to translate from single channel write mask
    * to a dual-channel writemask, but also need to modify the index,
    * if we are touching the Z,W fields in the pre-translated writemask.
    *
    * src channels have similiar index modifications along with swizzle
    * changes to we pick the XY, ZW pairs from the correct index.
    *
    * GLSL [0].x -> TGSI [0].xy
    * GLSL [0].y -> TGSI [0].zw
    * GLSL [0].z -> TGSI [1].xy
    * GLSL [0].w -> TGSI [1].zw
    */
   for (j = 0; j < 2; j++) {
      dst_is_64bit[j] = glsl_base_type_is_64bit(inst->dst[j].type);
      if (!dst_is_64bit[j] && inst->dst[j].file == PROGRAM_OUTPUT &&
          inst->dst[j].type == GLSL_TYPE_ARRAY) {
         enum glsl_base_type type = find_array_type(this->outputs,
                                                    this->num_outputs,
                                                    inst->dst[j].array_id);
         if (glsl_base_type_is_64bit(type))
            dst_is_64bit[j] = true;
      }
   }

   if (dst_is_64bit[0] || dst_is_64bit[1] ||
       glsl_base_type_is_64bit(inst->src[0].type)) {
      glsl_to_tgsi_instruction *dinst = NULL;
      int initial_src_swz[4], initial_src_idx[4];
      int initial_dst_idx[2], initial_dst_writemask[2];
      /* select the writemask for dst0 or dst1 */
      unsigned writemask = inst->dst[1].file == PROGRAM_UNDEFINED
         ? inst->dst[0].writemask : inst->dst[1].writemask;

      /* copy out the writemask, index and swizzles for all src/dsts. */
      for (j = 0; j < 2; j++) {
         initial_dst_writemask[j] = inst->dst[j].writemask;
         initial_dst_idx[j] = inst->dst[j].index;
      }

      for (j = 0; j < 4; j++) {
         initial_src_swz[j] = inst->src[j].swizzle;
         initial_src_idx[j] = inst->src[j].index;
      }

      /*
       * scan all the components in the dst writemask
       * generate an instruction for each of them if required.
       */
      st_src_reg addr;
      while (writemask) {

         int i = u_bit_scan(&writemask);

         /* before emitting the instruction, see if we have to adjust
          * load / store address */
         if (i > 1 && (inst->op == TGSI_OPCODE_LOAD ||
                       inst->op == TGSI_OPCODE_STORE) &&
             addr.file == PROGRAM_UNDEFINED) {
            /* We have to advance the buffer address by 16 */
            addr = get_temp(glsl_type::uint_type);
            emit_asm(ir, TGSI_OPCODE_UADD, st_dst_reg(addr),
                     inst->src[0], st_src_reg_for_int(16));
         }

         /* first time use previous instruction */
         if (dinst == NULL) {
            dinst = inst;
         } else {
            /* create a new instructions for subsequent attempts */
            dinst = new(mem_ctx) glsl_to_tgsi_instruction();
            *dinst = *inst;
            dinst->next = NULL;
            dinst->prev = NULL;
         }
         this->instructions.push_tail(dinst);
         dinst->is_64bit_expanded = true;

         /* modify the destination if we are splitting */
         for (j = 0; j < 2; j++) {
            if (dst_is_64bit[j]) {
               dinst->dst[j].writemask = (i & 1) ? WRITEMASK_ZW : WRITEMASK_XY;
               dinst->dst[j].index = initial_dst_idx[j];
               if (i > 1) {
                  if (dinst->op == TGSI_OPCODE_LOAD ||
                      dinst->op == TGSI_OPCODE_STORE)
                     dinst->src[0] = addr;
                  if (dinst->op != TGSI_OPCODE_STORE)
                     dinst->dst[j].index++;
               }
            } else {
               /* if we aren't writing to a double, just get the bit of the
                * initial writemask for this channel
                */
               dinst->dst[j].writemask = initial_dst_writemask[j] & (1 << i);
            }
         }

         /* modify the src registers */
         for (j = 0; j < 4; j++) {
            int swz = GET_SWZ(initial_src_swz[j], i);

            if (glsl_base_type_is_64bit(dinst->src[j].type)) {
               dinst->src[j].index = initial_src_idx[j];
               if (swz > 1) {
                  dinst->src[j].double_reg2 = true;
                  dinst->src[j].index++;
               }

               if (swz & 1)
                  dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W,
                                                        SWIZZLE_Z, SWIZZLE_W);
               else
                  dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
                                                        SWIZZLE_X, SWIZZLE_Y);
            } else {
               /* some opcodes are special case in what they use as sources
                * - [FUI]2D/[UI]2I64 is a float/[u]int src0, (D)LDEXP is
                *   integer src1
                */
               if (op == TGSI_OPCODE_F2D || op == TGSI_OPCODE_U2D ||
                   op == TGSI_OPCODE_I2D ||
                   op == TGSI_OPCODE_I2I64 || op == TGSI_OPCODE_U2I64 ||
                   op == TGSI_OPCODE_DLDEXP || op == TGSI_OPCODE_LDEXP ||
                   (op == TGSI_OPCODE_UCMP && dst_is_64bit[0])) {
                  dinst->src[j].swizzle = MAKE_SWIZZLE4(swz, swz, swz, swz);
               }
            }
         }
      }
      inst = dinst;
   } else {
      this->instructions.push_tail(inst);
   }

   return inst;
}
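/* Worked example (illustration only): a GLSL-level double ADD that writes the
 * .xy channels of a dvec2 is expanded by the loop above into two TGSI
 * instructions -- one writing TEMP[n].xy (GLSL .x) and one writing TEMP[n].zw
 * (GLSL .y) -- with each 64-bit source swizzle rewritten to the matching
 * XY or ZW pair, following the GLSL [0].x -> TGSI [0].xy mapping described
 * in the comment above.
 */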
glsl_to_tgsi_instruction *
glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, enum tgsi_opcode op,
                               st_dst_reg dst,
                               st_src_reg src0, st_src_reg src1,
                               st_src_reg src2, st_src_reg src3)
{
   return emit_asm(ir, op, dst, undef_dst, src0, src1, src2, src3);
}
/**
 * Determines whether to use an integer, unsigned integer, or float opcode
 * based on the operands and input opcode, then emits the result.
 */
enum tgsi_opcode
glsl_to_tgsi_visitor::get_opcode(enum tgsi_opcode op,
                                 st_dst_reg dst,
                                 st_src_reg src0, st_src_reg src1)
{
   enum glsl_base_type type = GLSL_TYPE_FLOAT;

   if (op == TGSI_OPCODE_MOV)
      return op;

   assert(src0.type != GLSL_TYPE_ARRAY);
   assert(src0.type != GLSL_TYPE_STRUCT);
   assert(src1.type != GLSL_TYPE_ARRAY);
   assert(src1.type != GLSL_TYPE_STRUCT);

   if (is_resource_instruction(op))
      type = dst.type;
   else if (src0.type == GLSL_TYPE_INT64 || src1.type == GLSL_TYPE_INT64)
      type = GLSL_TYPE_INT64;
   else if (src0.type == GLSL_TYPE_UINT64 || src1.type == GLSL_TYPE_UINT64)
      type = GLSL_TYPE_UINT64;
   else if (src0.type == GLSL_TYPE_DOUBLE || src1.type == GLSL_TYPE_DOUBLE)
      type = GLSL_TYPE_DOUBLE;
   else if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
      type = GLSL_TYPE_FLOAT;
   else if (native_integers)
      type = src0.type == GLSL_TYPE_BOOL ? GLSL_TYPE_INT : src0.type;

#define case7(c, f, i, u, d, i64, ui64)       \
   case TGSI_OPCODE_##c:                      \
      if (type == GLSL_TYPE_UINT64)           \
         op = TGSI_OPCODE_##ui64;             \
      else if (type == GLSL_TYPE_INT64)       \
         op = TGSI_OPCODE_##i64;              \
      else if (type == GLSL_TYPE_DOUBLE)      \
         op = TGSI_OPCODE_##d;                \
      else if (type == GLSL_TYPE_INT)         \
         op = TGSI_OPCODE_##i;                \
      else if (type == GLSL_TYPE_UINT)        \
         op = TGSI_OPCODE_##u;                \
      else                                    \
         op = TGSI_OPCODE_##f;                \
      break;

#define casecomp(c, f, i, u, d, i64, ui64)                            \
   case TGSI_OPCODE_##c:                                              \
      if (type == GLSL_TYPE_INT64)                                    \
         op = TGSI_OPCODE_##i64;                                      \
      else if (type == GLSL_TYPE_UINT64)                              \
         op = TGSI_OPCODE_##ui64;                                     \
      else if (type == GLSL_TYPE_DOUBLE)                              \
         op = TGSI_OPCODE_##d;                                        \
      else if (type == GLSL_TYPE_INT || type == GLSL_TYPE_SUBROUTINE) \
         op = TGSI_OPCODE_##i;                                        \
      else if (type == GLSL_TYPE_UINT)                                \
         op = TGSI_OPCODE_##u;                                        \
      else if (native_integers)                                       \
         op = TGSI_OPCODE_##f;                                        \
      else                                                            \
         op = TGSI_OPCODE_##c;                                        \
      break;

   switch (op) {
      /* Some instructions are initially selected without considering the type.
       * This fixes the type:
       *
       *    INIT     FLOAT SINT     UINT     DOUBLE   SINT64   UINT64
       */
      case7(ADD,     ADD,  UADD,    UADD,    DADD,    U64ADD,  U64ADD);
      case7(CEIL,    CEIL, LAST,    LAST,    DCEIL,   LAST,    LAST);
      case7(DIV,     DIV,  IDIV,    UDIV,    DDIV,    I64DIV,  U64DIV);
      case7(FMA,     FMA,  UMAD,    UMAD,    DFMA,    LAST,    LAST);
      case7(FLR,     FLR,  LAST,    LAST,    DFLR,    LAST,    LAST);
      case7(FRC,     FRC,  LAST,    LAST,    DFRAC,   LAST,    LAST);
      case7(MUL,     MUL,  UMUL,    UMUL,    DMUL,    U64MUL,  U64MUL);
      case7(MAD,     MAD,  UMAD,    UMAD,    DMAD,    LAST,    LAST);
      case7(MAX,     MAX,  IMAX,    UMAX,    DMAX,    I64MAX,  U64MAX);
      case7(MIN,     MIN,  IMIN,    UMIN,    DMIN,    I64MIN,  U64MIN);
      case7(RCP,     RCP,  LAST,    LAST,    DRCP,    LAST,    LAST);
      case7(ROUND,   ROUND,LAST,    LAST,    DROUND,  LAST,    LAST);
      case7(RSQ,     RSQ,  LAST,    LAST,    DRSQ,    LAST,    LAST);
      case7(SQRT,    SQRT, LAST,    LAST,    DSQRT,   LAST,    LAST);
      case7(SSG,     SSG,  ISSG,    ISSG,    DSSG,    I64SSG,  I64SSG);
      case7(TRUNC,   TRUNC,LAST,    LAST,    DTRUNC,  LAST,    LAST);

      case7(MOD,     LAST, MOD,     UMOD,    LAST,    I64MOD,  U64MOD);
      case7(SHL,     LAST, SHL,     SHL,     LAST,    U64SHL,  U64SHL);
      case7(IBFE,    LAST, IBFE,    UBFE,    LAST,    LAST,    LAST);
      case7(IMSB,    LAST, IMSB,    UMSB,    LAST,    LAST,    LAST);
      case7(IMUL_HI, LAST, IMUL_HI, UMUL_HI, LAST,    LAST,    LAST);
      case7(ISHR,    LAST, ISHR,    USHR,    LAST,    I64SHR,  U64SHR);
      case7(ATOMIMAX,LAST, ATOMIMAX,ATOMUMAX,LAST,    LAST,    LAST);
      case7(ATOMIMIN,LAST, ATOMIMIN,ATOMUMIN,LAST,    LAST,    LAST);

      casecomp(SEQ, FSEQ, USEQ, USEQ, DSEQ, U64SEQ, U64SEQ);
      casecomp(SNE, FSNE, USNE, USNE, DSNE, U64SNE, U64SNE);
      casecomp(SGE, FSGE, ISGE, USGE, DSGE, I64SGE, U64SGE);
      casecomp(SLT, FSLT, ISLT, USLT, DSLT, I64SLT, U64SLT);

      default:
         break;
   }

#undef case7
#undef casecomp

   assert(op != TGSI_OPCODE_LAST);
   return op;
}
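/* Example (illustration only): an ADD emitted with two GLSL_TYPE_DOUBLE
 * sources is rewritten here to TGSI_OPCODE_DADD through the case7(ADD, ...)
 * entry, while the same ADD with GLSL_TYPE_UINT sources becomes UADD.
 */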
glsl_to_tgsi_instruction *
glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
                              st_dst_reg dst, st_src_reg src0, st_src_reg src1,
                              unsigned elements)
{
   static const enum tgsi_opcode dot_opcodes[] = {
      TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
   };

   return emit_asm(ir, dot_opcodes[elements - 2], dst, src0, src1);
}
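/* Example (illustration only): emit_dp(ir, dst, a, b, 3) selects
 * dot_opcodes[1], i.e. TGSI_OPCODE_DP3.
 */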
/**
 * Emits TGSI scalar opcodes to produce unique answers across channels.
 *
 * Some TGSI opcodes are scalar-only, like ARB_fp/vp.  The src X
 * channel determines the result across all channels.  So to do a vec4
 * of this operation, we want to emit a scalar per source channel used
 * to produce dest channels.
 */
void
glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
                                  st_dst_reg dst,
                                  st_src_reg orig_src0, st_src_reg orig_src1)
{
   int i, j;
   int done_mask = ~dst.writemask;

   /* TGSI RCP is a scalar operation splatting results to all channels,
    * like ARB_fp/vp.  So emit as many RCPs as necessary to cover our
    * dst channels.
    */
   for (i = 0; i < 4; i++) {
      GLuint this_mask = (1 << i);
      st_src_reg src0 = orig_src0;
      st_src_reg src1 = orig_src1;

      if (done_mask & this_mask)
         continue;

      GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
      GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
      for (j = i + 1; j < 4; j++) {
         /* If there is another enabled component in the destination that is
          * derived from the same inputs, generate its value on this pass as
          * well.
          */
         if (!(done_mask & (1 << j)) &&
             GET_SWZ(src0.swizzle, j) == src0_swiz &&
             GET_SWZ(src1.swizzle, j) == src1_swiz) {
            this_mask |= (1 << j);
         }
      }
      src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
                                   src0_swiz, src0_swiz);
      src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
                                   src1_swiz, src1_swiz);

      dst.writemask = this_mask;
      emit_asm(ir, op, dst, src0, src1);
      done_mask |= this_mask;
   }
}

void
glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
                                  st_dst_reg dst, st_src_reg src0)
{
   st_src_reg undef = undef_src;

   undef.swizzle = SWIZZLE_XXXX;

   emit_scalar(ir, op, dst, src0, undef);
}
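/* Example (illustration only): an RCP whose destination writes .xyzw but
 * whose source swizzle is .xxyy needs only two scalar instructions here:
 * one RCP writing .xy (both channels derived from src.x) and one writing
 * .zw (both derived from src.y), because the loop above merges destination
 * channels that read the same source component.
 */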
void
glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
                               st_dst_reg dst, st_src_reg src0)
{
   enum tgsi_opcode op = TGSI_OPCODE_ARL;

   if (src0.type == GLSL_TYPE_INT || src0.type == GLSL_TYPE_UINT) {
      if (!this->need_uarl && src0.is_legal_tgsi_address_operand())
         return;

      op = TGSI_OPCODE_UARL;
   }

   assert(dst.file == PROGRAM_ADDRESS);
   if (dst.index >= this->num_address_regs)
      this->num_address_regs = dst.index + 1;

   emit_asm(NULL, op, dst, src0);
}
int
glsl_to_tgsi_visitor::add_constant(gl_register_file file,
                                   gl_constant_value values[8], int size,
                                   GLenum datatype,
                                   uint16_t *swizzle_out)
{
   if (file == PROGRAM_CONSTANT) {
      GLuint swizzle = swizzle_out ? *swizzle_out : 0;
      int result = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
                                                    values, size, datatype,
                                                    &swizzle);
      if (swizzle_out)
         *swizzle_out = swizzle;
      return result;
   }

   assert(file == PROGRAM_IMMEDIATE);

   int index = 0;
   immediate_storage *entry;
   int size32 = size * ((datatype == GL_DOUBLE ||
                         datatype == GL_INT64_ARB ||
                         datatype == GL_UNSIGNED_INT64_ARB) ? 2 : 1);
   int i;

   /* Search immediate storage to see if we already have an identical
    * immediate that we can use instead of adding a duplicate entry.
    */
   foreach_in_list(immediate_storage, entry, &this->immediates) {
      immediate_storage *tmp = entry;

      for (i = 0; i * 4 < size32; i++) {
         int slot_size = MIN2(size32 - (i * 4), 4);
         if (tmp->type != datatype || tmp->size32 != slot_size)
            break;
         if (memcmp(tmp->values, &values[i * 4],
                    slot_size * sizeof(gl_constant_value)))
            break;

         /* Everything matches, keep going until the full size is matched */
         tmp = (immediate_storage *)tmp->next;
      }

      /* The full value matched */
      if (i * 4 >= size32)
         return index;

      index++;
   }

   for (i = 0; i * 4 < size32; i++) {
      int slot_size = MIN2(size32 - (i * 4), 4);
      /* Add this immediate to the list. */
      entry = new(mem_ctx) immediate_storage(&values[i * 4],
                                             slot_size, datatype);
      this->immediates.push_tail(entry);
      this->num_immediates++;
   }

   return index;
}
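/* Example (illustration only): two separate uses of the literal 1.0 both
 * reach the search loop above and end up sharing a single entry in
 * this->immediates rather than adding a duplicate immediate slot.
 */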
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_FLOAT);
   union gl_constant_value uval;

   uval.f = val;
   src.index = add_constant(src.file, &uval, 1, GL_FLOAT, &src.swizzle);

   return src;
}
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_double(double val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_DOUBLE);
   union gl_constant_value uval[2];

   memcpy(uval, &val, sizeof(uval));
   src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
   src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);

   return src;
}
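/* Example (illustration only): the double immediate added above occupies two
 * 32-bit gl_constant_value slots, and the XYXY swizzle keeps both halves of
 * the 64-bit value addressable by the paired-channel handling in emit_asm().
 */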
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT);
   union gl_constant_value uval;

   assert(native_integers);

   uval.i = val;
   src.index = add_constant(src.file, &uval, 1, GL_INT, &src.swizzle);

   return src;
}
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_int64(int64_t val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT64);
   union gl_constant_value uval[2];

   memcpy(uval, &val, sizeof(uval));
   src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
   src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);

   return src;
}
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_type(enum glsl_base_type type, int val)
{
   if (native_integers)
      return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
                                       st_src_reg_for_int(val);
   else
      return st_src_reg_for_float(val);
}
static int
attrib_type_size(const struct glsl_type *type, bool is_vs_input)
{
   return type->count_attribute_slots(is_vs_input);
}

static int
type_size(const struct glsl_type *type)
{
   return type->count_attribute_slots(false);
}
static void
add_buffer_to_load_and_stores(glsl_to_tgsi_instruction *inst, st_src_reg *buf,
                              exec_list *instructions, ir_constant *access)
{
   /**
    * emit_asm() might have actually split the op into pieces, e.g. for
    * double stores. We have to go back and fix up all the generated ops.
    */
   enum tgsi_opcode op = inst->op;
   do {
      inst->resource = *buf;
      if (access)
         inst->buffer_access = access->value.u[0];

      if (inst == instructions->get_head_raw())
         break;
      inst = (glsl_to_tgsi_instruction *)inst->get_prev();

      if (inst->op == TGSI_OPCODE_UADD) {
         if (inst == instructions->get_head_raw())
            break;
         inst = (glsl_to_tgsi_instruction *)inst->get_prev();
      }
   } while (inst->op == op && inst->resource.file == PROGRAM_UNDEFINED);
}
/**
 * If the given GLSL type is an array or matrix or a structure containing
 * an array/matrix member, return true.  Else return false.
 *
 * This is used to determine which kind of temp storage (PROGRAM_TEMPORARY
 * or PROGRAM_ARRAY) should be used for variables of this type.  Anytime
 * we have an array that might be indexed with a variable, we need to use
 * the later storage type.
 */
static bool
type_has_array_or_matrix(const glsl_type *type)
{
   if (type->is_array() || type->is_matrix())
      return true;

   if (type->is_record()) {
      for (unsigned i = 0; i < type->length; i++) {
         if (type_has_array_or_matrix(type->fields.structure[i].type)) {
            return true;
         }
      }
   }

   return false;
}
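/* Example (illustration only): a mat3, a float[4], or a struct containing a
 * vec2[2] member all return true here, so get_temp() below places them in
 * PROGRAM_ARRAY storage, which supports variable indexing.
 */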
/**
 * In the initial pass of codegen, we assign temporary numbers to
 * intermediate results.  (not SSA -- variable assignments will reuse
 * storage).
 */
st_src_reg
glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
{
   st_src_reg src;

   src.type = native_integers ? type->base_type : GLSL_TYPE_FLOAT;

   if (!options->EmitNoIndirectTemp && type_has_array_or_matrix(type)) {
      if (next_array >= max_num_arrays) {
         max_num_arrays += 32;
         array_sizes = (unsigned*)
            realloc(array_sizes, sizeof(array_sizes[0]) * max_num_arrays);
      }

      src.file = PROGRAM_ARRAY;
      src.array_id = next_array + 1;
      array_sizes[next_array] = type_size(type);
      ++next_array;
   } else {
      src.file = PROGRAM_TEMPORARY;
      src.index = next_temp;
      next_temp += type_size(type);
   }

   if (type->is_array() || type->is_record()) {
      src.swizzle = SWIZZLE_NOOP;
   } else {
      src.swizzle = swizzle_for_size(type->vector_elements);
   }

   return src;
}
variable_storage *
glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
{
   struct hash_entry *entry;

   entry = _mesa_hash_table_search(this->variables, var);
   if (!entry)
      return NULL;

   return (variable_storage *)entry->data;
}
void
glsl_to_tgsi_visitor::visit(ir_variable *ir)
{
   if (strcmp(ir->name, "gl_FragCoord") == 0) {
      this->prog->OriginUpperLeft = ir->data.origin_upper_left;
      this->prog->PixelCenterInteger = ir->data.pixel_center_integer;
   }

   if (ir->data.mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
      unsigned int i;
      const ir_state_slot *const slots = ir->get_state_slots();
      assert(slots != NULL);

      /* Check if this statevar's setup in the STATE file exactly
       * matches how we'll want to reference it as a
       * struct/array/whatever.  If not, then we need to move it into
       * temporary storage and hope that it'll get copy-propagated
       * out.
       */
      for (i = 0; i < ir->get_num_state_slots(); i++) {
         if (slots[i].swizzle != SWIZZLE_XYZW) {
            break;
         }
      }

      variable_storage *storage;
      st_dst_reg dst;
      if (i == ir->get_num_state_slots()) {
         /* We'll set the index later. */
         storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);

         _mesa_hash_table_insert(this->variables, ir, storage);

         dst = undef_dst;
      } else {
         /* The variable_storage constructor allocates slots based on the size
          * of the type.  However, this had better match the number of state
          * elements that we're going to copy into the new temporary.
          */
         assert((int) ir->get_num_state_slots() == type_size(ir->type));

         dst = st_dst_reg(get_temp(ir->type));

         storage = new(mem_ctx) variable_storage(ir, dst.file, dst.index,
                                                 dst.array_id);

         _mesa_hash_table_insert(this->variables, ir, storage);
      }

      for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
         int index = _mesa_add_state_reference(this->prog->Parameters,
                                               slots[i].tokens);

         if (storage->file == PROGRAM_STATE_VAR) {
            if (storage->index == -1) {
               storage->index = index;
            } else {
               assert(index == storage->index + (int)i);
            }
         } else {
            /* We use GLSL_TYPE_FLOAT here regardless of the actual type of
             * the data being moved since MOV does not care about the type of
             * data it is moving, and we don't want to declare registers with
             * array or struct types.
             */
            st_src_reg src(PROGRAM_STATE_VAR, index, GLSL_TYPE_FLOAT);
            src.swizzle = slots[i].swizzle;
            emit_asm(ir, TGSI_OPCODE_MOV, dst, src);
            /* even a float takes up a whole vec4 reg in a struct/array. */
            dst.index++;
         }
      }

      if (storage->file == PROGRAM_TEMPORARY &&
          dst.index != storage->index + (int) ir->get_num_state_slots()) {
         fail_link(this->shader_program,
                   "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
                   ir->name, dst.index - storage->index,
                   type_size(ir->type));
      }
   }
}
void
glsl_to_tgsi_visitor::visit(ir_loop *ir)
{
   emit_asm(NULL, TGSI_OPCODE_BGNLOOP);

   visit_exec_list(&ir->body_instructions, this);

   emit_asm(NULL, TGSI_OPCODE_ENDLOOP);
}
void
glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit_asm(NULL, TGSI_OPCODE_BRK);
      break;
   case ir_loop_jump::jump_continue:
      emit_asm(NULL, TGSI_OPCODE_CONT);
      break;
   }
}
void
glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
{
   assert(0);
   (void)ir;
}
void
glsl_to_tgsi_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to glsl_to_tgsi.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(NULL, &empty, false);

      assert(sig);

      foreach_in_list(ir_instruction, ir, &sig->body) {
         ir->accept(this);
      }
   }
}
bool
glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
{
   int nonmul_operand = 1 - mul_operand;
   st_src_reg a, b, c;
   st_dst_reg result_dst;

   ir_expression *expr = ir->operands[mul_operand]->as_expression();
   if (!expr || expr->operation != ir_binop_mul)
      return false;

   expr->operands[0]->accept(this);
   a = this->result;
   expr->operands[1]->accept(this);
   b = this->result;
   ir->operands[nonmul_operand]->accept(this);
   c = this->result;

   this->result = get_temp(ir->type);
   result_dst = st_dst_reg(this->result);
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;
   emit_asm(ir, TGSI_OPCODE_MAD, result_dst, a, b, c);

   return true;
}
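/* Example (illustration only): for the GLSL expression "a * b + c", the
 * ir_binop_add handling in visit(ir_expression *) calls try_emit_mad(),
 * which recognizes the ir_binop_mul operand and emits a single
 * MAD result, a, b, c instead of a MUL followed by an ADD.
 */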
/**
 * Emit MAD(a, -b, a) instead of AND(a, NOT(b))
 *
 * The logic values are 1.0 for true and 0.0 for false.  Logical-and is
 * implemented using multiplication, and logical-or is implemented using
 * addition.  Logical-not can be implemented as (true - x), or (1.0 - x).
 * As result, the logical expression (a & !b) can be rewritten as:
 *
 *     - a * !b
 *     - a * (1 - b)
 *     - (a * 1) - (a * b)
 *     - a + -(a * b)
 *     - a + (a * -b)
 *
 * This final expression can be implemented as a single MAD(a, -b, a)
 * instruction.
 */
bool
glsl_to_tgsi_visitor::try_emit_mad_for_and_not(ir_expression *ir,
                                               int try_operand)
{
   const int other_operand = 1 - try_operand;
   st_src_reg a, b;

   ir_expression *expr = ir->operands[try_operand]->as_expression();
   if (!expr || expr->operation != ir_unop_logic_not)
      return false;

   ir->operands[other_operand]->accept(this);
   a = st_src_reg(this->result);
   expr->operands[0]->accept(this);
   b = st_src_reg(this->result);

   b.negate = ~b.negate;

   this->result = get_temp(ir->type);
   emit_asm(ir, TGSI_OPCODE_MAD, st_dst_reg(this->result), a, b, a);

   return true;
}
void
glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
                                      st_src_reg *reg, int *num_reladdr)
{
   if (!reg->reladdr && !reg->reladdr2)
      return;

   if (reg->reladdr)
      emit_arl(ir, address_reg, *reg->reladdr);
   if (reg->reladdr2)
      emit_arl(ir, address_reg2, *reg->reladdr2);

   if (*num_reladdr != 1) {
      st_src_reg temp = get_temp(glsl_type::get_instance(reg->type, 4, 1));

      emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
      *reg = temp;
   }

   (*num_reladdr)--;
}
void
glsl_to_tgsi_visitor::visit(ir_expression *ir)
{
   st_src_reg op[ARRAY_SIZE(ir->operands)];

   /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
    */
   if (!this->precise && ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 1))
         return;
      if (try_emit_mad(ir, 0))
         return;
   }

   /* Quick peephole: Emit OPCODE_MAD(-a, -b, a) instead of AND(a, NOT(b))
    */
   if (!native_integers && ir->operation == ir_binop_logic_and) {
      if (try_emit_mad_for_and_not(ir, 1))
         return;
      if (try_emit_mad_for_and_not(ir, 0))
         return;
   }

   if (ir->operation == ir_quadop_vector)
      assert(!"ir_quadop_vector should have been lowered");

   for (unsigned int operand = 0; operand < ir->num_operands; operand++) {
      this->result.file = PROGRAM_UNDEFINED;
      ir->operands[operand]->accept(this);
      if (this->result.file == PROGRAM_UNDEFINED) {
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->print();
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   visit_expression(ir, op);
}
/* The non-recursive part of the expression visitor lives in a separate
 * function and should be prevented from being inlined, to avoid a stack
 * explosion when deeply nested expressions are visited.
 */
void
glsl_to_tgsi_visitor::visit_expression(ir_expression* ir, st_src_reg *op)
{
   st_src_reg result_src;
   st_dst_reg result_dst;

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1] &&
       ir->operation != ir_binop_interpolate_at_offset &&
       ir->operation != ir_binop_interpolate_at_sample) {
      st_src_reg *swz_op = NULL;
      if (vector_elements > ir->operands[1]->type->vector_elements) {
         assert(ir->operands[1]->type->vector_elements == 1);
         swz_op = &op[1];
      } else if (vector_elements < ir->operands[1]->type->vector_elements) {
         assert(ir->operands[0]->type->vector_elements == 1);
         swz_op = &op[0];
      }
      if (swz_op) {
         uint16_t swizzle_x = GET_SWZ(swz_op->swizzle, 0);
         swz_op->swizzle = MAKE_SWIZZLE4(swizzle_x, swizzle_x,
                                         swizzle_x, swizzle_x);
      }
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }
   if (ir->operands[2] &&
       ir->operands[2]->type->vector_elements != vector_elements) {
      /* This can happen with ir_triop_lrp, i.e. glsl mix */
      assert(ir->operands[2]->type->vector_elements == 1);
      uint16_t swizzle_x = GET_SWZ(op[2].swizzle, 0);
      op[2].swizzle = MAKE_SWIZZLE4(swizzle_x, swizzle_x,
                                    swizzle_x, swizzle_x);
   }

   this->result.file = PROGRAM_UNDEFINED;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = get_temp(ir->type);
   /* convenience for the emit functions below. */
   result_dst = st_dst_reg(result_src);
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      if (result_dst.type != GLSL_TYPE_FLOAT)
         emit_asm(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
      else {
         /* Previously 'SEQ dst, src, 0.0' was used for this.  However, many
          * older GPUs implement SEQ using multiple instructions (i915 uses two
          * SGE instructions and a MUL instruction).  Since our logic values are
          * 0.0 and 1.0, 1-x also implements !x.
          */
         op[0].negate = ~op[0].negate;
         emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0],
                  st_src_reg_for_float(1.0));
      }
      break;
   case ir_unop_neg:
      if (result_dst.type == GLSL_TYPE_INT64 ||
          result_dst.type == GLSL_TYPE_UINT64)
         emit_asm(ir, TGSI_OPCODE_I64NEG, result_dst, op[0]);
      else if (result_dst.type == GLSL_TYPE_INT ||
               result_dst.type == GLSL_TYPE_UINT)
         emit_asm(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
      else if (result_dst.type == GLSL_TYPE_DOUBLE)
         emit_asm(ir, TGSI_OPCODE_DNEG, result_dst, op[0]);
      else {
         op[0].negate = ~op[0].negate;
         result_src = op[0];
      }
      break;
   case ir_unop_subroutine_to_int:
      emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
      break;
   case ir_unop_abs:
      if (result_dst.type == GLSL_TYPE_FLOAT)
         emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0].get_abs());
      else if (result_dst.type == GLSL_TYPE_DOUBLE)
         emit_asm(ir, TGSI_OPCODE_DABS, result_dst, op[0]);
      else if (result_dst.type == GLSL_TYPE_INT64 ||
               result_dst.type == GLSL_TYPE_UINT64)
         emit_asm(ir, TGSI_OPCODE_I64ABS, result_dst, op[0]);
      else
         emit_asm(ir, TGSI_OPCODE_IABS, result_dst, op[0]);
      break;
   case ir_unop_sign:
      emit_asm(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
      break;
   case ir_unop_rcp:
      emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
      break;
   case ir_unop_exp:
      assert(!"not reached: should be handled by exp_to_exp2");
      break;
   case ir_unop_log:
      assert(!"not reached: should be handled by log_to_log2");
      break;
   case ir_unop_log2:
      emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
      break;
   case ir_unop_sin:
      emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
      emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
      break;
   case ir_unop_saturate: {
      glsl_to_tgsi_instruction *inst;
      inst = emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
      inst->saturate = true;
      break;
   }

   case ir_unop_dFdx:
   case ir_unop_dFdx_coarse:
      emit_asm(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
      break;
   case ir_unop_dFdx_fine:
      emit_asm(ir, TGSI_OPCODE_DDX_FINE, result_dst, op[0]);
      break;
   case ir_unop_dFdy:
   case ir_unop_dFdy_coarse:
   case ir_unop_dFdy_fine:
   {
      /* The X component contains 1 or -1 depending on whether the framebuffer
       * is a FBO or the window system buffer, respectively.
       * It is then multiplied with the source operand of DDY.
       */
      static const gl_state_index16 transform_y_state[STATE_LENGTH]
         = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM };

      unsigned transform_y_index =
         _mesa_add_state_reference(this->prog->Parameters,
                                   transform_y_state);

      st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
                                          transform_y_index,
                                          glsl_type::vec4_type);
      transform_y.swizzle = SWIZZLE_XXXX;

      st_src_reg temp = get_temp(glsl_type::vec4_type);

      emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(temp), transform_y, op[0]);
      emit_asm(ir, ir->operation == ir_unop_dFdy_fine ?
               TGSI_OPCODE_DDY_FINE : TGSI_OPCODE_DDY, result_dst, temp);
      break;
   }

   case ir_unop_frexp_sig:
      emit_asm(ir, TGSI_OPCODE_DFRACEXP, result_dst, undef_dst, op[0]);
      break;

   case ir_unop_frexp_exp:
      emit_asm(ir, TGSI_OPCODE_DFRACEXP, undef_dst, result_dst, op[0]);
      break;

   case ir_unop_noise: {
      /* At some point, a motivated person could add a better
       * implementation of noise.  Currently not even the nvidia
       * binary drivers do anything more than this.  In any case, the
       * place to do this is in the GL state tracker, not the poor
       * driver.
       */
      emit_asm(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
      break;
   }

   case ir_binop_add:
      emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
      break;
   case ir_binop_sub:
      op[1].negate = ~op[1].negate;
      emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
      break;

   case ir_binop_mul:
      emit_asm(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
      break;
   case ir_binop_div:
      emit_asm(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (result_dst.type == GLSL_TYPE_FLOAT)
         assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      else
         emit_asm(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
      break;

   case ir_binop_less:
      emit_asm(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
      break;
   case ir_binop_gequal:
      emit_asm(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
      break;
   case ir_binop_equal:
      emit_asm(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
      break;
   case ir_binop_nequal:
      emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      break;
   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         st_src_reg temp = get_temp(native_integers ?
                                    glsl_type::uvec4_type :
                                    glsl_type::vec4_type);

         if (native_integers) {
            st_dst_reg temp_dst = st_dst_reg(temp);
            st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);

            if (ir->operands[0]->type->is_boolean() &&
                ir->operands[1]->as_constant() &&
                ir->operands[1]->as_constant()->is_one()) {
               emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), op[0]);
            } else {
               emit_asm(ir, TGSI_OPCODE_SEQ, st_dst_reg(temp), op[0], op[1]);
            }

            /* Emit 1-3 AND operations to combine the SEQ results. */
            switch (ir->operands[0]->type->vector_elements) {
            case 2:
               break;
            case 3:
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_YYYY;
               temp2.swizzle = SWIZZLE_ZZZZ;
               emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
               break;
            case 4:
               temp_dst.writemask = WRITEMASK_X;
               temp1.swizzle = SWIZZLE_XXXX;
               temp2.swizzle = SWIZZLE_YYYY;
               emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_ZZZZ;
               temp2.swizzle = SWIZZLE_WWWW;
               emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
            }

            temp1.swizzle = SWIZZLE_XXXX;
            temp2.swizzle = SWIZZLE_YYYY;
            emit_asm(ir, TGSI_OPCODE_AND, result_dst, temp1, temp2);
         } else {
            emit_asm(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);

            /* After the dot-product, the value will be an integer on the
             * range [0,4].  Zero becomes 1.0, and positive values become zero.
             */
            emit_dp(ir, result_dst, temp, temp, vector_elements);

            /* Negating the result of the dot-product gives values on the range
             * [-4, 0].  Zero becomes 1.0, and negative values become zero.
             * This is achieved using SGE.
             */
            st_src_reg sge_src = result_src;
            sge_src.negate = ~sge_src.negate;
            emit_asm(ir, TGSI_OPCODE_SGE, result_dst, sge_src,
                     st_src_reg_for_float(0.0));
         }
      } else {
         emit_asm(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
      }
      break;
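   /* Worked example (illustration only): for a vec3 "==" comparison without
    * native integers, the SNE above leaves a 0.0/1.0 mismatch flag per
    * channel; the dot product sums them into the range [0,3], and the
    * negated SGE against 0.0 maps "no mismatches" (0.0) to 1.0 and any
    * positive sum to 0.0.
    */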
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         st_src_reg temp = get_temp(native_integers ?
                                    glsl_type::uvec4_type :
                                    glsl_type::vec4_type);
         if (ir->operands[0]->type->is_boolean() &&
             ir->operands[1]->as_constant() &&
             ir->operands[1]->as_constant()->is_zero()) {
            emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), op[0]);
         } else {
            emit_asm(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
         }

         if (native_integers) {
            st_dst_reg temp_dst = st_dst_reg(temp);
            st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);

            /* Emit 1-3 OR operations to combine the SNE results. */
            switch (ir->operands[0]->type->vector_elements) {
            case 2:
               break;
            case 3:
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_YYYY;
               temp2.swizzle = SWIZZLE_ZZZZ;
               emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
               break;
            case 4:
               temp_dst.writemask = WRITEMASK_X;
               temp1.swizzle = SWIZZLE_XXXX;
               temp2.swizzle = SWIZZLE_YYYY;
               emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_ZZZZ;
               temp2.swizzle = SWIZZLE_WWWW;
               emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
            }

            temp1.swizzle = SWIZZLE_XXXX;
            temp2.swizzle = SWIZZLE_YYYY;
            emit_asm(ir, TGSI_OPCODE_OR, result_dst, temp1, temp2);
         } else {
            /* After the dot-product, the value will be an integer on the
             * range [0,4].  Zero stays zero, and positive values become 1.0.
             */
            glsl_to_tgsi_instruction *const dp =
               emit_dp(ir, result_dst, temp, temp, vector_elements);
            if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
               /* The clamping to [0,1] can be done for free in the fragment
                * shader with a saturate.
                */
               dp->saturate = true;
            } else {
               /* Negating the result of the dot-product gives values on the
                * range [-4, 0].  Zero stays zero, and negative values become
                * 1.0.  This achieved using SLT.
                */
               st_src_reg slt_src = result_src;
               slt_src.negate = ~slt_src.negate;
               emit_asm(ir, TGSI_OPCODE_SLT, result_dst, slt_src,
                        st_src_reg_for_float(0.0));
            }
         }
      } else {
         emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      }
      break;
   case ir_binop_logic_xor:
      if (native_integers)
         emit_asm(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
      else
         emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      break;

   case ir_binop_logic_or: {
      if (native_integers) {
         /* If integers are used as booleans, we can use an actual "or"
          * instruction.
          */
         assert(native_integers);
         emit_asm(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
      } else {
         /* After the addition, the value will be an integer on the
          * range [0,2].  Zero stays zero, and positive values become 1.0.
          */
         glsl_to_tgsi_instruction *add =
            emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
         if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
            /* The clamping to [0,1] can be done for free in the fragment
             * shader with a saturate if floats are being used as boolean
             * values.
             */
            add->saturate = true;
         } else {
            /* Negating the result of the addition gives values on the range
             * [-2, 0].  Zero stays zero, and negative values become 1.0
             * This is achieved using SLT.
             */
            st_src_reg slt_src = result_src;
            slt_src.negate = ~slt_src.negate;
            emit_asm(ir, TGSI_OPCODE_SLT, result_dst, slt_src,
                     st_src_reg_for_float(0.0));
         }
      }
      break;
   }

   case ir_binop_logic_and:
      /* If native integers are disabled, the bool args are stored as float 0.0
       * or 1.0, so "mul" gives us "and".  If they're enabled, just use the
       * actual AND opcode.
       */
      if (native_integers)
         emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
      else
         emit_asm(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(ir, result_dst, op[0], op[1],
              ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      if (have_sqrt) {
         emit_scalar(ir, TGSI_OPCODE_SQRT, result_dst, op[0]);
      } else {
         /* This is the only instruction sequence that makes the game "Risen"
          * render correctly. ABS is not required for the game, but since GLSL
          * declares negative values as "undefined", allowing us to do whatever
          * we want, I choose to use ABS to match DX9 and pre-GLSL RSQ
          * behavior.
          */
         emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0].get_abs());
         emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, result_src);
      }
      break;
   case ir_unop_rsq:
      emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
      break;
   case ir_unop_i2f:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
         break;
      }
      /* fallthrough to next case otherwise */
   case ir_unop_b2f:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0],
                  st_src_reg_for_float(1.0));
         break;
      }
      /* fallthrough to next case otherwise */
   case ir_unop_i2u:
   case ir_unop_u2i:
   case ir_unop_i642u64:
   case ir_unop_u642i64:
      /* Converting between signed and unsigned integers is a no-op. */
      result_src = op[0];
      result_src.type = result_dst.type;
      break;
   case ir_unop_b2i:
      if (native_integers) {
         /* Booleans are stored as integers using ~0 for true and 0 for false.
          * GLSL requires that int(bool) return 1 for true and 0 for false.
          * This conversion is done with AND, but it could be done with NEG.
          */
         emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0],
                  st_src_reg_for_int(1));
      } else {
         /* Booleans and integers are both stored as floats when native
          * integers are disabled.
          */
         result_src = op[0];
      }
      break;
   case ir_unop_f2i:
      if (native_integers)
         emit_asm(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
      else
         emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
      break;
   case ir_unop_f2u:
      if (native_integers)
         emit_asm(ir, TGSI_OPCODE_F2U, result_dst, op[0]);
      else
         emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
      break;
   case ir_unop_bitcast_f2i:
   case ir_unop_bitcast_f2u:
      /* Make sure we don't propagate the negate modifier to integer opcodes. */
      if (op[0].negate || op[0].abs)
         emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
      else
         result_src = op[0];
      result_src.type = ir->operation == ir_unop_bitcast_f2i ? GLSL_TYPE_INT :
                                                               GLSL_TYPE_UINT;
      break;
   case ir_unop_bitcast_i2f:
   case ir_unop_bitcast_u2f:
      result_src = op[0];
      result_src.type = GLSL_TYPE_FLOAT;
      break;
   case ir_unop_f2b:
      emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
               st_src_reg_for_float(0.0));
      break;
   case ir_unop_d2b:
      emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
               st_src_reg_for_double(0.0));
      break;
   case ir_unop_i2b:
      if (native_integers)
         emit_asm(ir, TGSI_OPCODE_USNE, result_dst, op[0],
                  st_src_reg_for_int(0));
      else
         emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
                  st_src_reg_for_float(0.0));
      break;
   case ir_unop_bitcast_u642d:
   case ir_unop_bitcast_i642d:
      result_src = op[0];
      result_src.type = GLSL_TYPE_DOUBLE;
      break;
   case ir_unop_bitcast_d2i64:
      result_src = op[0];
      result_src.type = GLSL_TYPE_INT64;
      break;
   case ir_unop_bitcast_d2u64:
      result_src = op[0];
      result_src.type = GLSL_TYPE_UINT64;
      break;
   case ir_unop_trunc:
      emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
      break;
   case ir_unop_ceil:
      emit_asm(ir, TGSI_OPCODE_CEIL, result_dst, op[0]);
      break;
   case ir_unop_floor:
      emit_asm(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
      break;
   case ir_unop_round_even:
      emit_asm(ir, TGSI_OPCODE_ROUND, result_dst, op[0]);
      break;
   case ir_unop_fract:
      emit_asm(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
      break;

   case ir_binop_min:
      emit_asm(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
      break;
   case ir_binop_max:
      emit_asm(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
      break;
   case ir_binop_pow:
      emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
         break;
      }
      /* fallthrough */
   case ir_unop_u2f:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
         break;
      }
      /* fallthrough */
   case ir_binop_lshift:
   case ir_binop_rshift:
      if (native_integers) {
         enum tgsi_opcode opcode = ir->operation == ir_binop_lshift
            ? TGSI_OPCODE_SHL : TGSI_OPCODE_ISHR;
         st_src_reg count;

         if (glsl_base_type_is_64bit(op[0].type)) {
            /* GLSL shift operations have 32-bit shift counts, but TGSI uses
             * 64 bits.
             */
            count = get_temp(glsl_type::u64vec(ir->operands[1]
                                               ->type->components()));
            emit_asm(ir, TGSI_OPCODE_U2I64, st_dst_reg(count), op[1]);
         } else
            count = op[1];

         emit_asm(ir, opcode, result_dst, op[0], count);
         break;
      }
      /* fallthrough */
   case ir_binop_bit_and:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
         break;
      }
      /* fallthrough */
   case ir_binop_bit_xor:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
         break;
      }
      /* fallthrough */
   case ir_binop_bit_or:
      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
         break;
      }

      assert(!"GLSL 1.30 features unsupported");
      break;
   case ir_binop_ubo_load: {
      if (ctx->Const.UseSTD430AsDefaultPacking) {
         ir_rvalue *block = ir->operands[0];
         ir_rvalue *offset = ir->operands[1];
         ir_constant *const_block = block->as_constant();

         st_src_reg cbuf(PROGRAM_CONSTANT,
            (const_block ? const_block->value.u[0] + 1 : 1),
            ir->type->base_type);

         cbuf.has_index2 = true;

         if (!const_block) {
            block->accept(this);
            cbuf.reladdr = ralloc(mem_ctx, st_src_reg);
            *cbuf.reladdr = this->result;
            emit_arl(ir, sampler_reladdr, this->result);
         }

         /* Calculate the surface offset */
         offset->accept(this);
         st_src_reg off = this->result;

         glsl_to_tgsi_instruction *inst =
            emit_asm(ir, TGSI_OPCODE_LOAD, result_dst, off);

         if (result_dst.type == GLSL_TYPE_BOOL)
            emit_asm(ir, TGSI_OPCODE_USNE, result_dst, st_src_reg(result_dst),
                     st_src_reg_for_int(0));

         add_buffer_to_load_and_stores(inst, &cbuf, &this->instructions,
                                       NULL);
      } else {
         ir_constant *const_uniform_block = ir->operands[0]->as_constant();
         ir_constant *const_offset_ir = ir->operands[1]->as_constant();
         unsigned const_offset = const_offset_ir ?
            const_offset_ir->value.u[0] : 0;
         unsigned const_block = const_uniform_block ?
            const_uniform_block->value.u[0] + 1 : 1;
         st_src_reg index_reg = get_temp(glsl_type::uint_type);
         st_src_reg cbuf;

         cbuf.type = ir->type->base_type;
         cbuf.file = PROGRAM_CONSTANT;
         cbuf.reladdr = NULL;
         cbuf.index2D = const_block;

         assert(ir->type->is_vector() || ir->type->is_scalar());

         if (const_offset_ir) {
            /* Constant index into constant buffer */
            cbuf.reladdr = NULL;
            cbuf.index = const_offset / 16;
         } else {
            ir_expression *offset_expr = ir->operands[1]->as_expression();
            st_src_reg offset = op[1];

            /* The OpenGL spec is written in such a way that accesses with
             * non-constant offset are almost always vec4-aligned. The only
             * exception to this are members of structs in arrays of structs:
             * each struct in an array of structs is at least vec4-aligned,
             * but single-element and [ui]vec2 members of the struct may be at
             * an offset that is not a multiple of 16 bytes.
             *
             * Here, we extract that offset, relying on previous passes to
             * always generate offset expressions of the form
             * (+ expr constant_offset).
             *
             * Note that the std430 layout, which allows more cases of
             * alignment less than vec4 in arrays, is not supported for
             * uniform blocks, so we do not have to deal with it here.
             */
            if (offset_expr && offset_expr->operation == ir_binop_add) {
               const_offset_ir = offset_expr->operands[1]->as_constant();
               if (const_offset_ir) {
                  const_offset = const_offset_ir->value.u[0];
                  cbuf.index = const_offset / 16;
                  offset_expr->operands[0]->accept(this);
                  offset = this->result;
               }
            }

            /* Relative/variable index into constant buffer */
            emit_asm(ir, TGSI_OPCODE_USHR, st_dst_reg(index_reg), offset,
                     st_src_reg_for_int(4));
            cbuf.reladdr = ralloc(mem_ctx, st_src_reg);
            memcpy(cbuf.reladdr, &index_reg, sizeof(index_reg));
         }

         if (const_uniform_block) {
            /* Constant constant buffer */
            cbuf.reladdr2 = NULL;
         } else {
            /* Relative/variable constant buffer */
            cbuf.reladdr2 = ralloc(mem_ctx, st_src_reg);
            memcpy(cbuf.reladdr2, &op[0], sizeof(st_src_reg));
         }
         cbuf.has_index2 = true;

         cbuf.swizzle = swizzle_for_size(ir->type->vector_elements);
         if (glsl_base_type_is_64bit(cbuf.type))
            cbuf.swizzle += MAKE_SWIZZLE4(const_offset % 16 / 8,
                                          const_offset % 16 / 8,
                                          const_offset % 16 / 8,
                                          const_offset % 16 / 8);
         else
            cbuf.swizzle += MAKE_SWIZZLE4(const_offset % 16 / 4,
                                          const_offset % 16 / 4,
                                          const_offset % 16 / 4,
                                          const_offset % 16 / 4);

         if (ir->type->is_boolean()) {
            emit_asm(ir, TGSI_OPCODE_USNE, result_dst, cbuf,
                     st_src_reg_for_int(0));
         } else {
            emit_asm(ir, TGSI_OPCODE_MOV, result_dst, cbuf);
         }
      }
      break;
   }
   case ir_triop_lrp:
      /* note: we have to reorder the three args here */
      emit_asm(ir, TGSI_OPCODE_LRP, result_dst, op[2], op[1], op[0]);
      break;

   case ir_triop_csel:
      if (this->ctx->Const.NativeIntegers)
         emit_asm(ir, TGSI_OPCODE_UCMP, result_dst, op[0], op[1], op[2]);
      else {
         op[0].negate = ~op[0].negate;
         emit_asm(ir, TGSI_OPCODE_CMP, result_dst, op[0], op[1], op[2]);
      }
      break;
   case ir_triop_bitfield_extract:
      emit_asm(ir, TGSI_OPCODE_IBFE, result_dst, op[0], op[1], op[2]);
      break;
   case ir_quadop_bitfield_insert:
      emit_asm(ir, TGSI_OPCODE_BFI, result_dst, op[0], op[1], op[2], op[3]);
      break;
   case ir_unop_bitfield_reverse:
      emit_asm(ir, TGSI_OPCODE_BREV, result_dst, op[0]);
      break;
   case ir_unop_bit_count:
      emit_asm(ir, TGSI_OPCODE_POPC, result_dst, op[0]);
      break;
   case ir_unop_find_msb:
      emit_asm(ir, TGSI_OPCODE_IMSB, result_dst, op[0]);
      break;
   case ir_unop_find_lsb:
      emit_asm(ir, TGSI_OPCODE_LSB, result_dst, op[0]);
      break;
   case ir_binop_imul_high:
      emit_asm(ir, TGSI_OPCODE_IMUL_HI, result_dst, op[0], op[1]);
      break;
   case ir_triop_fma:
      /* In theory, MAD is incorrect here. */
      if (have_fma)
         emit_asm(ir, TGSI_OPCODE_FMA, result_dst, op[0], op[1], op[2]);
      else
         emit_asm(ir, TGSI_OPCODE_MAD, result_dst, op[0], op[1], op[2]);
      break;
   case ir_unop_interpolate_at_centroid:
      emit_asm(ir, TGSI_OPCODE_INTERP_CENTROID, result_dst, op[0]);
      break;

   case ir_binop_interpolate_at_offset: {
      /* The y coordinate needs to be flipped for the default fb */
      static const gl_state_index16 transform_y_state[STATE_LENGTH]
         = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM };

      unsigned transform_y_index =
         _mesa_add_state_reference(this->prog->Parameters,
                                   transform_y_state);

      st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
                                          transform_y_index,
                                          glsl_type::vec4_type);
      transform_y.swizzle = SWIZZLE_XXXX;

      st_src_reg temp = get_temp(glsl_type::vec2_type);
      st_dst_reg temp_dst = st_dst_reg(temp);

      emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[1]);
      temp_dst.writemask = WRITEMASK_Y;
      emit_asm(ir, TGSI_OPCODE_MUL, temp_dst, transform_y, op[1]);
      emit_asm(ir, TGSI_OPCODE_INTERP_OFFSET, result_dst, op[0], temp);
      break;
   }

   case ir_binop_interpolate_at_sample:
      emit_asm(ir, TGSI_OPCODE_INTERP_SAMPLE, result_dst, op[0], op[1]);
      break;
   case ir_unop_d2f:
      emit_asm(ir, TGSI_OPCODE_D2F, result_dst, op[0]);
      break;
   case ir_unop_f2d:
      emit_asm(ir, TGSI_OPCODE_F2D, result_dst, op[0]);
      break;
   case ir_unop_d2i:
      emit_asm(ir, TGSI_OPCODE_D2I, result_dst, op[0]);
      break;
   case ir_unop_i2d:
      emit_asm(ir, TGSI_OPCODE_I2D, result_dst, op[0]);
      break;
   case ir_unop_d2u:
      emit_asm(ir, TGSI_OPCODE_D2U, result_dst, op[0]);
      break;
   case ir_unop_u2d:
      emit_asm(ir, TGSI_OPCODE_U2D, result_dst, op[0]);
      break;
   case ir_unop_unpack_double_2x32:
   case ir_unop_pack_double_2x32:
   case ir_unop_unpack_int_2x32:
   case ir_unop_pack_int_2x32:
   case ir_unop_unpack_uint_2x32:
   case ir_unop_pack_uint_2x32:
   case ir_unop_unpack_sampler_2x32:
   case ir_unop_pack_sampler_2x32:
   case ir_unop_unpack_image_2x32:
   case ir_unop_pack_image_2x32:
      emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
      break;
   case ir_binop_ldexp:
      if (ir->operands[0]->type->is_double()) {
         emit_asm(ir, TGSI_OPCODE_DLDEXP, result_dst, op[0], op[1]);
      } else if (ir->operands[0]->type->is_float()) {
         emit_asm(ir, TGSI_OPCODE_LDEXP, result_dst, op[0], op[1]);
      } else {
         assert(!"Invalid ldexp for non-double opcode in glsl_to_tgsi_visitor::visit()");
      }
      break;

   case ir_unop_pack_half_2x16:
      emit_asm(ir, TGSI_OPCODE_PK2H, result_dst, op[0]);
      break;
   case ir_unop_unpack_half_2x16:
      emit_asm(ir, TGSI_OPCODE_UP2H, result_dst, op[0]);
      break;
   case ir_unop_get_buffer_size: {
      ir_constant *const_offset = ir->operands[0]->as_constant();
      int buf_base = ctx->st->has_hw_atomics
         ? 0 : ctx->Const.Program[shader->Stage].MaxAtomicBuffers;
      st_src_reg buffer(
            PROGRAM_BUFFER,
            buf_base + (const_offset ? const_offset->value.u[0] : 0),
            GLSL_TYPE_UINT);
      if (!const_offset) {
         buffer.reladdr = ralloc(mem_ctx, st_src_reg);
         *buffer.reladdr = op[0];
         emit_arl(ir, sampler_reladdr, op[0]);
      }
      emit_asm(ir, TGSI_OPCODE_RESQ, result_dst)->resource = buffer;
      break;
   }
   case ir_unop_u2i64:
   case ir_unop_u2u64:
   case ir_unop_b2i64: {
      st_src_reg temp = get_temp(glsl_type::uvec4_type);
      st_dst_reg temp_dst = st_dst_reg(temp);
      unsigned orig_swz = op[0].swizzle;
      /*
       * To convert unsigned to 64-bit:
       * zero Y channel, copy X channel.
       */
      temp_dst.writemask = WRITEMASK_Y;
      if (vector_elements > 1)
         temp_dst.writemask |= WRITEMASK_W;
      emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, st_src_reg_for_int(0));
      temp_dst.writemask = WRITEMASK_X;
      if (vector_elements > 1)
         temp_dst.writemask |= WRITEMASK_Z;
      op[0].swizzle = MAKE_SWIZZLE4(GET_SWZ(orig_swz, 0), GET_SWZ(orig_swz, 0),
                                    GET_SWZ(orig_swz, 1), GET_SWZ(orig_swz, 1));
      if (ir->operation == ir_unop_u2i64 || ir->operation == ir_unop_u2u64)
         emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
      else
         emit_asm(ir, TGSI_OPCODE_AND, temp_dst, op[0], st_src_reg_for_int(1));
      result_src = temp;
      result_src.type = GLSL_TYPE_UINT64;
      if (vector_elements > 2) {
         /* Subtle: We rely on the fact that get_temp here returns the next
          * TGSI temporary register directly after the temp register used for
          * the first two components, so that the result gets picked up
          * automatically.
          */
         st_src_reg temp = get_temp(glsl_type::uvec4_type);
         st_dst_reg temp_dst = st_dst_reg(temp);
         temp_dst.writemask = WRITEMASK_Y;
         if (vector_elements > 3)
            temp_dst.writemask |= WRITEMASK_W;
         emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, st_src_reg_for_int(0));

         temp_dst.writemask = WRITEMASK_X;
         if (vector_elements > 3)
            temp_dst.writemask |= WRITEMASK_Z;
         op[0].swizzle = MAKE_SWIZZLE4(GET_SWZ(orig_swz, 2),
                                       GET_SWZ(orig_swz, 2),
                                       GET_SWZ(orig_swz, 3),
                                       GET_SWZ(orig_swz, 3));
         if (ir->operation == ir_unop_u2i64 || ir->operation == ir_unop_u2u64)
            emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
         else
            emit_asm(ir, TGSI_OPCODE_AND, temp_dst, op[0],
                     st_src_reg_for_int(1));
      }
      break;
   }
   case ir_unop_i642i:
   case ir_unop_u642i:
   case ir_unop_u642u:
   case ir_unop_i642u: {
      st_src_reg temp = get_temp(glsl_type::uvec4_type);
      st_dst_reg temp_dst = st_dst_reg(temp);
      unsigned orig_swz = op[0].swizzle;
      unsigned orig_idx = op[0].index;
      unsigned el;
      temp_dst.writemask = WRITEMASK_X;

      for (el = 0; el < vector_elements; el++) {
         unsigned swz = GET_SWZ(orig_swz, el);
         if (swz & 1)
            op[0].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_Z,
                                          SWIZZLE_Z, SWIZZLE_Z);
         else
            op[0].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
                                          SWIZZLE_X, SWIZZLE_X);
         if (swz > 1)
            op[0].index = orig_idx + 1;
         op[0].type = GLSL_TYPE_UINT;
         temp_dst.writemask = WRITEMASK_X << el;
         emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
      }
      result_src = temp;
      if (ir->operation == ir_unop_u642u || ir->operation == ir_unop_i642u)
         result_src.type = GLSL_TYPE_UINT;
      else
         result_src.type = GLSL_TYPE_INT;
      break;
   }
   case ir_unop_i642b:
      emit_asm(ir, TGSI_OPCODE_U64SNE, result_dst, op[0],
               st_src_reg_for_int64(0));
      break;
   case ir_unop_i642f:
      emit_asm(ir, TGSI_OPCODE_I642F, result_dst, op[0]);
      break;
   case ir_unop_u642f:
      emit_asm(ir, TGSI_OPCODE_U642F, result_dst, op[0]);
      break;
   case ir_unop_i642d:
      emit_asm(ir, TGSI_OPCODE_I642D, result_dst, op[0]);
      break;
   case ir_unop_u642d:
      emit_asm(ir, TGSI_OPCODE_U642D, result_dst, op[0]);
      break;
   case ir_unop_i2i64:
      emit_asm(ir, TGSI_OPCODE_I2I64, result_dst, op[0]);
      break;
   case ir_unop_f2i64:
      emit_asm(ir, TGSI_OPCODE_F2I64, result_dst, op[0]);
      break;
   case ir_unop_d2i64:
      emit_asm(ir, TGSI_OPCODE_D2I64, result_dst, op[0]);
      break;
   case ir_unop_i2u64:
      emit_asm(ir, TGSI_OPCODE_I2I64, result_dst, op[0]);
      break;
   case ir_unop_f2u64:
      emit_asm(ir, TGSI_OPCODE_F2U64, result_dst, op[0]);
      break;
   case ir_unop_d2u64:
      emit_asm(ir, TGSI_OPCODE_D2U64, result_dst, op[0]);
      break;
      /* these might be needed */
   case ir_unop_pack_snorm_2x16:
   case ir_unop_pack_unorm_2x16:
   case ir_unop_pack_snorm_4x8:
   case ir_unop_pack_unorm_4x8:

   case ir_unop_unpack_snorm_2x16:
   case ir_unop_unpack_unorm_2x16:
   case ir_unop_unpack_snorm_4x8:
   case ir_unop_unpack_unorm_4x8:

   case ir_quadop_vector:
   case ir_binop_vector_extract:
   case ir_triop_vector_insert:
   case ir_binop_carry:
   case ir_binop_borrow:
   case ir_unop_ssbo_unsized_array_length:
      /* This operation is not supported, or should have already been handled.
       */
      assert(!"Invalid ir opcode in glsl_to_tgsi_visitor::visit()");
      break;
   }

   this->result = result_src;
}
void
glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
{
   st_src_reg src;
   int i;
   int swizzle[4] = {0};

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */
   ir->val->accept(this);
   src = this->result;
   assert(src.file != PROGRAM_UNDEFINED);
   assert(ir->type->vector_elements > 0);

   for (i = 0; i < 4; i++) {
      if (i < ir->type->vector_elements) {
         switch (i) {
         case 0:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
            break;
         case 1:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
            break;
         case 2:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
            break;
         case 3:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
            break;
         }
      } else {
         /* If the type is smaller than a vec4, replicate the last
          * channel out.
          */
         swizzle[i] = swizzle[ir->type->vector_elements - 1];
      }
   }

   src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}
/* Test if the variable is an array. Note that geometry and
 * tessellation shader inputs and outputs are always arrays (except
 * for patch inputs), so only the array element type is considered.
 */
static bool
is_inout_array(unsigned stage, ir_variable *var, bool *remove_array)
{
   const glsl_type *type = var->type;

   *remove_array = false;

   if ((stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
       (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out))
      return false;

   if (((stage == MESA_SHADER_GEOMETRY && var->data.mode == ir_var_shader_in) ||
        (stage == MESA_SHADER_TESS_EVAL && var->data.mode == ir_var_shader_in) ||
        stage == MESA_SHADER_TESS_CTRL) &&
       !var->data.patch) {
      if (!var->type->is_array())
         return false; /* a system value probably */

      type = var->type->fields.array;
      *remove_array = true;
   }

   return type->is_array() || type->is_matrix();
}
static unsigned
st_translate_interp_loc(ir_variable *var)
{
   if (var->data.centroid)
      return TGSI_INTERPOLATE_LOC_CENTROID;
   else if (var->data.sample)
      return TGSI_INTERPOLATE_LOC_SAMPLE;
   else
      return TGSI_INTERPOLATE_LOC_CENTER;
}
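/* First dereference of a variable: create its variable_storage entry (and,
 * for shader inputs/outputs, the corresponding inout_decl) so that later
 * references can reuse it via find_variable_storage().
 */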
void
glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
{
   variable_storage *entry = find_variable_storage(ir->var);
   ir_variable *var = ir->var;
   bool remove_array;

   if (!entry) {
      switch (var->data.mode) {
      case ir_var_uniform:
         entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
                                               var->data.param_index);
         _mesa_hash_table_insert(this->variables, var, entry);
         break;
      case ir_var_shader_in: {
         /* The linker assigns locations for varyings and attributes,
          * including deprecated builtins (like gl_Color), user-assigned
          * generic attributes (glBindVertexLocation), and
          * user-defined varyings.
          */
         assert(var->data.location != -1);

         const glsl_type *type_without_array = var->type->without_array();
         struct inout_decl *decl = &inputs[num_inputs];
         unsigned component = var->data.location_frac;
         unsigned num_components;
         num_inputs++;

         if (type_without_array->is_64bit())
            component = component / 2;
         if (type_without_array->vector_elements)
            num_components = type_without_array->vector_elements;
         else
            num_components = 4;

         decl->mesa_index = var->data.location;
         decl->interp = (glsl_interp_mode) var->data.interpolation;
         decl->interp_loc = st_translate_interp_loc(var);
         decl->base_type = type_without_array->base_type;
         decl->usage_mask = u_bit_consecutive(component, num_components);

         if (is_inout_array(shader->Stage, var, &remove_array)) {
            decl->array_id = num_input_arrays + 1;
            num_input_arrays++;
         } else {
            decl->array_id = 0;
         }

         if (remove_array)
            decl->size = type_size(var->type->fields.array);
         else
            decl->size = type_size(var->type);

         entry = new(mem_ctx) variable_storage(var,
                                               PROGRAM_INPUT,
                                               decl->mesa_index,
                                               decl->array_id);
         entry->component = component;

         _mesa_hash_table_insert(this->variables, var, entry);

         break;
      }
      case ir_var_shader_out: {
         assert(var->data.location != -1);

         const glsl_type *type_without_array = var->type->without_array();
         struct inout_decl *decl = &outputs[num_outputs];
         unsigned component = var->data.location_frac;
         unsigned num_components;
         num_outputs++;

         if (type_without_array->is_64bit())
            component = component / 2;
         if (type_without_array->vector_elements)
            num_components = type_without_array->vector_elements;
         else
            num_components = 4;

         decl->mesa_index = var->data.location + FRAG_RESULT_MAX * var->data.index;
         decl->base_type = type_without_array->base_type;
         decl->usage_mask = u_bit_consecutive(component, num_components);
         if (var->data.stream & (1u << 31)) {
            decl->gs_out_streams = var->data.stream & ~(1u << 31);
         } else {
            assert(var->data.stream < 4);
            decl->gs_out_streams = 0;
            for (unsigned i = 0; i < num_components; ++i)
               decl->gs_out_streams |= var->data.stream << (2 * (component + i));
         }

         if (is_inout_array(shader->Stage, var, &remove_array)) {
            decl->array_id = num_output_arrays + 1;
            num_output_arrays++;
         } else {
            decl->array_id = 0;
         }

         if (remove_array)
            decl->size = type_size(var->type->fields.array);
         else
            decl->size = type_size(var->type);

         if (var->data.fb_fetch_output) {
            st_dst_reg dst = st_dst_reg(get_temp(var->type));
            st_src_reg src = st_src_reg(PROGRAM_OUTPUT, decl->mesa_index,
                                        var->type, component, decl->array_id);
            emit_asm(NULL, TGSI_OPCODE_FBFETCH, dst, src);
            entry = new(mem_ctx) variable_storage(var, dst.file, dst.index,
                                                  dst.array_id);
         } else {
            entry = new(mem_ctx) variable_storage(var,
                                                  PROGRAM_OUTPUT,
                                                  decl->mesa_index,
                                                  decl->array_id);
         }
         entry->component = component;

         _mesa_hash_table_insert(this->variables, var, entry);

         break;
      }
      case ir_var_system_value:
         entry = new(mem_ctx) variable_storage(var,
                                               PROGRAM_SYSTEM_VALUE,
                                               var->data.location);
         break;
      case ir_var_auto:
      case ir_var_temporary:
         st_src_reg src = get_temp(var->type);

         entry = new(mem_ctx) variable_storage(var, src.file, src.index,
                                               src.array_id);
         _mesa_hash_table_insert(this->variables, var, entry);

         break;
      }

      if (!entry) {
         printf("Failed to make storage for %s\n", var->name);
         exit(1);
      }
   }

   this->result = st_src_reg(entry->file, entry->index, var->type,
                             entry->component, entry->array_id);
   if (this->shader->Stage == MESA_SHADER_VERTEX &&
       var->data.mode == ir_var_shader_in &&
       var->type->without_array()->is_double())
      this->result.is_double_vertex_input = true;
   if (!native_integers)
      this->result.type = GLSL_TYPE_FLOAT;
}
static void
shrink_array_declarations(struct inout_decl *decls, unsigned count,
                          GLbitfield64* usage_mask,
                          GLbitfield64 double_usage_mask,
                          GLbitfield* patch_usage_mask)
{
   unsigned i;
   int j;

   /* Fix array declarations by removing unused array elements at both ends
    * of the arrays. For example, mat4[3] where only mat[1] is used.
    */
   for (i = 0; i < count; i++) {
      struct inout_decl *decl = &decls[i];
      if (!decl->array_id)
         continue;

      /* Shrink the beginning. */
      for (j = 0; j < (int)decl->size; j++) {
         if (decl->mesa_index >= VARYING_SLOT_PATCH0) {
            if (*patch_usage_mask &
                BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j))
               break;
         }
         else {
            if (*usage_mask & BITFIELD64_BIT(decl->mesa_index+j))
               break;
            if (double_usage_mask & BITFIELD64_BIT(decl->mesa_index+j-1))
               break;
         }

         decl->mesa_index++;
         decl->size--;
         j--;
      }

      /* Shrink the end. */
      for (j = decl->size-1; j >= 0; j--) {
         if (decl->mesa_index >= VARYING_SLOT_PATCH0) {
            if (*patch_usage_mask &
                BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j))
               break;
         }
         else {
            if (*usage_mask & BITFIELD64_BIT(decl->mesa_index+j))
               break;
            if (double_usage_mask & BITFIELD64_BIT(decl->mesa_index+j-1))
               break;
         }

         decl->size--;
      }

      /* When not all entries of an array are accessed, we mark them as used
       * here anyway, to ensure that the input/output mapping logic doesn't get
       * confused.
       *
       * TODO This happens when an array isn't used via indirect access, which
       * some game ports do (at least eON-based). There is an optimization
       * opportunity here by replacing the array declaration with non-array
       * declarations of those slots that are actually used.
       */
      for (j = 1; j < (int)decl->size; ++j) {
         if (decl->mesa_index >= VARYING_SLOT_PATCH0)
            *patch_usage_mask |= BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j);
         else
            *usage_mask |= BITFIELD64_BIT(decl->mesa_index + j);
      }
   }
}
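/* Array dereference: fold constant indices directly into the register index
 * (or into index2D for 2D-indexed files), and emit address arithmetic into
 * reladdr/reladdr2 for variable indices.
 */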
void
glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   st_src_reg src;
   bool is_2D = false;
   ir_variable *var = ir->variable_referenced();

   /* We only need the logic provided by st_glsl_storage_type_size()
    * for arrays of structs. Indirect sampler and image indexing is handled
    * elsewhere.
    */
   int element_size = ir->type->without_array()->is_record() ?
      st_glsl_storage_type_size(ir->type, var->data.bindless) :
      type_size(ir->type);

   index = ir->array_index->constant_expression_value(ralloc_parent(ir));

   ir->array->accept(this);
   src = this->result;

   if (!src.has_index2) {
      switch (this->prog->Target) {
      case GL_TESS_CONTROL_PROGRAM_NV:
         is_2D = (src.file == PROGRAM_INPUT || src.file == PROGRAM_OUTPUT) &&
            !ir->variable_referenced()->data.patch;
         break;
      case GL_TESS_EVALUATION_PROGRAM_NV:
         is_2D = src.file == PROGRAM_INPUT &&
            !ir->variable_referenced()->data.patch;
         break;
      case GL_GEOMETRY_PROGRAM_NV:
         is_2D = src.file == PROGRAM_INPUT;
         break;
      }
   }

   if (index) {
      if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
          src.file == PROGRAM_INPUT)
         element_size = attrib_type_size(ir->type, true);
      if (is_2D) {
         src.index2D = index->value.i[0];
         src.has_index2 = true;
      } else
         src.index += index->value.i[0] * element_size;
   } else {
      /* Variable index array dereference.  It eats the "vec4" of the
       * base of the array and an index that offsets the TGSI register
       * index.
       */
      ir->array_index->accept(this);

      st_src_reg index_reg;

      if (element_size == 1) {
         index_reg = this->result;
      } else {
         index_reg = get_temp(native_integers ?
                              glsl_type::int_type : glsl_type::float_type);

         emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
                  this->result, st_src_reg_for_type(index_reg.type, element_size));
      }

      /* If there was already a relative address register involved, add the
       * new and the old together to get the new offset.
       */
      if (!is_2D && src.reladdr != NULL) {
         st_src_reg accum_reg = get_temp(native_integers ?
                                         glsl_type::int_type : glsl_type::float_type);

         emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(accum_reg),
                  index_reg, *src.reladdr);

         index_reg = accum_reg;
      }

      if (is_2D) {
         src.reladdr2 = ralloc(mem_ctx, st_src_reg);
         memcpy(src.reladdr2, &index_reg, sizeof(index_reg));
         src.index2D = 0;
         src.has_index2 = true;
      } else {
         src.reladdr = ralloc(mem_ctx, st_src_reg);
         memcpy(src.reladdr, &index_reg, sizeof(index_reg));
      }
   }

   /* Change the register type to the element type of the array. */
   src.type = ir->type->base_type;

   this->result = src;
}
void
glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   ir_variable *var = ir->record->variable_referenced();
   int offset = 0;

   ir->record->accept(this);

   assert(ir->field_idx >= 0);
   for (i = 0; i < struct_type->length; i++) {
      if (i == (unsigned) ir->field_idx)
         break;
      const glsl_type *member_type = struct_type->fields.structure[i].type;
      offset += st_glsl_storage_type_size(member_type, var->data.bindless);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = SWIZZLE_NOOP;

   this->result.index += offset;
   this->result.type = ir->type->base_type;
}
/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static st_dst_reg
get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v, int *component)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array) {
      assert(!deref_array->array->type->is_vector());
   }

   /* Use the rvalue deref handler for the most part.  We write swizzles using
    * the writemask, but we do extract the base component for enhanced layouts
    * from the source swizzle.
    */
   ir->accept(v);
   *component = GET_SWZ(v->result.swizzle, 0);
   return st_dst_reg(v->result);
}
/**
 * Process the condition of a conditional assignment
 *
 * Examines the condition of a conditional assignment to generate the optimal
 * first operand of a \c CMP instruction.  If the condition is a relational
 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
 * used as the source for the \c CMP instruction.  Otherwise the comparison
 * is processed to a boolean result, and the boolean result is used as the
 * operand to the CMP instruction.
 */
bool
glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
{
   ir_rvalue *src_ir = ir;
   bool negate = true;
   bool switch_order = false;

   ir_expression *const expr = ir->as_expression();

   if (native_integers) {
      if ((expr != NULL) && (expr->num_operands == 2)) {
         enum glsl_base_type type = expr->operands[0]->type->base_type;
         if (type == GLSL_TYPE_INT || type == GLSL_TYPE_UINT ||
             type == GLSL_TYPE_BOOL) {
            if (expr->operation == ir_binop_equal) {
               if (expr->operands[0]->is_zero()) {
                  src_ir = expr->operands[1];
                  switch_order = true;
               }
               else if (expr->operands[1]->is_zero()) {
                  src_ir = expr->operands[0];
                  switch_order = true;
               }
            }
            else if (expr->operation == ir_binop_nequal) {
               if (expr->operands[0]->is_zero()) {
                  src_ir = expr->operands[1];
               }
               else if (expr->operands[1]->is_zero()) {
                  src_ir = expr->operands[0];
               }
            }
         }
      }

      src_ir->accept(this);
      return switch_order;
   }

   if ((expr != NULL) && (expr->num_operands == 2)) {
      bool zero_on_left = false;

      if (expr->operands[0]->is_zero()) {
         src_ir = expr->operands[1];
         zero_on_left = true;
      } else if (expr->operands[1]->is_zero()) {
         src_ir = expr->operands[0];
         zero_on_left = false;
      }

      /*      a is -  0  +            -  0  +
       * (a <  0)  T  F  F  ( a <  0)  T  F  F
       * (0 <  a)  F  F  T  (-a <  0)  F  F  T
       * (a >= 0)  F  T  T  ( a <  0)  T  F  F  (swap order of other operands)
       * (0 >= a)  T  T  F  (-a <  0)  F  F  T  (swap order of other operands)
       *
       * Note that exchanging the order of 0 and 'a' in the comparison simply
       * means that the value of 'a' should be negated.
       */
      if (src_ir != ir) {
         switch (expr->operation) {
         case ir_binop_less:
            switch_order = false;
            negate = zero_on_left;
            break;

         case ir_binop_gequal:
            switch_order = true;
            negate = zero_on_left;
            break;

         default:
            /* This isn't the right kind of comparison afterall, so make sure
             * the whole condition is visited.
             */
            src_ir = ir;
            break;
         }
      }
   }

   src_ir->accept(this);

   /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
    * condition we produced is 0.0 or 1.0.  By flipping the sign, we can
    * choose which value TGSI_OPCODE_CMP produces without an extra instruction
    * computing the condition.
    */
   if (negate)
      this->result.negate = ~this->result.negate;

   return switch_order;
}
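/* Copy a value of arbitrary GLSL type by recursing over structs, arrays and
 * matrices until only scalar/vector pieces remain, emitting one MOV (or
 * CMP/UCMP when a condition is present) per register slot.
 */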
void
glsl_to_tgsi_visitor::emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
                                     st_dst_reg *l, st_src_reg *r,
                                     st_src_reg *cond, bool cond_swap)
{
   if (type->is_record()) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_mov(ir, type->fields.structure[i].type, l, r,
                        cond, cond_swap);
      }
      return;
   }

   if (type->is_array()) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_mov(ir, type->fields.array, l, r, cond, cond_swap);
      }
      return;
   }

   if (type->is_matrix()) {
      const struct glsl_type *vec_type;

      vec_type = glsl_type::get_instance(type->is_double()
                                         ? GLSL_TYPE_DOUBLE : GLSL_TYPE_FLOAT,
                                         type->vector_elements, 1);

      for (int i = 0; i < type->matrix_columns; i++) {
         emit_block_mov(ir, vec_type, l, r, cond, cond_swap);
      }
      return;
   }

   assert(type->is_scalar() || type->is_vector());

   l->type = type->base_type;
   r->type = type->base_type;
   if (cond) {
      st_src_reg l_src = st_src_reg(*l);

      if (l_src.file == PROGRAM_OUTPUT &&
          this->prog->Target == GL_FRAGMENT_PROGRAM_ARB &&
          (l_src.index == FRAG_RESULT_DEPTH ||
           l_src.index == FRAG_RESULT_STENCIL)) {
         /* This is a special case because the source swizzles will be shifted
          * later to account for the difference between GLSL (where they're
          * plain floats) and TGSI (where they're Z and Y components). */
         l_src.swizzle = SWIZZLE_XXXX;
      }

      if (native_integers) {
         emit_asm(ir, TGSI_OPCODE_UCMP, *l, *cond,
                  cond_swap ? l_src : *r,
                  cond_swap ? *r : l_src);
      } else {
         emit_asm(ir, TGSI_OPCODE_CMP, *l, *cond,
                  cond_swap ? l_src : *r,
                  cond_swap ? *r : l_src);
      }
   } else {
      emit_asm(ir, TGSI_OPCODE_MOV, *l, *r);
   }
   l->index++;
   r->index++;
   if (type->is_dual_slot()) {
      l->index++;
      if (r->is_double_vertex_input == false)
         r->index++;
   }
}
void
glsl_to_tgsi_visitor::visit(ir_assignment *ir)
{
   st_dst_reg l;
   st_src_reg r;
   int dst_component = 0;

   /* all generated instructions need to be flagged as precise */
   this->precise = is_precise(ir->lhs->variable_referenced());
   ir->rhs->accept(this);
   r = this->result;

   l = get_assignment_lhs(ir->lhs, this, &dst_component);

   int swizzles[4];
   int first_enabled_chan = 0;
   int rhs_chan = 0;
   ir_variable *variable = ir->lhs->variable_referenced();

   if (shader->Stage == MESA_SHADER_FRAGMENT &&
       variable->data.mode == ir_var_shader_out &&
       (variable->data.location == FRAG_RESULT_DEPTH ||
        variable->data.location == FRAG_RESULT_STENCIL)) {
      assert(ir->lhs->type->is_scalar());
      assert(ir->write_mask == WRITEMASK_X);

      if (variable->data.location == FRAG_RESULT_DEPTH)
         l.writemask = WRITEMASK_Z;
      else {
         assert(variable->data.location == FRAG_RESULT_STENCIL);
         l.writemask = WRITEMASK_Y;
      }
   } else if (ir->write_mask == 0) {
      assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());

      unsigned num_elements =
         ir->lhs->type->without_array()->vector_elements;

      if (num_elements) {
         l.writemask = u_bit_consecutive(0, num_elements);
      } else {
         /* The type is a struct or an array of (array of) structs. */
         l.writemask = WRITEMASK_XYZW;
      }
   } else {
      l.writemask = ir->write_mask;
   }

   for (int i = 0; i < 4; i++) {
      if (l.writemask & (1 << i)) {
         first_enabled_chan = GET_SWZ(r.swizzle, i);
         break;
      }
   }

   l.writemask = l.writemask << dst_component;

   /* Swizzle a small RHS vector into the channels being written.
    *
    * glsl ir treats write_mask as dictating how many channels are
    * present on the RHS while TGSI treats write_mask as just
    * showing which channels of the vec4 RHS get written.
    */
   for (int i = 0; i < 4; i++) {
      if (l.writemask & (1 << i))
         swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
      else
         swizzles[i] = first_enabled_chan;
   }
   r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
                             swizzles[2], swizzles[3]);

   assert(l.file != PROGRAM_UNDEFINED);
   assert(r.file != PROGRAM_UNDEFINED);

   if (ir->condition) {
      const bool switch_order = this->process_move_condition(ir->condition);
      st_src_reg condition = this->result;

      emit_block_mov(ir, ir->lhs->type, &l, &r, &condition, switch_order);
   } else if (ir->rhs->as_expression() &&
              this->instructions.get_tail() &&
              ir->rhs == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->ir &&
              !((glsl_to_tgsi_instruction *)this->instructions.get_tail())->is_64bit_expanded &&
              type_size(ir->lhs->type) == 1 &&
              l.writemask == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->dst[0].writemask) {
      /* To avoid emitting an extra MOV when assigning an expression to a
       * variable, emit the last instruction of the expression again, but
       * replace the destination register with the target of the assignment.
       * Dead code elimination will remove the original instruction.
       */
      glsl_to_tgsi_instruction *inst, *new_inst;
      inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
      new_inst = emit_asm(ir, inst->op, l, inst->src[0], inst->src[1], inst->src[2], inst->src[3]);
      new_inst->saturate = inst->saturate;
      new_inst->resource = inst->resource;
      inst->dead_mask = inst->dst[0].writemask;
   } else {
      emit_block_mov(ir, ir->rhs->type, &l, &r, NULL, false);
   }
   this->precise = 0;
}
void
glsl_to_tgsi_visitor::visit(ir_constant *ir)
{
   st_src_reg src;
   GLdouble stack_vals[4] = { 0 };
   gl_constant_value *values = (gl_constant_value *) stack_vals;
   GLenum gl_type = GL_NONE;
   unsigned int i;
   static int in_array = 0;
   gl_register_file file = in_array ? PROGRAM_CONSTANT : PROGRAM_IMMEDIATE;

   /* Unfortunately, 4 floats is all we can get into
    * _mesa_add_typed_unnamed_constant.  So, make a temp to store an
    * aggregate constant and move each constant value into it.  If we
    * get lucky, copy propagation will eliminate the extra moves.
    */
   if (ir->type->is_record()) {
      st_src_reg temp_base = get_temp(ir->type);
      st_dst_reg temp = st_dst_reg(temp_base);

      for (i = 0; i < ir->type->length; i++) {
         ir_constant *const field_value = ir->get_record_field(i);
         int size = type_size(field_value->type);

         assert(size > 0);

         field_value->accept(this);
         src = this->result;

         for (unsigned j = 0; j < (unsigned int)size; j++) {
            emit_asm(ir, TGSI_OPCODE_MOV, temp, src);

            src.index++;
            temp.index++;
         }
      }
      this->result = temp_base;
      return;
   }

   if (ir->type->is_array()) {
      st_src_reg temp_base = get_temp(ir->type);
      st_dst_reg temp = st_dst_reg(temp_base);
      int size = type_size(ir->type->fields.array);

      assert(size > 0);
      in_array++;

      for (i = 0; i < ir->type->length; i++) {
         ir->const_elements[i]->accept(this);
         src = this->result;
         for (int j = 0; j < size; j++) {
            emit_asm(ir, TGSI_OPCODE_MOV, temp, src);

            src.index++;
            temp.index++;
         }
      }
      this->result = temp_base;
      in_array--;
      return;
   }

   if (ir->type->is_matrix()) {
      st_src_reg mat = get_temp(ir->type);
      st_dst_reg mat_column = st_dst_reg(mat);

      for (i = 0; i < ir->type->matrix_columns; i++) {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            values = (gl_constant_value *)
               &ir->value.f[i * ir->type->vector_elements];

            src = st_src_reg(file, -1, ir->type->base_type);
            src.index = add_constant(file,
                                     values,
                                     ir->type->vector_elements,
                                     GL_FLOAT,
                                     &src.swizzle);
            emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
            break;
         case GLSL_TYPE_DOUBLE:
            values = (gl_constant_value *)
               &ir->value.d[i * ir->type->vector_elements];
            src = st_src_reg(file, -1, ir->type->base_type);
            src.index = add_constant(file,
                                     values,
                                     ir->type->vector_elements,
                                     GL_DOUBLE,
                                     &src.swizzle);
            if (ir->type->vector_elements >= 2) {
               mat_column.writemask = WRITEMASK_XY;
               src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
                                           SWIZZLE_X, SWIZZLE_Y);
               emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
            } else {
               mat_column.writemask = WRITEMASK_X;
               src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
                                           SWIZZLE_X, SWIZZLE_X);
               emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
            }
            src.index++;
            if (ir->type->vector_elements > 2) {
               if (ir->type->vector_elements == 4) {
                  mat_column.writemask = WRITEMASK_ZW;
                  src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
                                              SWIZZLE_X, SWIZZLE_Y);
                  emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
               } else {
                  mat_column.writemask = WRITEMASK_Z;
                  src.swizzle = MAKE_SWIZZLE4(SWIZZLE_Y, SWIZZLE_Y,
                                              SWIZZLE_Y, SWIZZLE_Y);
                  emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
               }
               mat_column.writemask = WRITEMASK_XYZW;
               src.swizzle = SWIZZLE_XYZW;
            }
            break;
         default:
            unreachable("Illegal matrix constant type.\n");
            break;
         }
         mat_column.index++;
      }
      this->result = mat;
      return;
   }

   switch (ir->type->base_type) {
   case GLSL_TYPE_FLOAT:
      gl_type = GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         values[i].f = ir->value.f[i];
      }
      break;
   case GLSL_TYPE_DOUBLE:
      gl_type = GL_DOUBLE;
      for (i = 0; i < ir->type->vector_elements; i++) {
         memcpy(&values[i * 2], &ir->value.d[i], sizeof(double));
      }
      break;
   case GLSL_TYPE_INT64:
      gl_type = GL_INT64_ARB;
      for (i = 0; i < ir->type->vector_elements; i++) {
         memcpy(&values[i * 2], &ir->value.d[i], sizeof(int64_t));
      }
      break;
   case GLSL_TYPE_UINT64:
      gl_type = GL_UNSIGNED_INT64_ARB;
      for (i = 0; i < ir->type->vector_elements; i++) {
         memcpy(&values[i * 2], &ir->value.d[i], sizeof(uint64_t));
      }
      break;
   case GLSL_TYPE_UINT:
      gl_type = native_integers ? GL_UNSIGNED_INT : GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         if (native_integers)
            values[i].u = ir->value.u[i];
         else
            values[i].f = ir->value.u[i];
      }
      break;
   case GLSL_TYPE_INT:
      gl_type = native_integers ? GL_INT : GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         if (native_integers)
            values[i].i = ir->value.i[i];
         else
            values[i].f = ir->value.i[i];
      }
      break;
   case GLSL_TYPE_BOOL:
      gl_type = native_integers ? GL_BOOL : GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         values[i].u = ir->value.b[i] ? ctx->Const.UniformBooleanTrue : 0;
      }
      break;
   default:
      assert(!"Non-float/uint/int/bool constant");
   }

   this->result = st_src_reg(file, -1, ir->type);
   this->result.index = add_constant(file,
                                     values,
                                     ir->type->vector_elements,
                                     gl_type,
                                     &this->result.swizzle);
}
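/* Atomic counter intrinsics: depending on st_context()->has_hw_atomics the
 * counters live either in the dedicated PROGRAM_HW_ATOMIC file or in an
 * atomic buffer that is addressed like an SSBO.
 */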
void
glsl_to_tgsi_visitor::visit_atomic_counter_intrinsic(ir_call *ir)
{
   exec_node *param = ir->actual_parameters.get_head();
   ir_dereference *deref = static_cast<ir_dereference *>(param);
   ir_variable *location = deref->variable_referenced();
   bool has_hw_atomics = st_context(ctx)->has_hw_atomics;
   /* Calculate the surface offset */
   st_src_reg offset;
   unsigned array_size = 0, base = 0;
   uint16_t index = 0;
   st_src_reg resource;

   get_deref_offsets(deref, &array_size, &base, &index, &offset, false);

   if (has_hw_atomics) {
      variable_storage *entry = find_variable_storage(location);
      st_src_reg buffer(PROGRAM_HW_ATOMIC, 0, GLSL_TYPE_ATOMIC_UINT,
                        location->data.binding);

      if (!entry) {
         entry = new(mem_ctx) variable_storage(location, PROGRAM_HW_ATOMIC,
                                               num_atomics);
         _mesa_hash_table_insert(this->variables, location, entry);

         atomic_info[num_atomics].location = location->data.location;
         atomic_info[num_atomics].binding = location->data.binding;
         atomic_info[num_atomics].size = location->type->arrays_of_arrays_size();
         if (atomic_info[num_atomics].size == 0)
            atomic_info[num_atomics].size = 1;
         atomic_info[num_atomics].array_id = 0;
         num_atomics++;
      }

      if (offset.file != PROGRAM_UNDEFINED) {
         if (atomic_info[entry->index].array_id == 0) {
            num_atomic_arrays++;
            atomic_info[entry->index].array_id = num_atomic_arrays;
         }
         buffer.array_id = atomic_info[entry->index].array_id;
      }

      buffer.index = index;
      buffer.index += location->data.offset / ATOMIC_COUNTER_SIZE;
      buffer.has_index2 = true;

      if (offset.file != PROGRAM_UNDEFINED) {
         buffer.reladdr = ralloc(mem_ctx, st_src_reg);
         *buffer.reladdr = offset;
         emit_arl(ir, sampler_reladdr, offset);
      }
      offset = st_src_reg_for_int(0);

      resource = buffer;
   } else {
      st_src_reg buffer(PROGRAM_BUFFER, location->data.binding,
                        GLSL_TYPE_ATOMIC_UINT);

      if (offset.file != PROGRAM_UNDEFINED) {
         emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(offset),
                  offset, st_src_reg_for_int(ATOMIC_COUNTER_SIZE));
         emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(offset),
                  offset, st_src_reg_for_int(location->data.offset + index * ATOMIC_COUNTER_SIZE));
      } else {
         offset = st_src_reg_for_int(location->data.offset + index * ATOMIC_COUNTER_SIZE);
      }

      resource = buffer;
   }

   ir->return_deref->accept(this);
   st_dst_reg dst(this->result);
   dst.writemask = WRITEMASK_X;

   glsl_to_tgsi_instruction *inst;

   if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_read) {
      inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, offset);
   } else if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_increment) {
      inst = emit_asm(ir, TGSI_OPCODE_ATOMUADD, dst, offset,
                      st_src_reg_for_int(1));
   } else if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_predecrement) {
      inst = emit_asm(ir, TGSI_OPCODE_ATOMUADD, dst, offset,
                      st_src_reg_for_int(-1));
      emit_asm(ir, TGSI_OPCODE_ADD, dst, this->result, st_src_reg_for_int(-1));
   } else {
      param = param->get_next();
      ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
      val->accept(this);

      st_src_reg data = this->result, data2 = undef_src;
      enum tgsi_opcode opcode;
      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_atomic_counter_add:
         opcode = TGSI_OPCODE_ATOMUADD;
         break;
      case ir_intrinsic_atomic_counter_min:
         opcode = TGSI_OPCODE_ATOMIMIN;
         break;
      case ir_intrinsic_atomic_counter_max:
         opcode = TGSI_OPCODE_ATOMIMAX;
         break;
      case ir_intrinsic_atomic_counter_and:
         opcode = TGSI_OPCODE_ATOMAND;
         break;
      case ir_intrinsic_atomic_counter_or:
         opcode = TGSI_OPCODE_ATOMOR;
         break;
      case ir_intrinsic_atomic_counter_xor:
         opcode = TGSI_OPCODE_ATOMXOR;
         break;
      case ir_intrinsic_atomic_counter_exchange:
         opcode = TGSI_OPCODE_ATOMXCHG;
         break;
      case ir_intrinsic_atomic_counter_comp_swap: {
         opcode = TGSI_OPCODE_ATOMCAS;
         param = param->get_next();
         val = ((ir_instruction *)param)->as_rvalue();
         val->accept(this);
         data2 = this->result;
         break;
      }
      default:
         assert(!"Unexpected intrinsic");
         return;
      }

      inst = emit_asm(ir, opcode, dst, offset, data, data2);
   }

   inst->resource = resource;
}
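/* SSBO load/store/atomic intrinsics, lowered to TGSI buffer instructions with
 * the buffer resource attached via add_buffer_to_load_and_stores().
 */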
void
glsl_to_tgsi_visitor::visit_ssbo_intrinsic(ir_call *ir)
{
   exec_node *param = ir->actual_parameters.get_head();

   ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();

   param = param->get_next();
   ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

   ir_constant *const_block = block->as_constant();
   int buf_base = st_context(ctx)->has_hw_atomics
      ? 0 : ctx->Const.Program[shader->Stage].MaxAtomicBuffers;

   st_src_reg buffer(
         PROGRAM_BUFFER,
         buf_base + (const_block ? const_block->value.u[0] : 0),
         GLSL_TYPE_UINT);

   if (!const_block) {
      block->accept(this);
      buffer.reladdr = ralloc(mem_ctx, st_src_reg);
      *buffer.reladdr = this->result;
      emit_arl(ir, sampler_reladdr, this->result);
   }

   /* Calculate the surface offset */
   offset->accept(this);
   st_src_reg off = this->result;

   st_dst_reg dst = undef_dst;
   if (ir->return_deref) {
      ir->return_deref->accept(this);
      dst = st_dst_reg(this->result);
      dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
   }

   glsl_to_tgsi_instruction *inst;

   if (ir->callee->intrinsic_id == ir_intrinsic_ssbo_load) {
      inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
      if (dst.type == GLSL_TYPE_BOOL)
         emit_asm(ir, TGSI_OPCODE_USNE, dst, st_src_reg(dst),
                  st_src_reg_for_int(0));
   } else if (ir->callee->intrinsic_id == ir_intrinsic_ssbo_store) {
      param = param->get_next();
      ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
      val->accept(this);

      param = param->get_next();
      ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
      assert(write_mask);
      dst.writemask = write_mask->value.u[0];

      dst.type = this->result.type;
      inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
   } else {
      param = param->get_next();
      ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
      val->accept(this);

      st_src_reg data = this->result, data2 = undef_src;
      enum tgsi_opcode opcode;
      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_ssbo_atomic_add:
         opcode = TGSI_OPCODE_ATOMUADD;
         break;
      case ir_intrinsic_ssbo_atomic_min:
         opcode = TGSI_OPCODE_ATOMIMIN;
         break;
      case ir_intrinsic_ssbo_atomic_max:
         opcode = TGSI_OPCODE_ATOMIMAX;
         break;
      case ir_intrinsic_ssbo_atomic_and:
         opcode = TGSI_OPCODE_ATOMAND;
         break;
      case ir_intrinsic_ssbo_atomic_or:
         opcode = TGSI_OPCODE_ATOMOR;
         break;
      case ir_intrinsic_ssbo_atomic_xor:
         opcode = TGSI_OPCODE_ATOMXOR;
         break;
      case ir_intrinsic_ssbo_atomic_exchange:
         opcode = TGSI_OPCODE_ATOMXCHG;
         break;
      case ir_intrinsic_ssbo_atomic_comp_swap:
         opcode = TGSI_OPCODE_ATOMCAS;
         param = param->get_next();
         val = ((ir_instruction *)param)->as_rvalue();
         val->accept(this);
         data2 = this->result;
         break;
      default:
         assert(!"Unexpected intrinsic");
         return;
      }

      inst = emit_asm(ir, opcode, dst, off, data, data2);
   }

   param = param->get_next();
   ir_constant *access = NULL;
   if (!param->is_tail_sentinel()) {
      access = ((ir_instruction *)param)->as_constant();
      assert(access);
   }

   add_buffer_to_load_and_stores(inst, &buffer, &this->instructions, access);
}
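/* Memory barrier intrinsics map to TGSI_OPCODE_MEMBAR with the appropriate
 * TGSI_MEMBAR_* bits passed as an immediate source.
 */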
void
glsl_to_tgsi_visitor::visit_membar_intrinsic(ir_call *ir)
{
   switch (ir->callee->intrinsic_id) {
   case ir_intrinsic_memory_barrier:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
                                  TGSI_MEMBAR_ATOMIC_BUFFER |
                                  TGSI_MEMBAR_SHADER_IMAGE |
                                  TGSI_MEMBAR_SHARED));
      break;
   case ir_intrinsic_memory_barrier_atomic_counter:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_ATOMIC_BUFFER));
      break;
   case ir_intrinsic_memory_barrier_buffer:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER));
      break;
   case ir_intrinsic_memory_barrier_image:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_IMAGE));
      break;
   case ir_intrinsic_memory_barrier_shared:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHARED));
      break;
   case ir_intrinsic_group_memory_barrier:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
                                  TGSI_MEMBAR_ATOMIC_BUFFER |
                                  TGSI_MEMBAR_SHADER_IMAGE |
                                  TGSI_MEMBAR_SHARED |
                                  TGSI_MEMBAR_THREAD_GROUP));
      break;
   default:
      assert(!"Unexpected memory barrier intrinsic");
   }
}
void
glsl_to_tgsi_visitor::visit_shared_intrinsic(ir_call *ir)
{
   exec_node *param = ir->actual_parameters.get_head();
   ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

   st_src_reg buffer(PROGRAM_MEMORY, 0, GLSL_TYPE_UINT);

   /* Calculate the surface offset */
   offset->accept(this);
   st_src_reg off = this->result;

   st_dst_reg dst = undef_dst;
   if (ir->return_deref) {
      ir->return_deref->accept(this);
      dst = st_dst_reg(this->result);
      dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
   }

   glsl_to_tgsi_instruction *inst;

   if (ir->callee->intrinsic_id == ir_intrinsic_shared_load) {
      inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
      inst->resource = buffer;
   } else if (ir->callee->intrinsic_id == ir_intrinsic_shared_store) {
      param = param->get_next();
      ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
      val->accept(this);

      param = param->get_next();
      ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
      assert(write_mask);
      dst.writemask = write_mask->value.u[0];

      dst.type = this->result.type;
      inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
      inst->resource = buffer;
   } else {
      param = param->get_next();
      ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
      val->accept(this);

      st_src_reg data = this->result, data2 = undef_src;
      enum tgsi_opcode opcode;
      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_shared_atomic_add:
         opcode = TGSI_OPCODE_ATOMUADD;
         break;
      case ir_intrinsic_shared_atomic_min:
         opcode = TGSI_OPCODE_ATOMIMIN;
         break;
      case ir_intrinsic_shared_atomic_max:
         opcode = TGSI_OPCODE_ATOMIMAX;
         break;
      case ir_intrinsic_shared_atomic_and:
         opcode = TGSI_OPCODE_ATOMAND;
         break;
      case ir_intrinsic_shared_atomic_or:
         opcode = TGSI_OPCODE_ATOMOR;
         break;
      case ir_intrinsic_shared_atomic_xor:
         opcode = TGSI_OPCODE_ATOMXOR;
         break;
      case ir_intrinsic_shared_atomic_exchange:
         opcode = TGSI_OPCODE_ATOMXCHG;
         break;
      case ir_intrinsic_shared_atomic_comp_swap:
         opcode = TGSI_OPCODE_ATOMCAS;
         param = param->get_next();
         val = ((ir_instruction *)param)->as_rvalue();
         val->accept(this);
         data2 = this->result;
         break;
      default:
         assert(!"Unexpected intrinsic");
         return;
      }

      inst = emit_asm(ir, opcode, dst, off, data, data2);
      inst->resource = buffer;
   }
}
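/* Walk an image dereference chain down to the underlying variable or struct
 * field to recover its type and its memory/format qualifiers.
 */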
static void
get_image_qualifiers(ir_dereference *ir, const glsl_type **type,
                     bool *memory_coherent, bool *memory_volatile,
                     bool *memory_restrict, unsigned *image_format)
{
   switch (ir->ir_type) {
   case ir_type_dereference_record: {
      ir_dereference_record *deref_record = ir->as_dereference_record();
      const glsl_type *struct_type = deref_record->record->type;
      int fild_idx = deref_record->field_idx;

      *type =
         struct_type->fields.structure[fild_idx].type->without_array();
      *memory_coherent =
         struct_type->fields.structure[fild_idx].memory_coherent;
      *memory_volatile =
         struct_type->fields.structure[fild_idx].memory_volatile;
      *memory_restrict =
         struct_type->fields.structure[fild_idx].memory_restrict;
      *image_format =
         struct_type->fields.structure[fild_idx].image_format;
      break;
   }

   case ir_type_dereference_array: {
      ir_dereference_array *deref_arr = ir->as_dereference_array();
      get_image_qualifiers((ir_dereference *)deref_arr->array, type,
                           memory_coherent, memory_volatile, memory_restrict,
                           image_format);
      break;
   }

   case ir_type_dereference_variable: {
      ir_variable *var = ir->variable_referenced();

      *type = var->type->without_array();
      *memory_coherent = var->data.memory_coherent;
      *memory_volatile = var->data.memory_volatile;
      *memory_restrict = var->data.memory_restrict;
      *image_format = var->data.image_format;
      break;
   }

   default:
      break;
   }
}
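/* Image load/store/atomic/size/samples intrinsics. The image resource is
 * either indexed through PROGRAM_IMAGE or taken from a bindless handle.
 */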
void
glsl_to_tgsi_visitor::visit_image_intrinsic(ir_call *ir)
{
   exec_node *param = ir->actual_parameters.get_head();

   ir_dereference *img = (ir_dereference *)param;
   const ir_variable *imgvar = img->variable_referenced();
   unsigned sampler_array_size = 1, sampler_base = 0;
   bool memory_coherent = false, memory_volatile = false, memory_restrict = false;
   unsigned image_format = 0;
   const glsl_type *type = NULL;

   get_image_qualifiers(img, &type, &memory_coherent, &memory_volatile,
                        &memory_restrict, &image_format);

   st_src_reg reladdr;
   st_src_reg image(PROGRAM_IMAGE, 0, GLSL_TYPE_UINT);
   uint16_t index = 0;
   get_deref_offsets(img, &sampler_array_size, &sampler_base,
                     &index, &reladdr, !imgvar->contains_bindless());

   image.index = index;
   if (reladdr.file != PROGRAM_UNDEFINED) {
      image.reladdr = ralloc(mem_ctx, st_src_reg);
      *image.reladdr = reladdr;
      emit_arl(ir, sampler_reladdr, reladdr);
   }

   st_dst_reg dst = undef_dst;
   if (ir->return_deref) {
      ir->return_deref->accept(this);
      dst = st_dst_reg(this->result);
      dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
   }

   glsl_to_tgsi_instruction *inst;

   st_src_reg bindless;
   if (imgvar->contains_bindless()) {
      img->accept(this);
      bindless = this->result;
   }

   if (ir->callee->intrinsic_id == ir_intrinsic_image_size) {
      dst.writemask = WRITEMASK_XYZ;
      inst = emit_asm(ir, TGSI_OPCODE_RESQ, dst);
   } else if (ir->callee->intrinsic_id == ir_intrinsic_image_samples) {
      st_src_reg res = get_temp(glsl_type::ivec4_type);
      st_dst_reg dstres = st_dst_reg(res);
      dstres.writemask = WRITEMASK_W;
      inst = emit_asm(ir, TGSI_OPCODE_RESQ, dstres);
      res.swizzle = SWIZZLE_WWWW;
      emit_asm(ir, TGSI_OPCODE_MOV, dst, res);
   } else {
      st_src_reg arg1 = undef_src, arg2 = undef_src;
      st_src_reg coord;
      st_dst_reg coord_dst;
      coord = get_temp(glsl_type::ivec4_type);
      coord_dst = st_dst_reg(coord);
      coord_dst.writemask = (1 << type->coordinate_components()) - 1;
      param = param->get_next();
      ((ir_dereference *)param)->accept(this);
      emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
      coord.swizzle = SWIZZLE_XXXX;
      switch (type->coordinate_components()) {
      case 4: assert(!"unexpected coord count");
      /* fallthrough */
      case 3: coord.swizzle |= SWIZZLE_Z << 6;
      /* fallthrough */
      case 2: coord.swizzle |= SWIZZLE_Y << 3;
      }

      if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
         param = param->get_next();
         ((ir_dereference *)param)->accept(this);
         st_src_reg sample = this->result;
         sample.swizzle = SWIZZLE_XXXX;
         coord_dst.writemask = WRITEMASK_W;
         emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, sample);
         coord.swizzle |= SWIZZLE_W << 9;
      }

      param = param->get_next();
      if (!param->is_tail_sentinel()) {
         ((ir_dereference *)param)->accept(this);
         arg1 = this->result;
         param = param->get_next();
      }

      if (!param->is_tail_sentinel()) {
         ((ir_dereference *)param)->accept(this);
         arg2 = this->result;
         param = param->get_next();
      }

      assert(param->is_tail_sentinel());

      enum tgsi_opcode opcode;
      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_image_load:
         opcode = TGSI_OPCODE_LOAD;
         break;
      case ir_intrinsic_image_store:
         opcode = TGSI_OPCODE_STORE;
         break;
      case ir_intrinsic_image_atomic_add:
         opcode = TGSI_OPCODE_ATOMUADD;
         break;
      case ir_intrinsic_image_atomic_min:
         opcode = TGSI_OPCODE_ATOMIMIN;
         break;
      case ir_intrinsic_image_atomic_max:
         opcode = TGSI_OPCODE_ATOMIMAX;
         break;
      case ir_intrinsic_image_atomic_and:
         opcode = TGSI_OPCODE_ATOMAND;
         break;
      case ir_intrinsic_image_atomic_or:
         opcode = TGSI_OPCODE_ATOMOR;
         break;
      case ir_intrinsic_image_atomic_xor:
         opcode = TGSI_OPCODE_ATOMXOR;
         break;
      case ir_intrinsic_image_atomic_exchange:
         opcode = TGSI_OPCODE_ATOMXCHG;
         break;
      case ir_intrinsic_image_atomic_comp_swap:
         opcode = TGSI_OPCODE_ATOMCAS;
         break;
      default:
         assert(!"Unexpected intrinsic");
         return;
      }

      inst = emit_asm(ir, opcode, dst, coord, arg1, arg2);
      if (opcode == TGSI_OPCODE_STORE)
         inst->dst[0].writemask = WRITEMASK_XYZW;
   }

   if (imgvar->contains_bindless()) {
      inst->resource = bindless;
      inst->resource.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
                                             SWIZZLE_X, SWIZZLE_Y);
   } else {
      inst->resource = image;
      inst->sampler_array_size = sampler_array_size;
      inst->sampler_base = sampler_base;
   }

   inst->tex_target = type->sampler_index();
   inst->image_format = st_mesa_format_to_pipe_format(st_context(ctx),
         _mesa_get_shader_image_format(image_format));

   if (memory_coherent)
      inst->buffer_access |= TGSI_MEMORY_COHERENT;
   if (memory_restrict)
      inst->buffer_access |= TGSI_MEMORY_RESTRICT;
   if (memory_volatile)
      inst->buffer_access |= TGSI_MEMORY_VOLATILE;
}
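/* Helper for intrinsics that map 1:1 onto a single TGSI opcode: gather the
 * actual parameters as sources and emit one instruction.
 */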
void
glsl_to_tgsi_visitor::visit_generic_intrinsic(ir_call *ir, enum tgsi_opcode op)
{
   ir->return_deref->accept(this);
   st_dst_reg dst = st_dst_reg(this->result);

   dst.writemask = u_bit_consecutive(0, ir->return_deref->var->type->vector_elements);

   st_src_reg src[4] = { undef_src, undef_src, undef_src, undef_src };
   unsigned num_src = 0;
   foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
      assert(num_src < ARRAY_SIZE(src));

      this->result.file = PROGRAM_UNDEFINED;
      param->accept(this);
      assert(this->result.file != PROGRAM_UNDEFINED);

      src[num_src] = this->result;
      num_src++;
   }

   emit_asm(ir, op, dst, src[0], src[1], src[2], src[3]);
}
void
glsl_to_tgsi_visitor::visit(ir_call *ir)
{
   ir_function_signature *sig = ir->callee;

   /* Filter out intrinsics */
   switch (sig->intrinsic_id) {
   case ir_intrinsic_atomic_counter_read:
   case ir_intrinsic_atomic_counter_increment:
   case ir_intrinsic_atomic_counter_predecrement:
   case ir_intrinsic_atomic_counter_add:
   case ir_intrinsic_atomic_counter_min:
   case ir_intrinsic_atomic_counter_max:
   case ir_intrinsic_atomic_counter_and:
   case ir_intrinsic_atomic_counter_or:
   case ir_intrinsic_atomic_counter_xor:
   case ir_intrinsic_atomic_counter_exchange:
   case ir_intrinsic_atomic_counter_comp_swap:
      visit_atomic_counter_intrinsic(ir);
      return;

   case ir_intrinsic_ssbo_load:
   case ir_intrinsic_ssbo_store:
   case ir_intrinsic_ssbo_atomic_add:
   case ir_intrinsic_ssbo_atomic_min:
   case ir_intrinsic_ssbo_atomic_max:
   case ir_intrinsic_ssbo_atomic_and:
   case ir_intrinsic_ssbo_atomic_or:
   case ir_intrinsic_ssbo_atomic_xor:
   case ir_intrinsic_ssbo_atomic_exchange:
   case ir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_intrinsic(ir);
      return;

   case ir_intrinsic_memory_barrier:
   case ir_intrinsic_memory_barrier_atomic_counter:
   case ir_intrinsic_memory_barrier_buffer:
   case ir_intrinsic_memory_barrier_image:
   case ir_intrinsic_memory_barrier_shared:
   case ir_intrinsic_group_memory_barrier:
      visit_membar_intrinsic(ir);
      return;

   case ir_intrinsic_shared_load:
   case ir_intrinsic_shared_store:
   case ir_intrinsic_shared_atomic_add:
   case ir_intrinsic_shared_atomic_min:
   case ir_intrinsic_shared_atomic_max:
   case ir_intrinsic_shared_atomic_and:
   case ir_intrinsic_shared_atomic_or:
   case ir_intrinsic_shared_atomic_xor:
   case ir_intrinsic_shared_atomic_exchange:
   case ir_intrinsic_shared_atomic_comp_swap:
      visit_shared_intrinsic(ir);
      return;

   case ir_intrinsic_image_load:
   case ir_intrinsic_image_store:
   case ir_intrinsic_image_atomic_add:
   case ir_intrinsic_image_atomic_min:
   case ir_intrinsic_image_atomic_max:
   case ir_intrinsic_image_atomic_and:
   case ir_intrinsic_image_atomic_or:
   case ir_intrinsic_image_atomic_xor:
   case ir_intrinsic_image_atomic_exchange:
   case ir_intrinsic_image_atomic_comp_swap:
   case ir_intrinsic_image_size:
   case ir_intrinsic_image_samples:
      visit_image_intrinsic(ir);
      return;

   case ir_intrinsic_shader_clock:
      visit_generic_intrinsic(ir, TGSI_OPCODE_CLOCK);
      return;

   case ir_intrinsic_vote_all:
      visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_ALL);
      return;
   case ir_intrinsic_vote_any:
      visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_ANY);
      return;
   case ir_intrinsic_vote_eq:
      visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_EQ);
      return;
   case ir_intrinsic_ballot:
      visit_generic_intrinsic(ir, TGSI_OPCODE_BALLOT);
      return;
   case ir_intrinsic_read_first_invocation:
      visit_generic_intrinsic(ir, TGSI_OPCODE_READ_FIRST);
      return;
   case ir_intrinsic_read_invocation:
      visit_generic_intrinsic(ir, TGSI_OPCODE_READ_INVOC);
      return;

   case ir_intrinsic_invalid:
   case ir_intrinsic_generic_load:
   case ir_intrinsic_generic_store:
   case ir_intrinsic_generic_atomic_add:
   case ir_intrinsic_generic_atomic_and:
   case ir_intrinsic_generic_atomic_or:
   case ir_intrinsic_generic_atomic_xor:
   case ir_intrinsic_generic_atomic_min:
   case ir_intrinsic_generic_atomic_max:
   case ir_intrinsic_generic_atomic_exchange:
   case ir_intrinsic_generic_atomic_comp_swap:
   case ir_intrinsic_begin_invocation_interlock:
   case ir_intrinsic_end_invocation_interlock:
      unreachable("Invalid intrinsic");
   }
}
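/* Accumulate the constant and indirect parts of a dereference chain; used by
 * get_deref_offsets() below for samplers, images and atomic counters.
 */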
void
glsl_to_tgsi_visitor::calc_deref_offsets(ir_dereference *tail,
                                         unsigned *array_elements,
                                         uint16_t *index,
                                         st_src_reg *indirect,
                                         unsigned *location)
{
   switch (tail->ir_type) {
   case ir_type_dereference_record: {
      ir_dereference_record *deref_record = tail->as_dereference_record();
      const glsl_type *struct_type = deref_record->record->type;
      int field_index = deref_record->field_idx;

      calc_deref_offsets(deref_record->record->as_dereference(), array_elements, index, indirect, location);

      assert(field_index >= 0);
      *location += struct_type->record_location_offset(field_index);
      break;
   }

   case ir_type_dereference_array: {
      ir_dereference_array *deref_arr = tail->as_dereference_array();

      void *mem_ctx = ralloc_parent(deref_arr);
      ir_constant *array_index =
         deref_arr->array_index->constant_expression_value(mem_ctx);

      if (!array_index) {
         st_src_reg temp_reg;
         st_dst_reg temp_dst;

         temp_reg = get_temp(glsl_type::uint_type);
         temp_dst = st_dst_reg(temp_reg);
         temp_dst.writemask = 1;

         deref_arr->array_index->accept(this);
         if (*array_elements != 1)
            emit_asm(NULL, TGSI_OPCODE_MUL, temp_dst, this->result, st_src_reg_for_int(*array_elements));
         else
            emit_asm(NULL, TGSI_OPCODE_MOV, temp_dst, this->result);

         if (indirect->file == PROGRAM_UNDEFINED)
            *indirect = temp_reg;
         else {
            temp_dst = st_dst_reg(*indirect);
            temp_dst.writemask = 1;
            emit_asm(NULL, TGSI_OPCODE_ADD, temp_dst, *indirect, temp_reg);
         }
      } else
         *index += array_index->value.u[0] * *array_elements;

      *array_elements *= deref_arr->array->type->length;

      calc_deref_offsets(deref_arr->array->as_dereference(), array_elements, index, indirect, location);
      break;
   }
   default:
      break;
   }
}
4061 glsl_to_tgsi_visitor::get_deref_offsets(ir_dereference
*ir
,
4062 unsigned *array_size
,
4065 st_src_reg
*reladdr
,
4068 GLuint shader
= _mesa_program_enum_to_shader_stage(this->prog
->Target
);
4069 unsigned location
= 0;
4070 ir_variable
*var
= ir
->variable_referenced();
4072 memset(reladdr
, 0, sizeof(*reladdr
));
4073 reladdr
->file
= PROGRAM_UNDEFINED
;
4079 location
= var
->data
.location
;
4080 calc_deref_offsets(ir
, array_size
, index
, reladdr
, &location
);
4083 * If we end up with no indirect then adjust the base to the index,
4084 * and set the array size to 1.
4086 if (reladdr
->file
== PROGRAM_UNDEFINED
) {
4092 assert(location
!= 0xffffffff);
4093 *base
+= this->shader_program
->data
->UniformStorage
[location
].opaque
[shader
].index
;
4094 *index
+= this->shader_program
->data
->UniformStorage
[location
].opaque
[shader
].index
;
4099 glsl_to_tgsi_visitor::canonicalize_gather_offset(st_src_reg offset
)
4101 if (offset
.reladdr
|| offset
.reladdr2
||
4102 offset
.has_index2
||
4103 offset
.file
== PROGRAM_UNIFORM
||
4104 offset
.file
== PROGRAM_CONSTANT
||
4105 offset
.file
== PROGRAM_STATE_VAR
) {
4106 st_src_reg tmp
= get_temp(glsl_type::ivec2_type
);
4107 st_dst_reg tmp_dst
= st_dst_reg(tmp
);
4108 tmp_dst
.writemask
= WRITEMASK_XY
;
4109 emit_asm(NULL
, TGSI_OPCODE_MOV
, tmp_dst
, offset
);
4117 glsl_to_tgsi_visitor::visit(ir_texture
*ir
)
4119 st_src_reg result_src
, coord
, cube_sc
, lod_info
, projector
, dx
, dy
;
4120 st_src_reg offset
[MAX_GLSL_TEXTURE_OFFSET
], sample_index
, component
;
4121 st_src_reg levels_src
, reladdr
;
4122 st_dst_reg result_dst
, coord_dst
, cube_sc_dst
;
4123 glsl_to_tgsi_instruction
*inst
= NULL
;
4124 enum tgsi_opcode opcode
= TGSI_OPCODE_NOP
;
4125 const glsl_type
*sampler_type
= ir
->sampler
->type
;
4126 unsigned sampler_array_size
= 1, sampler_base
= 0;
4127 bool is_cube_array
= false, is_cube_shadow
= false;
4128 ir_variable
*var
= ir
->sampler
->variable_referenced();
4131 /* if we are a cube array sampler or a cube shadow */
4132 if (sampler_type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_CUBE
) {
4133 is_cube_array
= sampler_type
->sampler_array
;
4134 is_cube_shadow
= sampler_type
->sampler_shadow
;
4137 if (ir
->coordinate
) {
4138 ir
->coordinate
->accept(this);
4140 /* Put our coords in a temp. We'll need to modify them for shadow,
4141 * projection, or LOD, so the only case we'd use it as-is is if
4142 * we're doing plain old texturing. The optimization passes on
4143 * glsl_to_tgsi_visitor should handle cleaning up our mess in that case.
4145 coord
= get_temp(glsl_type::vec4_type
);
4146 coord_dst
= st_dst_reg(coord
);
4147 coord_dst
.writemask
= (1 << ir
->coordinate
->type
->vector_elements
) - 1;
4148 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, this->result
);
4151 if (ir
->projector
) {
4152 ir
->projector
->accept(this);
4153 projector
= this->result
;
4156 /* Storage for our result. Ideally for an assignment we'd be using
4157 * the actual storage for the result here, instead.
4159 result_src
= get_temp(ir
->type
);
4160 result_dst
= st_dst_reg(result_src
);
4161 result_dst
.writemask
= (1 << ir
->type
->vector_elements
) - 1;
4165 opcode
= (is_cube_array
&& ir
->shadow_comparator
) ? TGSI_OPCODE_TEX2
: TGSI_OPCODE_TEX
;
4167 ir
->offset
->accept(this);
4168 offset
[0] = this->result
;
4172 if (is_cube_array
|| is_cube_shadow
) {
4173 opcode
= TGSI_OPCODE_TXB2
;
4176 opcode
= TGSI_OPCODE_TXB
;
4178 ir
->lod_info
.bias
->accept(this);
4179 lod_info
= this->result
;
4181 ir
->offset
->accept(this);
4182 offset
[0] = this->result
;
4186 if (this->has_tex_txf_lz
&& ir
->lod_info
.lod
->is_zero()) {
4187 opcode
= TGSI_OPCODE_TEX_LZ
;
4189 opcode
= is_cube_array
? TGSI_OPCODE_TXL2
: TGSI_OPCODE_TXL
;
4190 ir
->lod_info
.lod
->accept(this);
4191 lod_info
= this->result
;
4194 ir
->offset
->accept(this);
4195 offset
[0] = this->result
;
4199 opcode
= TGSI_OPCODE_TXD
;
4200 ir
->lod_info
.grad
.dPdx
->accept(this);
4202 ir
->lod_info
.grad
.dPdy
->accept(this);
4205 ir
->offset
->accept(this);
4206 offset
[0] = this->result
;
4210 opcode
= TGSI_OPCODE_TXQ
;
4211 ir
->lod_info
.lod
->accept(this);
4212 lod_info
= this->result
;
4214 case ir_query_levels
:
4215 opcode
= TGSI_OPCODE_TXQ
;
4216 lod_info
= undef_src
;
4217 levels_src
= get_temp(ir
->type
);
4220 if (this->has_tex_txf_lz
&& ir
->lod_info
.lod
->is_zero()) {
4221 opcode
= TGSI_OPCODE_TXF_LZ
;
4223 opcode
= TGSI_OPCODE_TXF
;
4224 ir
->lod_info
.lod
->accept(this);
4225 lod_info
= this->result
;
4228 ir
->offset
->accept(this);
4229 offset
[0] = this->result
;
4233 opcode
= TGSI_OPCODE_TXF
;
4234 ir
->lod_info
.sample_index
->accept(this);
4235 sample_index
= this->result
;
4238 opcode
= TGSI_OPCODE_TG4
;
4239 ir
->lod_info
.component
->accept(this);
4240 component
= this->result
;
4242 ir
->offset
->accept(this);
4243 if (ir
->offset
->type
->is_array()) {
4244 const glsl_type
*elt_type
= ir
->offset
->type
->fields
.array
;
4245 for (i
= 0; i
< ir
->offset
->type
->length
; i
++) {
4246 offset
[i
] = this->result
;
4247 offset
[i
].index
+= i
* type_size(elt_type
);
4248 offset
[i
].type
= elt_type
->base_type
;
4249 offset
[i
].swizzle
= swizzle_for_size(elt_type
->vector_elements
);
4250 offset
[i
] = canonicalize_gather_offset(offset
[i
]);
4253 offset
[0] = canonicalize_gather_offset(this->result
);
4258 opcode
= TGSI_OPCODE_LODQ
;
4260 case ir_texture_samples
:
4261 opcode
= TGSI_OPCODE_TXQS
;
4263 case ir_samples_identical
:
4264 unreachable("Unexpected ir_samples_identical opcode");
4267 if (ir
->projector
) {
4268 if (opcode
== TGSI_OPCODE_TEX
) {
4269 /* Slot the projector in as the last component of the coord. */
4270 coord_dst
.writemask
= WRITEMASK_W
;
4271 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, projector
);
4272 coord_dst
.writemask
= WRITEMASK_XYZW
;
4273 opcode
= TGSI_OPCODE_TXP
;
4275 st_src_reg coord_w
= coord
;
4276 coord_w
.swizzle
= SWIZZLE_WWWW
;
4278 /* For the other TEX opcodes there's no projective version
4279 * since the last slot is taken up by LOD info. Do the
4280 * projective divide now.
4282 coord_dst
.writemask
= WRITEMASK_W
;
4283 emit_asm(ir
, TGSI_OPCODE_RCP
, coord_dst
, projector
);
4285 /* In the case where we have to project the coordinates "by hand,"
4286 * the shadow comparator value must also be projected.
4288 st_src_reg tmp_src
= coord
;
4289 if (ir
->shadow_comparator
) {
4290 /* Slot the shadow value in as the second to last component of the
4293 ir
->shadow_comparator
->accept(this);
4295 tmp_src
= get_temp(glsl_type::vec4_type
);
4296 st_dst_reg tmp_dst
= st_dst_reg(tmp_src
);
4298 /* Projective division not allowed for array samplers. */
4299 assert(!sampler_type
->sampler_array
);
4301 tmp_dst
.writemask
= WRITEMASK_Z
;
4302 emit_asm(ir
, TGSI_OPCODE_MOV
, tmp_dst
, this->result
);
4304 tmp_dst
.writemask
= WRITEMASK_XY
;
4305 emit_asm(ir
, TGSI_OPCODE_MOV
, tmp_dst
, coord
);
4308 coord_dst
.writemask
= WRITEMASK_XYZ
;
4309 emit_asm(ir
, TGSI_OPCODE_MUL
, coord_dst
, tmp_src
, coord_w
);
4311 coord_dst
.writemask
= WRITEMASK_XYZW
;
4312 coord
.swizzle
= SWIZZLE_XYZW
;
4316 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the
4317 * shadow comparator was put in the correct place (and projected) by the
4318 * code, above, that handles by-hand projection.
4320 if (ir
->shadow_comparator
&& (!ir
->projector
|| opcode
== TGSI_OPCODE_TXP
)) {
4321 /* Slot the shadow value in as the second to last component of the
4324 ir
->shadow_comparator
->accept(this);
4326 if (is_cube_array
) {
4327 cube_sc
= get_temp(glsl_type::float_type
);
4328 cube_sc_dst
= st_dst_reg(cube_sc
);
4329 cube_sc_dst
.writemask
= WRITEMASK_X
;
4330 emit_asm(ir
, TGSI_OPCODE_MOV
, cube_sc_dst
, this->result
);
4331 cube_sc_dst
.writemask
= WRITEMASK_X
;
4334 if ((sampler_type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_2D
&&
4335 sampler_type
->sampler_array
) ||
4336 sampler_type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_CUBE
) {
4337 coord_dst
.writemask
= WRITEMASK_W
;
4339 coord_dst
.writemask
= WRITEMASK_Z
;
4341 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, this->result
);
4342 coord_dst
.writemask
= WRITEMASK_XYZW
;
4346 if (ir
->op
== ir_txf_ms
) {
4347 coord_dst
.writemask
= WRITEMASK_W
;
4348 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, sample_index
);
4349 coord_dst
.writemask
= WRITEMASK_XYZW
;
4350 } else if (opcode
== TGSI_OPCODE_TXL
|| opcode
== TGSI_OPCODE_TXB
||
4351 opcode
== TGSI_OPCODE_TXF
) {
4352 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
4353 coord_dst
.writemask
= WRITEMASK_W
;
4354 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, lod_info
);
4355 coord_dst
.writemask
= WRITEMASK_XYZW
;
4358 st_src_reg
sampler(PROGRAM_SAMPLER
, 0, GLSL_TYPE_UINT
);
4361 get_deref_offsets(ir
->sampler
, &sampler_array_size
, &sampler_base
,
4362 &index
, &reladdr
, !var
->contains_bindless());
4364 sampler
.index
= index
;
4365 if (reladdr
.file
!= PROGRAM_UNDEFINED
) {
4366 sampler
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
4367 *sampler
.reladdr
= reladdr
;
4368 emit_arl(ir
, sampler_reladdr
, reladdr
);
4371 st_src_reg bindless
;
4372 if (var
->contains_bindless()) {
4373 ir
->sampler
->accept(this);
4374 bindless
= this->result
;
4377 if (opcode
== TGSI_OPCODE_TXD
)
4378 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, dx
, dy
);
4379 else if (opcode
== TGSI_OPCODE_TXQ
) {
4380 if (ir
->op
== ir_query_levels
) {
4381 /* the level is stored in W */
4382 inst
= emit_asm(ir
, opcode
, st_dst_reg(levels_src
), lod_info
);
4383 result_dst
.writemask
= WRITEMASK_X
;
4384 levels_src
.swizzle
= SWIZZLE_WWWW
;
4385 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, levels_src
);
4387 inst
= emit_asm(ir
, opcode
, result_dst
, lod_info
);
4388 } else if (opcode
== TGSI_OPCODE_TXQS
) {
4389 inst
= emit_asm(ir
, opcode
, result_dst
);
4390 } else if (opcode
== TGSI_OPCODE_TXL2
|| opcode
== TGSI_OPCODE_TXB2
) {
4391 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, lod_info
);
4392 } else if (opcode
== TGSI_OPCODE_TEX2
) {
4393 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, cube_sc
);
4394 } else if (opcode
== TGSI_OPCODE_TG4
) {
4395 if (is_cube_array
&& ir
->shadow_comparator
) {
4396 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, cube_sc
);
4398 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, component
);
4401 inst
= emit_asm(ir
, opcode
, result_dst
, coord
);
4403 if (ir
->shadow_comparator
)
4404 inst
->tex_shadow
= GL_TRUE
;
4406 if (var
->contains_bindless()) {
4407 inst
->resource
= bindless
;
4408 inst
->resource
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
,
4409 SWIZZLE_X
, SWIZZLE_Y
);
4411 inst
->resource
= sampler
;
4412 inst
->sampler_array_size
= sampler_array_size
;
4413 inst
->sampler_base
= sampler_base
;
4417 if (!inst
->tex_offsets
)
4418 inst
->tex_offsets
= rzalloc_array(inst
, st_src_reg
,
4419 MAX_GLSL_TEXTURE_OFFSET
);
4421 for (i
= 0; i
< MAX_GLSL_TEXTURE_OFFSET
&&
4422 offset
[i
].file
!= PROGRAM_UNDEFINED
; i
++)
4423 inst
->tex_offsets
[i
] = offset
[i
];
4424 inst
->tex_offset_num_offset
= i
;
4427 inst
->tex_target
= sampler_type
->sampler_index();
4428 inst
->tex_type
= ir
->type
->base_type
;
4430 this->result
= result_src
;
4434 glsl_to_tgsi_visitor::visit(ir_return
*ir
)
4436 assert(!ir
->get_value());
4438 emit_asm(ir
, TGSI_OPCODE_RET
);
4442 glsl_to_tgsi_visitor::visit(ir_discard
*ir
)
4444 if (ir
->condition
) {
4445 ir
->condition
->accept(this);
4446 st_src_reg condition
= this->result
;
4448 /* Convert the bool condition to a float so we can negate. */
4449 if (native_integers
) {
4450 st_src_reg temp
= get_temp(ir
->condition
->type
);
4451 emit_asm(ir
, TGSI_OPCODE_AND
, st_dst_reg(temp
),
4452 condition
, st_src_reg_for_float(1.0));
4456 condition
.negate
= ~condition
.negate
;
4457 emit_asm(ir
, TGSI_OPCODE_KILL_IF
, undef_dst
, condition
);
4459 /* unconditional kil */
4460 emit_asm(ir
, TGSI_OPCODE_KILL
);
4465 glsl_to_tgsi_visitor::visit(ir_if
*ir
)
4467 enum tgsi_opcode if_opcode
;
4468 glsl_to_tgsi_instruction
*if_inst
;
4470 ir
->condition
->accept(this);
4471 assert(this->result
.file
!= PROGRAM_UNDEFINED
);
4473 if_opcode
= native_integers
? TGSI_OPCODE_UIF
: TGSI_OPCODE_IF
;
4475 if_inst
= emit_asm(ir
->condition
, if_opcode
, undef_dst
, this->result
);
4477 this->instructions
.push_tail(if_inst
);
4479 visit_exec_list(&ir
->then_instructions
, this);
4481 if (!ir
->else_instructions
.is_empty()) {
4482 emit_asm(ir
->condition
, TGSI_OPCODE_ELSE
);
4483 visit_exec_list(&ir
->else_instructions
, this);
4486 if_inst
= emit_asm(ir
->condition
, TGSI_OPCODE_ENDIF
);
4491 glsl_to_tgsi_visitor::visit(ir_emit_vertex
*ir
)
4493 assert(this->prog
->Target
== GL_GEOMETRY_PROGRAM_NV
);
4495 ir
->stream
->accept(this);
4496 emit_asm(ir
, TGSI_OPCODE_EMIT
, undef_dst
, this->result
);
4500 glsl_to_tgsi_visitor::visit(ir_end_primitive
*ir
)
4502 assert(this->prog
->Target
== GL_GEOMETRY_PROGRAM_NV
);
4504 ir
->stream
->accept(this);
4505 emit_asm(ir
, TGSI_OPCODE_ENDPRIM
, undef_dst
, this->result
);
4509 glsl_to_tgsi_visitor::visit(ir_barrier
*ir
)
4511 assert(this->prog
->Target
== GL_TESS_CONTROL_PROGRAM_NV
||
4512 this->prog
->Target
== GL_COMPUTE_PROGRAM_NV
);
4514 emit_asm(ir
, TGSI_OPCODE_BARRIER
);
4517 glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
4519 STATIC_ASSERT(sizeof(samplers_used
) * 8 >= PIPE_MAX_SAMPLERS
);
4521 result
.file
= PROGRAM_UNDEFINED
;
4528 num_input_arrays
= 0;
4529 num_output_arrays
= 0;
4531 num_atomic_arrays
= 0;
4533 num_address_regs
= 0;
4536 indirect_addr_consts
= false;
4537 wpos_transform_const
= -1;
4538 native_integers
= false;
4539 mem_ctx
= ralloc_context(NULL
);
4543 shader_program
= NULL
;
4548 use_shared_memory
= false;
4549 has_tex_txf_lz
= false;
4553 static void var_destroy(struct hash_entry
*entry
)
4555 variable_storage
*storage
= (variable_storage
*)entry
->data
;
4560 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
4562 _mesa_hash_table_destroy(variables
, var_destroy
);
4564 ralloc_free(mem_ctx
);
4567 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor
*v
)
4574 * Count resources used by the given gpu program (number of texture
4578 count_resources(glsl_to_tgsi_visitor
*v
, gl_program
*prog
)
4580 v
->samplers_used
= 0;
4582 prog
->info
.textures_used_by_txf
= 0;
4584 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &v
->instructions
) {
4585 if (inst
->info
->is_tex
) {
4586 for (int i
= 0; i
< inst
->sampler_array_size
; i
++) {
4587 unsigned idx
= inst
->sampler_base
+ i
;
4588 v
->samplers_used
|= 1u << idx
;
4590 debug_assert(idx
< (int)ARRAY_SIZE(v
->sampler_types
));
4591 v
->sampler_types
[idx
] = inst
->tex_type
;
4592 v
->sampler_targets
[idx
] =
4593 st_translate_texture_target(inst
->tex_target
, inst
->tex_shadow
);
4595 if (inst
->op
== TGSI_OPCODE_TXF
|| inst
->op
== TGSI_OPCODE_TXF_LZ
) {
4596 prog
->info
.textures_used_by_txf
|= 1u << idx
;
4601 if (inst
->tex_target
== TEXTURE_EXTERNAL_INDEX
)
4602 prog
->ExternalSamplersUsed
|= 1 << inst
->resource
.index
;
4604 if (inst
->resource
.file
!= PROGRAM_UNDEFINED
&& (
4605 is_resource_instruction(inst
->op
) ||
4606 inst
->op
== TGSI_OPCODE_STORE
)) {
4607 if (inst
->resource
.file
== PROGRAM_MEMORY
) {
4608 v
->use_shared_memory
= true;
4609 } else if (inst
->resource
.file
== PROGRAM_IMAGE
) {
4610 for (int i
= 0; i
< inst
->sampler_array_size
; i
++) {
4611 unsigned idx
= inst
->sampler_base
+ i
;
4612 v
->images_used
|= 1 << idx
;
4613 v
->image_targets
[idx
] =
4614 st_translate_texture_target(inst
->tex_target
, false);
4615 v
->image_formats
[idx
] = inst
->image_format
;
4620 prog
->SamplersUsed
= v
->samplers_used
;
4622 if (v
->shader_program
!= NULL
)
4623 _mesa_update_shader_textures_used(v
->shader_program
, prog
);
4627 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
4628 * are read from the given src in this instruction
4631 get_src_arg_mask(st_dst_reg dst
, st_src_reg src
)
4633 int read_mask
= 0, comp
;
4635 /* Now, given the src swizzle and the written channels, find which
4636 * components are actually read
4638 for (comp
= 0; comp
< 4; ++comp
) {
4639 const unsigned coord
= GET_SWZ(src
.swizzle
, comp
);
4641 if (dst
.writemask
& (1 << comp
) && coord
<= SWIZZLE_W
)
4642 read_mask
|= 1 << coord
;
4649 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
4650 * instruction is the first instruction to write to register T0. There are
4651 * several lowering passes done in GLSL IR (e.g. branches and
4652 * relative addressing) that create a large number of conditional assignments
4653 * that ir_to_mesa converts to CMP instructions like the one mentioned above.
4655 * Here is why this conversion is safe:
4656 * CMP T0, T1 T2 T0 can be expanded to:
4662 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
4663 * as the original program. If (T1 < 0.0) evaluates to false, executing
4664 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
4665 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
4666 * because any instruction that was going to read from T0 after this was going
4667 * to read a garbage value anyway.
4670 glsl_to_tgsi_visitor::simplify_cmp(void)
4672 int tempWritesSize
= 0;
4673 unsigned *tempWrites
= NULL
;
4674 unsigned outputWrites
[VARYING_SLOT_TESS_MAX
];
4676 memset(outputWrites
, 0, sizeof(outputWrites
));
4678 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4679 unsigned prevWriteMask
= 0;
4681 /* Give up if we encounter relative addressing or flow control. */
4682 if (inst
->dst
[0].reladdr
|| inst
->dst
[0].reladdr2
||
4683 inst
->dst
[1].reladdr
|| inst
->dst
[1].reladdr2
||
4684 inst
->info
->is_branch
||
4685 inst
->op
== TGSI_OPCODE_CONT
||
4686 inst
->op
== TGSI_OPCODE_END
||
4687 inst
->op
== TGSI_OPCODE_RET
) {
4691 if (inst
->dst
[0].file
== PROGRAM_OUTPUT
) {
4692 assert(inst
->dst
[0].index
< (signed)ARRAY_SIZE(outputWrites
));
4693 prevWriteMask
= outputWrites
[inst
->dst
[0].index
];
4694 outputWrites
[inst
->dst
[0].index
] |= inst
->dst
[0].writemask
;
4695 } else if (inst
->dst
[0].file
== PROGRAM_TEMPORARY
) {
4696 if (inst
->dst
[0].index
>= tempWritesSize
) {
4697 const int inc
= 4096;
4699 tempWrites
= (unsigned*)
4701 (tempWritesSize
+ inc
) * sizeof(unsigned));
4705 memset(tempWrites
+ tempWritesSize
, 0, inc
* sizeof(unsigned));
4706 tempWritesSize
+= inc
;
4709 prevWriteMask
= tempWrites
[inst
->dst
[0].index
];
4710 tempWrites
[inst
->dst
[0].index
] |= inst
->dst
[0].writemask
;
4714 /* For a CMP to be considered a conditional write, the destination
4715 * register and source register two must be the same. */
4716 if (inst
->op
== TGSI_OPCODE_CMP
4717 && !(inst
->dst
[0].writemask
& prevWriteMask
)
4718 && inst
->src
[2].file
== inst
->dst
[0].file
4719 && inst
->src
[2].index
== inst
->dst
[0].index
4720 && inst
->dst
[0].writemask
==
4721 get_src_arg_mask(inst
->dst
[0], inst
->src
[2])) {
4723 inst
->op
= TGSI_OPCODE_MOV
;
4724 inst
->info
= tgsi_get_opcode_info(inst
->op
);
4725 inst
->src
[0] = inst
->src
[1];
4733 rename_temp_handle_src(struct rename_reg_pair
*renames
, st_src_reg
*src
)
4735 if (src
&& src
->file
== PROGRAM_TEMPORARY
) {
4736 int old_idx
= src
->index
;
4737 if (renames
[old_idx
].valid
)
4738 src
->index
= renames
[old_idx
].new_reg
;
4742 /* Replaces all references to a temporary register index with another index. */
4744 glsl_to_tgsi_visitor::rename_temp_registers(struct rename_reg_pair
*renames
)
4746 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4748 for (j
= 0; j
< num_inst_src_regs(inst
); j
++) {
4749 rename_temp_handle_src(renames
, &inst
->src
[j
]);
4750 rename_temp_handle_src(renames
, inst
->src
[j
].reladdr
);
4751 rename_temp_handle_src(renames
, inst
->src
[j
].reladdr2
);
4754 for (j
= 0; j
< inst
->tex_offset_num_offset
; j
++) {
4755 rename_temp_handle_src(renames
, &inst
->tex_offsets
[j
]);
4756 rename_temp_handle_src(renames
, inst
->tex_offsets
[j
].reladdr
);
4757 rename_temp_handle_src(renames
, inst
->tex_offsets
[j
].reladdr2
);
4760 rename_temp_handle_src(renames
, &inst
->resource
);
4761 rename_temp_handle_src(renames
, inst
->resource
.reladdr
);
4762 rename_temp_handle_src(renames
, inst
->resource
.reladdr2
);
4764 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4765 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
) {
4766 int old_idx
= inst
->dst
[j
].index
;
4767 if (renames
[old_idx
].valid
)
4768 inst
->dst
[j
].index
= renames
[old_idx
].new_reg
;
4770 rename_temp_handle_src(renames
, inst
->dst
[j
].reladdr
);
4771 rename_temp_handle_src(renames
, inst
->dst
[j
].reladdr2
);
4777 glsl_to_tgsi_visitor::get_first_temp_write(int *first_writes
)
4779 int depth
= 0; /* loop depth */
4780 int loop_start
= -1; /* index of the first active BGNLOOP (if any) */
4783 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4784 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4785 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
) {
4786 if (first_writes
[inst
->dst
[j
].index
] == -1)
4787 first_writes
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4791 if (inst
->op
== TGSI_OPCODE_BGNLOOP
) {
4794 } else if (inst
->op
== TGSI_OPCODE_ENDLOOP
) {
4804 glsl_to_tgsi_visitor::get_first_temp_read(int *first_reads
)
4806 int depth
= 0; /* loop depth */
4807 int loop_start
= -1; /* index of the first active BGNLOOP (if any) */
4810 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4811 for (j
= 0; j
< num_inst_src_regs(inst
); j
++) {
4812 if (inst
->src
[j
].file
== PROGRAM_TEMPORARY
) {
4813 if (first_reads
[inst
->src
[j
].index
] == -1)
4814 first_reads
[inst
->src
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4817 for (j
= 0; j
< inst
->tex_offset_num_offset
; j
++) {
4818 if (inst
->tex_offsets
[j
].file
== PROGRAM_TEMPORARY
) {
4819 if (first_reads
[inst
->tex_offsets
[j
].index
] == -1)
4820 first_reads
[inst
->tex_offsets
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4823 if (inst
->op
== TGSI_OPCODE_BGNLOOP
) {
4826 } else if (inst
->op
== TGSI_OPCODE_ENDLOOP
) {
4836 glsl_to_tgsi_visitor::get_last_temp_read_first_temp_write(int *last_reads
, int *first_writes
)
4838 int depth
= 0; /* loop depth */
4839 int loop_start
= -1; /* index of the first active BGNLOOP (if any) */
4842 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4843 for (j
= 0; j
< num_inst_src_regs(inst
); j
++) {
4844 if (inst
->src
[j
].file
== PROGRAM_TEMPORARY
)
4845 last_reads
[inst
->src
[j
].index
] = (depth
== 0) ? i
: -2;
4847 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4848 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
) {
4849 if (first_writes
[inst
->dst
[j
].index
] == -1)
4850 first_writes
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4851 last_reads
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: -2;
4854 for (j
= 0; j
< inst
->tex_offset_num_offset
; j
++) {
4855 if (inst
->tex_offsets
[j
].file
== PROGRAM_TEMPORARY
)
4856 last_reads
[inst
->tex_offsets
[j
].index
] = (depth
== 0) ? i
: -2;
4858 if (inst
->op
== TGSI_OPCODE_BGNLOOP
) {
4861 } else if (inst
->op
== TGSI_OPCODE_ENDLOOP
) {
4864 for (k
= 0; k
< this->next_temp
; k
++) {
4865 if (last_reads
[k
] == -2) {
4877 glsl_to_tgsi_visitor::get_last_temp_write(int *last_writes
)
4879 int depth
= 0; /* loop depth */
4883 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4884 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4885 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
)
4886 last_writes
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: -2;
4889 if (inst
->op
== TGSI_OPCODE_BGNLOOP
)
4891 else if (inst
->op
== TGSI_OPCODE_ENDLOOP
)
4893 for (k
= 0; k
< this->next_temp
; k
++) {
4894 if (last_writes
[k
] == -2) {
4905 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
4906 * channels for copy propagation and updates following instructions to
4907 * use the original versions.
4909 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
4910 * will occur. As an example, a TXP production before this pass:
4912 * 0: MOV TEMP[1], INPUT[4].xyyy;
4913 * 1: MOV TEMP[1].w, INPUT[4].wwww;
4914 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
4918 * 0: MOV TEMP[1], INPUT[4].xyyy;
4919 * 1: MOV TEMP[1].w, INPUT[4].wwww;
4920 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
4922 * which allows for dead code elimination on TEMP[1]'s writes.
4925 glsl_to_tgsi_visitor::copy_propagate(void)
4927 glsl_to_tgsi_instruction
**acp
= rzalloc_array(mem_ctx
,
4928 glsl_to_tgsi_instruction
*,
4929 this->next_temp
* 4);
4930 int *acp_level
= rzalloc_array(mem_ctx
, int, this->next_temp
* 4);
4933 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4934 assert(inst
->dst
[0].file
!= PROGRAM_TEMPORARY
4935 || inst
->dst
[0].index
< this->next_temp
);
4937 /* First, do any copy propagation possible into the src regs. */
4938 for (int r
= 0; r
< 3; r
++) {
4939 glsl_to_tgsi_instruction
*first
= NULL
;
4941 int acp_base
= inst
->src
[r
].index
* 4;
4943 if (inst
->src
[r
].file
!= PROGRAM_TEMPORARY
||
4944 inst
->src
[r
].reladdr
||
4945 inst
->src
[r
].reladdr2
)
4948 /* See if we can find entries in the ACP consisting of MOVs
4949 * from the same src register for all the swizzled channels
4950 * of this src register reference.
4952 for (int i
= 0; i
< 4; i
++) {
4953 int src_chan
= GET_SWZ(inst
->src
[r
].swizzle
, i
);
4954 glsl_to_tgsi_instruction
*copy_chan
= acp
[acp_base
+ src_chan
];
4961 assert(acp_level
[acp_base
+ src_chan
] <= level
);
4966 if (first
->src
[0].file
!= copy_chan
->src
[0].file
||
4967 first
->src
[0].index
!= copy_chan
->src
[0].index
||
4968 first
->src
[0].double_reg2
!= copy_chan
->src
[0].double_reg2
||
4969 first
->src
[0].index2D
!= copy_chan
->src
[0].index2D
) {
4977 /* We've now validated that we can copy-propagate to
4978 * replace this src register reference. Do it.
4980 inst
->src
[r
].file
= first
->src
[0].file
;
4981 inst
->src
[r
].index
= first
->src
[0].index
;
4982 inst
->src
[r
].index2D
= first
->src
[0].index2D
;
4983 inst
->src
[r
].has_index2
= first
->src
[0].has_index2
;
4984 inst
->src
[r
].double_reg2
= first
->src
[0].double_reg2
;
4985 inst
->src
[r
].array_id
= first
->src
[0].array_id
;
4988 for (int i
= 0; i
< 4; i
++) {
4989 int src_chan
= GET_SWZ(inst
->src
[r
].swizzle
, i
);
4990 glsl_to_tgsi_instruction
*copy_inst
= acp
[acp_base
+ src_chan
];
4991 swizzle
|= (GET_SWZ(copy_inst
->src
[0].swizzle
, src_chan
) << (3 * i
));
4993 inst
->src
[r
].swizzle
= swizzle
;
4998 case TGSI_OPCODE_BGNLOOP
:
4999 case TGSI_OPCODE_ENDLOOP
:
5000 /* End of a basic block, clear the ACP entirely. */
5001 memset(acp
, 0, sizeof(*acp
) * this->next_temp
* 4);
5004 case TGSI_OPCODE_IF
:
5005 case TGSI_OPCODE_UIF
:
5009 case TGSI_OPCODE_ENDIF
:
5010 case TGSI_OPCODE_ELSE
:
5011 /* Clear all channels written inside the block from the ACP, but
5012 * leaving those that were not touched.
5014 for (int r
= 0; r
< this->next_temp
; r
++) {
5015 for (int c
= 0; c
< 4; c
++) {
5016 if (!acp
[4 * r
+ c
])
5019 if (acp_level
[4 * r
+ c
] >= level
)
5020 acp
[4 * r
+ c
] = NULL
;
5023 if (inst
->op
== TGSI_OPCODE_ENDIF
)
5028 /* Continuing the block, clear any written channels from
5031 for (int d
= 0; d
< 2; d
++) {
5032 if (inst
->dst
[d
].file
== PROGRAM_TEMPORARY
&& inst
->dst
[d
].reladdr
) {
5033 /* Any temporary might be written, so no copy propagation
5034 * across this instruction.
5036 memset(acp
, 0, sizeof(*acp
) * this->next_temp
* 4);
5037 } else if (inst
->dst
[d
].file
== PROGRAM_OUTPUT
&&
5038 inst
->dst
[d
].reladdr
) {
5039 /* Any output might be written, so no copy propagation
5040 * from outputs across this instruction.
5042 for (int r
= 0; r
< this->next_temp
; r
++) {
5043 for (int c
= 0; c
< 4; c
++) {
5044 if (!acp
[4 * r
+ c
])
5047 if (acp
[4 * r
+ c
]->src
[0].file
== PROGRAM_OUTPUT
)
5048 acp
[4 * r
+ c
] = NULL
;
5051 } else if (inst
->dst
[d
].file
== PROGRAM_TEMPORARY
||
5052 inst
->dst
[d
].file
== PROGRAM_OUTPUT
) {
5053 /* Clear where it's used as dst. */
5054 if (inst
->dst
[d
].file
== PROGRAM_TEMPORARY
) {
5055 for (int c
= 0; c
< 4; c
++) {
5056 if (inst
->dst
[d
].writemask
& (1 << c
))
5057 acp
[4 * inst
->dst
[d
].index
+ c
] = NULL
;
5061 /* Clear where it's used as src. */
5062 for (int r
= 0; r
< this->next_temp
; r
++) {
5063 for (int c
= 0; c
< 4; c
++) {
5064 if (!acp
[4 * r
+ c
])
5067 int src_chan
= GET_SWZ(acp
[4 * r
+ c
]->src
[0].swizzle
, c
);
5069 if (acp
[4 * r
+ c
]->src
[0].file
== inst
->dst
[d
].file
&&
5070 acp
[4 * r
+ c
]->src
[0].index
== inst
->dst
[d
].index
&&
5071 inst
->dst
[d
].writemask
& (1 << src_chan
)) {
5072 acp
[4 * r
+ c
] = NULL
;
5081 /* If this is a copy, add it to the ACP. */
5082 if (inst
->op
== TGSI_OPCODE_MOV
&&
5083 inst
->dst
[0].file
== PROGRAM_TEMPORARY
&&
5084 !(inst
->dst
[0].file
== inst
->src
[0].file
&&
5085 inst
->dst
[0].index
== inst
->src
[0].index
) &&
5086 !inst
->dst
[0].reladdr
&&
5087 !inst
->dst
[0].reladdr2
&&
5089 inst
->src
[0].file
!= PROGRAM_ARRAY
&&
5090 (inst
->src
[0].file
!= PROGRAM_OUTPUT
||
5091 this->shader
->Stage
!= MESA_SHADER_TESS_CTRL
) &&
5092 !inst
->src
[0].reladdr
&&
5093 !inst
->src
[0].reladdr2
&&
5094 !inst
->src
[0].negate
&&
5095 !inst
->src
[0].abs
) {
5096 for (int i
= 0; i
< 4; i
++) {
5097 if (inst
->dst
[0].writemask
& (1 << i
)) {
5098 acp
[4 * inst
->dst
[0].index
+ i
] = inst
;
5099 acp_level
[4 * inst
->dst
[0].index
+ i
] = level
;
5105 ralloc_free(acp_level
);
5110 dead_code_handle_reladdr(glsl_to_tgsi_instruction
**writes
, st_src_reg
*reladdr
)
5112 if (reladdr
&& reladdr
->file
== PROGRAM_TEMPORARY
) {
5113 /* Clear where it's used as src. */
5114 int swz
= GET_SWZ(reladdr
->swizzle
, 0);
5115 writes
[4 * reladdr
->index
+ swz
] = NULL
;
5120 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
5123 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
5124 * will occur. As an example, a TXP production after copy propagation but
5127 * 0: MOV TEMP[1], INPUT[4].xyyy;
5128 * 1: MOV TEMP[1].w, INPUT[4].wwww;
5129 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5131 * and after this pass:
5133 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5136 glsl_to_tgsi_visitor::eliminate_dead_code(void)
5138 glsl_to_tgsi_instruction
**writes
= rzalloc_array(mem_ctx
,
5139 glsl_to_tgsi_instruction
*,
5140 this->next_temp
* 4);
5141 int *write_level
= rzalloc_array(mem_ctx
, int, this->next_temp
* 4);
5145 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
5146 assert(inst
->dst
[0].file
!= PROGRAM_TEMPORARY
5147 || inst
->dst
[0].index
< this->next_temp
);
5150 case TGSI_OPCODE_BGNLOOP
:
5151 case TGSI_OPCODE_ENDLOOP
:
5152 case TGSI_OPCODE_CONT
:
5153 case TGSI_OPCODE_BRK
:
5154 /* End of a basic block, clear the write array entirely.
5156 * This keeps us from killing dead code when the writes are
5157 * on either side of a loop, even when the register isn't touched
5158 * inside the loop. However, glsl_to_tgsi_visitor doesn't seem to emit
5159 * dead code of this type, so it shouldn't make a difference as long as
5160 * the dead code elimination pass in the GLSL compiler does its job.
5162 memset(writes
, 0, sizeof(*writes
) * this->next_temp
* 4);
5165 case TGSI_OPCODE_ENDIF
:
5166 case TGSI_OPCODE_ELSE
:
5167 /* Promote the recorded level of all channels written inside the
5168 * preceding if or else block to the level above the if/else block.
5170 for (int r
= 0; r
< this->next_temp
; r
++) {
5171 for (int c
= 0; c
< 4; c
++) {
5172 if (!writes
[4 * r
+ c
])
5175 if (write_level
[4 * r
+ c
] == level
)
5176 write_level
[4 * r
+ c
] = level
-1;
5179 if (inst
->op
== TGSI_OPCODE_ENDIF
)
5183 case TGSI_OPCODE_IF
:
5184 case TGSI_OPCODE_UIF
:
5186 /* fallthrough to default case to mark the condition as read */
5188 /* Continuing the block, clear any channels from the write array that
5189 * are read by this instruction.
5191 for (unsigned i
= 0; i
< ARRAY_SIZE(inst
->src
); i
++) {
5192 if (inst
->src
[i
].file
== PROGRAM_TEMPORARY
&& inst
->src
[i
].reladdr
){
5193 /* Any temporary might be read, so no dead code elimination
5194 * across this instruction.
5196 memset(writes
, 0, sizeof(*writes
) * this->next_temp
* 4);
5197 } else if (inst
->src
[i
].file
== PROGRAM_TEMPORARY
) {
5198 /* Clear where it's used as src. */
5199 int src_chans
= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 0);
5200 src_chans
|= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 1);
5201 src_chans
|= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 2);
5202 src_chans
|= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 3);
5204 for (int c
= 0; c
< 4; c
++) {
5205 if (src_chans
& (1 << c
))
5206 writes
[4 * inst
->src
[i
].index
+ c
] = NULL
;
5209 dead_code_handle_reladdr(writes
, inst
->src
[i
].reladdr
);
5210 dead_code_handle_reladdr(writes
, inst
->src
[i
].reladdr2
);
5212 for (unsigned i
= 0; i
< inst
->tex_offset_num_offset
; i
++) {
5213 if (inst
->tex_offsets
[i
].file
== PROGRAM_TEMPORARY
&& inst
->tex_offsets
[i
].reladdr
){
5214 /* Any temporary might be read, so no dead code elimination
5215 * across this instruction.
5217 memset(writes
, 0, sizeof(*writes
) * this->next_temp
* 4);
5218 } else if (inst
->tex_offsets
[i
].file
== PROGRAM_TEMPORARY
) {
5219 /* Clear where it's used as src. */
5220 int src_chans
= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 0);
5221 src_chans
|= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 1);
5222 src_chans
|= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 2);
5223 src_chans
|= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 3);
5225 for (int c
= 0; c
< 4; c
++) {
5226 if (src_chans
& (1 << c
))
5227 writes
[4 * inst
->tex_offsets
[i
].index
+ c
] = NULL
;
5230 dead_code_handle_reladdr(writes
, inst
->tex_offsets
[i
].reladdr
);
5231 dead_code_handle_reladdr(writes
, inst
->tex_offsets
[i
].reladdr2
);
5234 if (inst
->resource
.file
== PROGRAM_TEMPORARY
) {
5237 src_chans
= 1 << GET_SWZ(inst
->resource
.swizzle
, 0);
5238 src_chans
|= 1 << GET_SWZ(inst
->resource
.swizzle
, 1);
5239 src_chans
|= 1 << GET_SWZ(inst
->resource
.swizzle
, 2);
5240 src_chans
|= 1 << GET_SWZ(inst
->resource
.swizzle
, 3);
5242 for (int c
= 0; c
< 4; c
++) {
5243 if (src_chans
& (1 << c
))
5244 writes
[4 * inst
->resource
.index
+ c
] = NULL
;
5247 dead_code_handle_reladdr(writes
, inst
->resource
.reladdr
);
5248 dead_code_handle_reladdr(writes
, inst
->resource
.reladdr2
);
5250 for (unsigned i
= 0; i
< ARRAY_SIZE(inst
->dst
); i
++) {
5251 dead_code_handle_reladdr(writes
, inst
->dst
[i
].reladdr
);
5252 dead_code_handle_reladdr(writes
, inst
->dst
[i
].reladdr2
);
5257 /* If this instruction writes to a temporary, add it to the write array.
5258 * If there is already an instruction in the write array for one or more
5259 * of the channels, flag that channel write as dead.
5261 for (unsigned i
= 0; i
< ARRAY_SIZE(inst
->dst
); i
++) {
5262 if (inst
->dst
[i
].file
== PROGRAM_TEMPORARY
&&
5263 !inst
->dst
[i
].reladdr
) {
5264 for (int c
= 0; c
< 4; c
++) {
5265 if (inst
->dst
[i
].writemask
& (1 << c
)) {
5266 if (writes
[4 * inst
->dst
[i
].index
+ c
]) {
5267 if (write_level
[4 * inst
->dst
[i
].index
+ c
] < level
)
5270 writes
[4 * inst
->dst
[i
].index
+ c
]->dead_mask
|= (1 << c
);
5272 writes
[4 * inst
->dst
[i
].index
+ c
] = inst
;
5273 write_level
[4 * inst
->dst
[i
].index
+ c
] = level
;
5280 /* Anything still in the write array at this point is dead code. */
5281 for (int r
= 0; r
< this->next_temp
; r
++) {
5282 for (int c
= 0; c
< 4; c
++) {
5283 glsl_to_tgsi_instruction
*inst
= writes
[4 * r
+ c
];
5285 inst
->dead_mask
|= (1 << c
);
5289 /* Now actually remove the instructions that are completely dead and update
5290 * the writemask of other instructions with dead channels.
5292 foreach_in_list_safe(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
5293 if (!inst
->dead_mask
|| !inst
->dst
[0].writemask
)
5295 /* No amount of dead masks should remove memory stores */
5296 if (inst
->info
->is_store
)
5299 if ((inst
->dst
[0].writemask
& ~inst
->dead_mask
) == 0) {
5304 if (glsl_base_type_is_64bit(inst
->dst
[0].type
)) {
5305 if (inst
->dead_mask
== WRITEMASK_XY
||
5306 inst
->dead_mask
== WRITEMASK_ZW
)
5307 inst
->dst
[0].writemask
&= ~(inst
->dead_mask
);
5309 inst
->dst
[0].writemask
&= ~(inst
->dead_mask
);
5313 ralloc_free(write_level
);
5314 ralloc_free(writes
);
5319 /* merge DFRACEXP instructions into one. */
5321 glsl_to_tgsi_visitor::merge_two_dsts(void)
5323 /* We never delete inst, but we may delete its successor. */
5324 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
5325 glsl_to_tgsi_instruction
*inst2
;
5328 if (num_inst_dst_regs(inst
) != 2)
5331 if (inst
->dst
[0].file
!= PROGRAM_UNDEFINED
&&
5332 inst
->dst
[1].file
!= PROGRAM_UNDEFINED
)
5335 assert(inst
->dst
[0].file
!= PROGRAM_UNDEFINED
||
5336 inst
->dst
[1].file
!= PROGRAM_UNDEFINED
);
5338 if (inst
->dst
[0].file
== PROGRAM_UNDEFINED
)
5343 inst2
= (glsl_to_tgsi_instruction
*) inst
->next
;
5344 while (!inst2
->is_tail_sentinel()) {
5345 if (inst
->op
== inst2
->op
&&
5346 inst2
->dst
[defined
].file
== PROGRAM_UNDEFINED
&&
5347 inst
->src
[0].file
== inst2
->src
[0].file
&&
5348 inst
->src
[0].index
== inst2
->src
[0].index
&&
5349 inst
->src
[0].type
== inst2
->src
[0].type
&&
5350 inst
->src
[0].swizzle
== inst2
->src
[0].swizzle
)
5352 inst2
= (glsl_to_tgsi_instruction
*) inst2
->next
;
5355 if (inst2
->is_tail_sentinel()) {
5356 /* Undefined destinations are not allowed, substitute with an unused
5357 * temporary register.
5359 st_src_reg tmp
= get_temp(glsl_type::vec4_type
);
5360 inst
->dst
[defined
^ 1] = st_dst_reg(tmp
);
5361 inst
->dst
[defined
^ 1].writemask
= 0;
5365 inst
->dst
[defined
^ 1] = inst2
->dst
[defined
^ 1];
5371 /* Merges temporary registers together where possible to reduce the number of
5372 * registers needed to run a program.
5374 * Produces optimal code only after copy propagation and dead code elimination
5377 glsl_to_tgsi_visitor::merge_registers(void)
5379 struct lifetime
*lifetimes
=
5380 rzalloc_array(mem_ctx
, struct lifetime
, this->next_temp
);
5382 if (get_temp_registers_required_lifetimes(mem_ctx
, &this->instructions
,
5383 this->next_temp
, lifetimes
)) {
5384 struct rename_reg_pair
*renames
=
5385 rzalloc_array(mem_ctx
, struct rename_reg_pair
, this->next_temp
);
5386 get_temp_registers_remapping(mem_ctx
, this->next_temp
, lifetimes
, renames
);
5387 rename_temp_registers(renames
);
5388 ralloc_free(renames
);
5391 ralloc_free(lifetimes
);
5394 /* Reassign indices to temporary registers by reusing unused indices created
5395 * by optimization passes. */
5397 glsl_to_tgsi_visitor::renumber_registers(void)
5401 int *first_writes
= ralloc_array(mem_ctx
, int, this->next_temp
);
5402 struct rename_reg_pair
*renames
= rzalloc_array(mem_ctx
, struct rename_reg_pair
, this->next_temp
);
5404 for (i
= 0; i
< this->next_temp
; i
++) {
5405 first_writes
[i
] = -1;
5407 get_first_temp_write(first_writes
);
5409 for (i
= 0; i
< this->next_temp
; i
++) {
5410 if (first_writes
[i
] < 0) continue;
5411 if (i
!= new_index
) {
5412 renames
[i
].new_reg
= new_index
;
5413 renames
[i
].valid
= true;
5418 rename_temp_registers(renames
);
5419 this->next_temp
= new_index
;
5420 ralloc_free(renames
);
5421 ralloc_free(first_writes
);
5424 /* ------------------------- TGSI conversion stuff -------------------------- */
5427 * Intermediate state used during shader translation.
5429 struct st_translate
{
5430 struct ureg_program
*ureg
;
5432 unsigned temps_size
;
5433 struct ureg_dst
*temps
;
5435 struct ureg_dst
*arrays
;
5436 unsigned num_temp_arrays
;
5437 struct ureg_src
*constants
;
5439 struct ureg_src
*immediates
;
5441 struct ureg_dst outputs
[PIPE_MAX_SHADER_OUTPUTS
];
5442 struct ureg_src inputs
[PIPE_MAX_SHADER_INPUTS
];
5443 struct ureg_dst address
[3];
5444 struct ureg_src samplers
[PIPE_MAX_SAMPLERS
];
5445 struct ureg_src buffers
[PIPE_MAX_SHADER_BUFFERS
];
5446 struct ureg_src images
[PIPE_MAX_SHADER_IMAGES
];
5447 struct ureg_src systemValues
[SYSTEM_VALUE_MAX
];
5448 struct ureg_src hw_atomics
[PIPE_MAX_HW_ATOMIC_BUFFERS
];
5449 struct ureg_src shared_memory
;
5450 unsigned *array_sizes
;
5451 struct inout_decl
*input_decls
;
5452 unsigned num_input_decls
;
5453 struct inout_decl
*output_decls
;
5454 unsigned num_output_decls
;
5456 const ubyte
*inputMapping
;
5457 const ubyte
*outputMapping
;
5459 enum pipe_shader_type procType
; /**< PIPE_SHADER_VERTEX/FRAGMENT */
5463 /** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
5465 _mesa_sysval_to_semantic(unsigned sysval
)
5469 case SYSTEM_VALUE_VERTEX_ID
:
5470 return TGSI_SEMANTIC_VERTEXID
;
5471 case SYSTEM_VALUE_INSTANCE_ID
:
5472 return TGSI_SEMANTIC_INSTANCEID
;
5473 case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE
:
5474 return TGSI_SEMANTIC_VERTEXID_NOBASE
;
5475 case SYSTEM_VALUE_BASE_VERTEX
:
5476 return TGSI_SEMANTIC_BASEVERTEX
;
5477 case SYSTEM_VALUE_BASE_INSTANCE
:
5478 return TGSI_SEMANTIC_BASEINSTANCE
;
5479 case SYSTEM_VALUE_DRAW_ID
:
5480 return TGSI_SEMANTIC_DRAWID
;
5482 /* Geometry shader */
5483 case SYSTEM_VALUE_INVOCATION_ID
:
5484 return TGSI_SEMANTIC_INVOCATIONID
;
5486 /* Fragment shader */
5487 case SYSTEM_VALUE_FRAG_COORD
:
5488 return TGSI_SEMANTIC_POSITION
;
5489 case SYSTEM_VALUE_FRONT_FACE
:
5490 return TGSI_SEMANTIC_FACE
;
5491 case SYSTEM_VALUE_SAMPLE_ID
:
5492 return TGSI_SEMANTIC_SAMPLEID
;
5493 case SYSTEM_VALUE_SAMPLE_POS
:
5494 return TGSI_SEMANTIC_SAMPLEPOS
;
5495 case SYSTEM_VALUE_SAMPLE_MASK_IN
:
5496 return TGSI_SEMANTIC_SAMPLEMASK
;
5497 case SYSTEM_VALUE_HELPER_INVOCATION
:
5498 return TGSI_SEMANTIC_HELPER_INVOCATION
;
5500 /* Tessellation shader */
5501 case SYSTEM_VALUE_TESS_COORD
:
5502 return TGSI_SEMANTIC_TESSCOORD
;
5503 case SYSTEM_VALUE_VERTICES_IN
:
5504 return TGSI_SEMANTIC_VERTICESIN
;
5505 case SYSTEM_VALUE_PRIMITIVE_ID
:
5506 return TGSI_SEMANTIC_PRIMID
;
5507 case SYSTEM_VALUE_TESS_LEVEL_OUTER
:
5508 return TGSI_SEMANTIC_TESSOUTER
;
5509 case SYSTEM_VALUE_TESS_LEVEL_INNER
:
5510 return TGSI_SEMANTIC_TESSINNER
;
5512 /* Compute shader */
5513 case SYSTEM_VALUE_LOCAL_INVOCATION_ID
:
5514 return TGSI_SEMANTIC_THREAD_ID
;
5515 case SYSTEM_VALUE_WORK_GROUP_ID
:
5516 return TGSI_SEMANTIC_BLOCK_ID
;
5517 case SYSTEM_VALUE_NUM_WORK_GROUPS
:
5518 return TGSI_SEMANTIC_GRID_SIZE
;
5519 case SYSTEM_VALUE_LOCAL_GROUP_SIZE
:
5520 return TGSI_SEMANTIC_BLOCK_SIZE
;
5522 /* ARB_shader_ballot */
5523 case SYSTEM_VALUE_SUBGROUP_SIZE
:
5524 return TGSI_SEMANTIC_SUBGROUP_SIZE
;
5525 case SYSTEM_VALUE_SUBGROUP_INVOCATION
:
5526 return TGSI_SEMANTIC_SUBGROUP_INVOCATION
;
5527 case SYSTEM_VALUE_SUBGROUP_EQ_MASK
:
5528 return TGSI_SEMANTIC_SUBGROUP_EQ_MASK
;
5529 case SYSTEM_VALUE_SUBGROUP_GE_MASK
:
5530 return TGSI_SEMANTIC_SUBGROUP_GE_MASK
;
5531 case SYSTEM_VALUE_SUBGROUP_GT_MASK
:
5532 return TGSI_SEMANTIC_SUBGROUP_GT_MASK
;
5533 case SYSTEM_VALUE_SUBGROUP_LE_MASK
:
5534 return TGSI_SEMANTIC_SUBGROUP_LE_MASK
;
5535 case SYSTEM_VALUE_SUBGROUP_LT_MASK
:
5536 return TGSI_SEMANTIC_SUBGROUP_LT_MASK
;
5539 case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX
:
5540 case SYSTEM_VALUE_GLOBAL_INVOCATION_ID
:
5541 case SYSTEM_VALUE_VERTEX_CNT
:
5543 assert(!"Unexpected SYSTEM_VALUE_ enum");
5544 return TGSI_SEMANTIC_COUNT
;
5549 * Map a glsl_to_tgsi constant/immediate to a TGSI immediate.
5551 static struct ureg_src
5552 emit_immediate(struct st_translate
*t
,
5553 gl_constant_value values
[4],
5554 GLenum type
, int size
)
5556 struct ureg_program
*ureg
= t
->ureg
;
5560 return ureg_DECL_immediate(ureg
, &values
[0].f
, size
);
5562 return ureg_DECL_immediate_f64(ureg
, (double *)&values
[0].f
, size
);
5564 return ureg_DECL_immediate_int64(ureg
, (int64_t *)&values
[0].f
, size
);
5565 case GL_UNSIGNED_INT64_ARB
:
5566 return ureg_DECL_immediate_uint64(ureg
, (uint64_t *)&values
[0].f
, size
);
5568 return ureg_DECL_immediate_int(ureg
, &values
[0].i
, size
);
5569 case GL_UNSIGNED_INT
:
5571 return ureg_DECL_immediate_uint(ureg
, &values
[0].u
, size
);
5573 assert(!"should not get here - type must be float, int, uint, or bool");
5574 return ureg_src_undef();
5579 * Map a glsl_to_tgsi dst register to a TGSI ureg_dst register.
5581 static struct ureg_dst
5582 dst_register(struct st_translate
*t
, gl_register_file file
, unsigned index
,
5588 case PROGRAM_UNDEFINED
:
5589 return ureg_dst_undef();
5591 case PROGRAM_TEMPORARY
:
5592 /* Allocate space for temporaries on demand. */
5593 if (index
>= t
->temps_size
) {
5594 const int inc
= align(index
- t
->temps_size
+ 1, 4096);
5596 t
->temps
= (struct ureg_dst
*)
5598 (t
->temps_size
+ inc
) * sizeof(struct ureg_dst
));
5600 return ureg_dst_undef();
5602 memset(t
->temps
+ t
->temps_size
, 0, inc
* sizeof(struct ureg_dst
));
5603 t
->temps_size
+= inc
;
5606 if (ureg_dst_is_undef(t
->temps
[index
]))
5607 t
->temps
[index
] = ureg_DECL_local_temporary(t
->ureg
);
5609 return t
->temps
[index
];
5612 assert(array_id
&& array_id
<= t
->num_temp_arrays
);
5613 array
= array_id
- 1;
5615 if (ureg_dst_is_undef(t
->arrays
[array
]))
5616 t
->arrays
[array
] = ureg_DECL_array_temporary(
5617 t
->ureg
, t
->array_sizes
[array
], TRUE
);
5619 return ureg_dst_array_offset(t
->arrays
[array
], index
);
5621 case PROGRAM_OUTPUT
:
5623 if (t
->procType
== PIPE_SHADER_FRAGMENT
)
5624 assert(index
< 2 * FRAG_RESULT_MAX
);
5625 else if (t
->procType
== PIPE_SHADER_TESS_CTRL
||
5626 t
->procType
== PIPE_SHADER_TESS_EVAL
)
5627 assert(index
< VARYING_SLOT_TESS_MAX
);
5629 assert(index
< VARYING_SLOT_MAX
);
5631 assert(t
->outputMapping
[index
] < ARRAY_SIZE(t
->outputs
));
5632 assert(t
->outputs
[t
->outputMapping
[index
]].File
!= TGSI_FILE_NULL
);
5633 return t
->outputs
[t
->outputMapping
[index
]];
5636 struct inout_decl
*decl
=
5637 find_inout_array(t
->output_decls
,
5638 t
->num_output_decls
, array_id
);
5639 unsigned mesa_index
= decl
->mesa_index
;
5640 int slot
= t
->outputMapping
[mesa_index
];
5642 assert(slot
!= -1 && t
->outputs
[slot
].File
== TGSI_FILE_OUTPUT
);
5644 struct ureg_dst dst
= t
->outputs
[slot
];
5645 dst
.ArrayID
= array_id
;
5646 return ureg_dst_array_offset(dst
, index
- mesa_index
);
5649 case PROGRAM_ADDRESS
:
5650 return t
->address
[index
];
5653 assert(!"unknown dst register file");
5654 return ureg_dst_undef();
5658 static struct ureg_src
5659 translate_src(struct st_translate
*t
, const st_src_reg
*src_reg
);
5661 static struct ureg_src
5662 translate_addr(struct st_translate
*t
, const st_src_reg
*reladdr
,
5663 unsigned addr_index
)
5665 if (t
->need_uarl
|| !reladdr
->is_legal_tgsi_address_operand())
5666 return ureg_src(t
->address
[addr_index
]);
5668 return translate_src(t
, reladdr
);
5672 * Create a TGSI ureg_dst register from an st_dst_reg.
5674 static struct ureg_dst
5675 translate_dst(struct st_translate
*t
,
5676 const st_dst_reg
*dst_reg
,
5679 struct ureg_dst dst
= dst_register(t
, dst_reg
->file
, dst_reg
->index
,
5682 if (dst
.File
== TGSI_FILE_NULL
)
5685 dst
= ureg_writemask(dst
, dst_reg
->writemask
);
5688 dst
= ureg_saturate(dst
);
5690 if (dst_reg
->reladdr
!= NULL
) {
5691 assert(dst_reg
->file
!= PROGRAM_TEMPORARY
);
5692 dst
= ureg_dst_indirect(dst
, translate_addr(t
, dst_reg
->reladdr
, 0));
5695 if (dst_reg
->has_index2
) {
5696 if (dst_reg
->reladdr2
)
5697 dst
= ureg_dst_dimension_indirect(dst
,
5698 translate_addr(t
, dst_reg
->reladdr2
, 1),
5701 dst
= ureg_dst_dimension(dst
, dst_reg
->index2D
);
5708 * Create a TGSI ureg_src register from an st_src_reg.
5710 static struct ureg_src
5711 translate_src(struct st_translate
*t
, const st_src_reg
*src_reg
)
5713 struct ureg_src src
;
5714 int index
= src_reg
->index
;
5715 int double_reg2
= src_reg
->double_reg2
? 1 : 0;
5717 switch (src_reg
->file
) {
5718 case PROGRAM_UNDEFINED
:
5719 src
= ureg_imm4f(t
->ureg
, 0, 0, 0, 0);
5722 case PROGRAM_TEMPORARY
:
5724 src
= ureg_src(dst_register(t
, src_reg
->file
, src_reg
->index
,
5725 src_reg
->array_id
));
5728 case PROGRAM_OUTPUT
: {
5729 struct ureg_dst dst
= dst_register(t
, src_reg
->file
, src_reg
->index
,
5731 assert(dst
.WriteMask
!= 0);
5732 unsigned shift
= ffs(dst
.WriteMask
) - 1;
5733 src
= ureg_swizzle(ureg_src(dst
),
5737 MIN2(shift
+ 3, 3));
5741 case PROGRAM_UNIFORM
:
5742 assert(src_reg
->index
>= 0);
5743 src
= src_reg
->index
< t
->num_constants
?
5744 t
->constants
[src_reg
->index
] : ureg_imm4f(t
->ureg
, 0, 0, 0, 0);
5746 case PROGRAM_STATE_VAR
:
5747 case PROGRAM_CONSTANT
: /* ie, immediate */
5748 if (src_reg
->has_index2
)
5749 src
= ureg_src_register(TGSI_FILE_CONSTANT
, src_reg
->index
);
5751 src
= src_reg
->index
>= 0 && src_reg
->index
< t
->num_constants
?
5752 t
->constants
[src_reg
->index
] : ureg_imm4f(t
->ureg
, 0, 0, 0, 0);
5755 case PROGRAM_IMMEDIATE
:
5756 assert(src_reg
->index
>= 0 && src_reg
->index
< t
->num_immediates
);
5757 src
= t
->immediates
[src_reg
->index
];
5761 /* GLSL inputs are 64-bit containers, so we have to
5762 * map back to the original index and add the offset after
5764 index
-= double_reg2
;
5765 if (!src_reg
->array_id
) {
5766 assert(t
->inputMapping
[index
] < ARRAY_SIZE(t
->inputs
));
5767 assert(t
->inputs
[t
->inputMapping
[index
]].File
!= TGSI_FILE_NULL
);
5768 src
= t
->inputs
[t
->inputMapping
[index
] + double_reg2
];
5771 struct inout_decl
*decl
= find_inout_array(t
->input_decls
,
5774 unsigned mesa_index
= decl
->mesa_index
;
5775 int slot
= t
->inputMapping
[mesa_index
];
5777 assert(slot
!= -1 && t
->inputs
[slot
].File
== TGSI_FILE_INPUT
);
5779 src
= t
->inputs
[slot
];
5780 src
.ArrayID
= src_reg
->array_id
;
5781 src
= ureg_src_array_offset(src
, index
+ double_reg2
- mesa_index
);
5785 case PROGRAM_ADDRESS
:
5786 src
= ureg_src(t
->address
[src_reg
->index
]);
5789 case PROGRAM_SYSTEM_VALUE
:
5790 assert(src_reg
->index
< (int) ARRAY_SIZE(t
->systemValues
));
5791 src
= t
->systemValues
[src_reg
->index
];
5794 case PROGRAM_HW_ATOMIC
:
5795 src
= ureg_src_array_register(TGSI_FILE_HW_ATOMIC
, src_reg
->index
,
5800 assert(!"unknown src register file");
5801 return ureg_src_undef();
5804 if (src_reg
->has_index2
) {
5805 /* 2D indexes occur with geometry shader inputs (attrib, vertex)
5806 * and UBO constant buffers (buffer, position).
5808 if (src_reg
->reladdr2
)
5809 src
= ureg_src_dimension_indirect(src
,
5810 translate_addr(t
, src_reg
->reladdr2
, 1),
5813 src
= ureg_src_dimension(src
, src_reg
->index2D
);
5816 src
= ureg_swizzle(src
,
5817 GET_SWZ(src_reg
->swizzle
, 0) & 0x3,
5818 GET_SWZ(src_reg
->swizzle
, 1) & 0x3,
5819 GET_SWZ(src_reg
->swizzle
, 2) & 0x3,
5820 GET_SWZ(src_reg
->swizzle
, 3) & 0x3);
5823 src
= ureg_abs(src
);
5825 if ((src_reg
->negate
& 0xf) == NEGATE_XYZW
)
5826 src
= ureg_negate(src
);
5828 if (src_reg
->reladdr
!= NULL
) {
5829 assert(src_reg
->file
!= PROGRAM_TEMPORARY
);
5830 src
= ureg_src_indirect(src
, translate_addr(t
, src_reg
->reladdr
, 0));
5836 static struct tgsi_texture_offset
5837 translate_tex_offset(struct st_translate
*t
,
5838 const st_src_reg
*in_offset
)
5840 struct tgsi_texture_offset offset
;
5841 struct ureg_src src
= translate_src(t
, in_offset
);
5843 offset
.File
= src
.File
;
5844 offset
.Index
= src
.Index
;
5845 offset
.SwizzleX
= src
.SwizzleX
;
5846 offset
.SwizzleY
= src
.SwizzleY
;
5847 offset
.SwizzleZ
= src
.SwizzleZ
;
5850 assert(!src
.Indirect
);
5851 assert(!src
.DimIndirect
);
5852 assert(!src
.Dimension
);
5853 assert(!src
.Absolute
); /* those shouldn't be used with integers anyway */
5854 assert(!src
.Negate
);
5860 compile_tgsi_instruction(struct st_translate
*t
,
5861 const glsl_to_tgsi_instruction
*inst
)
5863 struct ureg_program
*ureg
= t
->ureg
;
5865 struct ureg_dst dst
[2];
5866 struct ureg_src src
[4];
5867 struct tgsi_texture_offset texoffsets
[MAX_GLSL_TEXTURE_OFFSET
];
5871 enum tgsi_texture_type tex_target
= TGSI_TEXTURE_BUFFER
;
5873 num_dst
= num_inst_dst_regs(inst
);
5874 num_src
= num_inst_src_regs(inst
);
5876 for (i
= 0; i
< num_dst
; i
++)
5877 dst
[i
] = translate_dst(t
,
5881 for (i
= 0; i
< num_src
; i
++)
5882 src
[i
] = translate_src(t
, &inst
->src
[i
]);
5885 case TGSI_OPCODE_BGNLOOP
:
5886 case TGSI_OPCODE_ELSE
:
5887 case TGSI_OPCODE_ENDLOOP
:
5888 case TGSI_OPCODE_IF
:
5889 case TGSI_OPCODE_UIF
:
5890 assert(num_dst
== 0);
5891 ureg_insn(ureg
, inst
->op
, NULL
, 0, src
, num_src
, inst
->precise
);
5894 case TGSI_OPCODE_TEX
:
5895 case TGSI_OPCODE_TEX_LZ
:
5896 case TGSI_OPCODE_TXB
:
5897 case TGSI_OPCODE_TXD
:
5898 case TGSI_OPCODE_TXL
:
5899 case TGSI_OPCODE_TXP
:
5900 case TGSI_OPCODE_TXQ
:
5901 case TGSI_OPCODE_TXQS
:
5902 case TGSI_OPCODE_TXF
:
5903 case TGSI_OPCODE_TXF_LZ
:
5904 case TGSI_OPCODE_TEX2
:
5905 case TGSI_OPCODE_TXB2
:
5906 case TGSI_OPCODE_TXL2
:
5907 case TGSI_OPCODE_TG4
:
5908 case TGSI_OPCODE_LODQ
:
5909 if (inst
->resource
.file
== PROGRAM_SAMPLER
) {
5910 src
[num_src
] = t
->samplers
[inst
->resource
.index
];
5912 /* Bindless samplers. */
5913 src
[num_src
] = translate_src(t
, &inst
->resource
);
5915 assert(src
[num_src
].File
!= TGSI_FILE_NULL
);
5916 if (inst
->resource
.reladdr
)
5918 ureg_src_indirect(src
[num_src
],
5919 translate_addr(t
, inst
->resource
.reladdr
, 2));
5921 for (i
= 0; i
< (int)inst
->tex_offset_num_offset
; i
++) {
5922 texoffsets
[i
] = translate_tex_offset(t
, &inst
->tex_offsets
[i
]);
5924 tex_target
= st_translate_texture_target(inst
->tex_target
, inst
->tex_shadow
);
5930 st_translate_texture_type(inst
->tex_type
),
5931 texoffsets
, inst
->tex_offset_num_offset
,
5935 case TGSI_OPCODE_RESQ
:
5936 case TGSI_OPCODE_LOAD
:
5937 case TGSI_OPCODE_ATOMUADD
:
5938 case TGSI_OPCODE_ATOMXCHG
:
5939 case TGSI_OPCODE_ATOMCAS
:
5940 case TGSI_OPCODE_ATOMAND
:
5941 case TGSI_OPCODE_ATOMOR
:
5942 case TGSI_OPCODE_ATOMXOR
:
5943 case TGSI_OPCODE_ATOMUMIN
:
5944 case TGSI_OPCODE_ATOMUMAX
:
5945 case TGSI_OPCODE_ATOMIMIN
:
5946 case TGSI_OPCODE_ATOMIMAX
:
      for (i = num_src - 1; i >= 0; i--)
         src[i + 1] = src[i];
      num_src++;
      if (inst->resource.file == PROGRAM_MEMORY) {
         src[0] = t->shared_memory;
      } else if (inst->resource.file == PROGRAM_BUFFER) {
         src[0] = t->buffers[inst->resource.index];
      } else if (inst->resource.file == PROGRAM_HW_ATOMIC) {
         src[0] = translate_src(t, &inst->resource);
      } else if (inst->resource.file == PROGRAM_CONSTANT) {
         assert(inst->resource.has_index2);
         src[0] = ureg_src_register(TGSI_FILE_CONSTBUF, inst->resource.index);
      } else {
         assert(inst->resource.file != PROGRAM_UNDEFINED);
         if (inst->resource.file == PROGRAM_IMAGE) {
            src[0] = t->images[inst->resource.index];
         } else {
            /* Bindless images. */
            src[0] = translate_src(t, &inst->resource);
         }
         tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
      }
      if (inst->resource.reladdr)
         src[0] = ureg_src_indirect(src[0],
                                    translate_addr(t, inst->resource.reladdr, 2));
      assert(src[0].File != TGSI_FILE_NULL);
      ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
                       inst->buffer_access,
                       tex_target, inst->image_format);
      break;

   case TGSI_OPCODE_STORE:
      if (inst->resource.file == PROGRAM_MEMORY) {
         dst[0] = ureg_dst(t->shared_memory);
      } else if (inst->resource.file == PROGRAM_BUFFER) {
         dst[0] = ureg_dst(t->buffers[inst->resource.index]);
      } else {
         if (inst->resource.file == PROGRAM_IMAGE) {
            dst[0] = ureg_dst(t->images[inst->resource.index]);
         } else {
            /* Bindless images. */
            dst[0] = ureg_dst(translate_src(t, &inst->resource));
         }
         tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
      }
      dst[0] = ureg_writemask(dst[0], inst->dst[0].writemask);
      if (inst->resource.reladdr)
         dst[0] = ureg_dst_indirect(dst[0],
                                    translate_addr(t, inst->resource.reladdr, 2));
      assert(dst[0].File != TGSI_FILE_NULL);
      ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
                       inst->buffer_access,
                       tex_target, inst->image_format);
      break;

   default:
      ureg_insn(ureg, inst->op, dst, num_dst,
                src, num_src, inst->precise);
      break;
   }
}
/**
 * Emit the TGSI instructions for inverting and adjusting WPOS.
 * This code is unavoidable because it also depends on whether
 * a FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
 */
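/* In effect the conditional flip below is a single MAD on the y channel,
 *    wpos.y = wpos_input.y * wpostrans.<scale> + wpostrans.<bias>
 * where either the .xy or the .zw pair of the state constant is selected
 * depending on whether y has to be inverted for the current render target.
 */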
static void
emit_wpos_adjustment(struct gl_context *ctx,
                     struct st_translate *t,
                     int wpos_transform_const,
                     boolean invert,
                     GLfloat adjX, GLfloat adjY[2])
{
   struct ureg_program *ureg = t->ureg;

   assert(wpos_transform_const >= 0);

   /* Fragment program uses fragment position input.
    * Need to replace instances of INPUT[WPOS] with temp T
    * where T = INPUT[WPOS] is inverted by Y.
    */
   struct ureg_src wpostrans = ureg_DECL_constant(ureg, wpos_transform_const);
   struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
   struct ureg_src *wpos =
      ctx->Const.GLSLFragCoordIsSysVal ?
         &t->systemValues[SYSTEM_VALUE_FRAG_COORD] :
         &t->inputs[t->inputMapping[VARYING_SLOT_POS]];
   struct ureg_src wpos_input = *wpos;

   /* First, apply the coordinate shift: */
   if (adjX || adjY[0] || adjY[1]) {
      if (adjY[0] != adjY[1]) {
         /* Adjust the y coordinate by adjY[1] or adjY[0] respectively
          * depending on whether inversion is actually going to be applied
          * or not, which is determined by testing against the inversion
          * state variable used below, which will be either +1 or -1.
          */
         struct ureg_dst adj_temp = ureg_DECL_local_temporary(ureg);

         ureg_CMP(ureg, adj_temp,
                  ureg_scalar(wpostrans, invert ? 2 : 0),
                  ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f),
                  ureg_imm4f(ureg, adjX, adjY[1], 0.0f, 0.0f));
         ureg_ADD(ureg, wpos_temp, wpos_input, ureg_src(adj_temp));
      } else {
         ureg_ADD(ureg, wpos_temp, wpos_input,
                  ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f));
      }
      wpos_input = ureg_src(wpos_temp);
   } else {
      /* MOV wpos_temp, input[wpos]
       */
      ureg_MOV(ureg, wpos_temp, wpos_input);
   }

   /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be
    * inversion/identity, or the other way around if we're drawing to an FBO.
    */
   if (invert) {
      /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
       */
      ureg_MAD(ureg,
               ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
               wpos_input,
               ureg_scalar(wpostrans, 0),
               ureg_scalar(wpostrans, 1));
   } else {
      /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
       */
      ureg_MAD(ureg,
               ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
               wpos_input,
               ureg_scalar(wpostrans, 2),
               ureg_scalar(wpostrans, 3));
   }

   /* Use wpos_temp as position input from here on:
    */
   *wpos = ureg_src(wpos_temp);
}
/**
 * Emit fragment position/coordinate code.
 */
static void
emit_wpos(struct st_context *st,
          struct st_translate *t,
          const struct gl_program *program,
          struct ureg_program *ureg,
          int wpos_transform_const)
{
   struct pipe_screen *pscreen = st->pipe->screen;
   GLfloat adjX = 0.0f;
   GLfloat adjY[2] = { 0.0f, 0.0f };
   boolean invert = FALSE;

   /* Query the pixel center conventions supported by the pipe driver and set
    * adjX, adjY to help out if it cannot handle the requested one internally.
    *
    * The bias of the y-coordinate depends on whether y-inversion takes place
    * (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
    * drawing to an FBO (causes additional inversion), and whether the pipe
    * driver origin and the requested origin differ (the latter condition is
    * stored in the 'invert' variable).
    *
    * For height = 100 (i = integer, h = half-integer, l = lower, u = upper):
    *
    * center shift only:
    * i -> h: +0.5
    * h -> i: -0.5
    *
    * inversion only:
    * l,i -> u,i: ( 0.0 + 1.0) * -1 + 100 = 99
    * l,h -> u,h: ( 0.5 + 0.0) * -1 + 100 = 99.5
    * u,i -> l,i: (99.0 + 1.0) * -1 + 100 = 0
    * u,h -> l,h: (99.5 + 0.0) * -1 + 100 = 0.5
    *
    * inversion and center shift:
    * l,i -> u,h: ( 0.0 + 0.5) * -1 + 100 = 99.5
    * l,h -> u,i: ( 0.5 + 0.5) * -1 + 100 = 99
    * u,i -> l,h: (99.0 + 0.5) * -1 + 100 = 0.5
    * u,h -> l,i: (99.5 + 0.5) * -1 + 100 = 0
    */
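   /* Worked example of the cases below: if the shader wants a lower-left,
    * half-integer pixel center but the driver only provides an upper-left,
    * integer convention, the result is invert = TRUE and
    * adjX = adjY[0] = adjY[1] = 0.5f, which emit_wpos_adjustment() folds into
    * the ADD/MAD sequence above.
    */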
   if (program->OriginUpperLeft) {
      /* Fragment shader wants origin in upper-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
         /* the driver supports upper-left origin */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
         /* the driver supports lower-left origin, need to invert Y */
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
                       TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
         invert = TRUE;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants origin in lower-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
         /* the driver supports lower-left origin */
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
                       TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
         /* the driver supports upper-left origin, need to invert Y */
         invert = TRUE;
      else
         assert(0);
   }

   if (program->PixelCenterInteger) {
      /* Fragment shader wants pixel center integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer */
         adjY[1] = 1.0f;
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
                       TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer, need to bias X,Y */
         adjX = -0.5f;
         adjY[0] = -0.5f;
         adjY[1] = 0.5f;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants pixel center half integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer, need to bias X,Y */
         adjX = adjY[0] = adjY[1] = 0.5f;
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
                       TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      }
      else
         assert(0);
   }

   /* we invert after adjustment so that we avoid the MOV to temporary,
    * and reuse the adjustment ADD instead */
   emit_wpos_adjustment(st->ctx, t, wpos_transform_const, invert, adjX, adjY);
}
/**
 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
 * TGSI uses +1 for front, -1 for back.
 * This function converts the TGSI value to the GL value.  Simply clamping/
 * saturating the value to [0,1] does the job.
 */
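/* For example: a front-facing fragment arrives as +1.0 and saturates to 1.0,
 * while a back-facing fragment arrives as -1.0 and saturates to 0.0.
 */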
static void
emit_face_var(struct gl_context *ctx, struct st_translate *t)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
   struct ureg_src face_input = t->inputs[t->inputMapping[VARYING_SLOT_FACE]];

   if (ctx->Const.NativeIntegers) {
      ureg_FSGE(ureg, face_temp, face_input, ureg_imm1f(ureg, 0));
   }
   else {
      /* MOV_SAT face_temp, input[face] */
      ureg_MOV(ureg, ureg_saturate(face_temp), face_input);
   }

   /* Use face_temp as face input from here on: */
   t->inputs[t->inputMapping[VARYING_SLOT_FACE]] = ureg_src(face_temp);
}
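/* Forwards the fixed local work-group size (prog->info.cs.local_size[]) to
 * the driver through the TGSI_PROPERTY_CS_FIXED_BLOCK_* properties.
 */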
static void
emit_compute_block_size(const struct gl_program *prog,
                        struct ureg_program *ureg) {
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
                 prog->info.cs.local_size[0]);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
                 prog->info.cs.local_size[1]);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
                 prog->info.cs.local_size[2]);
}
struct sort_inout_decls {
   bool operator()(const struct inout_decl &a, const struct inout_decl &b) const {
      return mapping[a.mesa_index] < mapping[b.mesa_index];
   }

   const ubyte *mapping;
};

/* Sort the given array of decls by the corresponding slot (TGSI file index).
 *
 * This is for the benefit of older drivers which are broken when the
 * declarations aren't sorted in this way.
 */
static void
sort_inout_decls_by_slot(struct inout_decl *decls,
                         unsigned count,
                         const ubyte mapping[])
{
   sort_inout_decls sorter;
   sorter.mapping = mapping;
   std::sort(decls, decls + count, sorter);
}
static enum tgsi_interpolate_mode
st_translate_interp(enum glsl_interp_mode glsl_qual, GLuint varying)
{
   switch (glsl_qual) {
   case INTERP_MODE_NONE:
      if (varying == VARYING_SLOT_COL0 || varying == VARYING_SLOT_COL1)
         return TGSI_INTERPOLATE_COLOR;
      return TGSI_INTERPOLATE_PERSPECTIVE;
   case INTERP_MODE_SMOOTH:
      return TGSI_INTERPOLATE_PERSPECTIVE;
   case INTERP_MODE_FLAT:
      return TGSI_INTERPOLATE_CONSTANT;
   case INTERP_MODE_NOPERSPECTIVE:
      return TGSI_INTERPOLATE_LINEAR;
   default:
      assert(0 && "unexpected interp mode in st_translate_interp()");
      return TGSI_INTERPOLATE_PERSPECTIVE;
   }
}
/**
 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
 * \param program  the program to translate
 * \param numInputs  number of input registers used
 * \param inputMapping  maps Mesa fragment program inputs to TGSI generic
 *                      input indexes
 * \param inputSemanticName  the TGSI_SEMANTIC flag for each input
 * \param inputSemanticIndex  the semantic index (ex: which texcoord) for
 *                            each input
 * \param interpMode  the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
 * \param numOutputs  number of output registers used
 * \param outputMapping  maps Mesa fragment program outputs to TGSI
 *                       generic outputs
 * \param outputSemanticName  the TGSI_SEMANTIC flag for each output
 * \param outputSemanticIndex  the semantic index (ex: which texcoord) for
 *                             each output
 *
 * \return  PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
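/* Rough order of operations below: declare inputs/outputs (sorted by slot),
 * apply the fragment WPOS/front-face fixups, declare address and system-value
 * registers, then constants, immediates, samplers, buffers/atomics/images,
 * and finally emit one compile_tgsi_instruction() per translated instruction.
 */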
extern "C" enum pipe_error
st_translate_program(
   struct gl_context *ctx,
   enum pipe_shader_type procType,
   struct ureg_program *ureg,
   glsl_to_tgsi_visitor *program,
   const struct gl_program *proginfo,
   GLuint numInputs,
   const ubyte inputMapping[],
   const ubyte inputSlotToAttr[],
   const ubyte inputSemanticName[],
   const ubyte inputSemanticIndex[],
   const ubyte interpMode[],
   GLuint numOutputs,
   const ubyte outputMapping[],
   const ubyte outputSemanticName[],
   const ubyte outputSemanticIndex[])
{
   struct pipe_screen *screen = st_context(ctx)->pipe->screen;
   struct st_translate *t;
   unsigned i;
   struct gl_program_constants *frag_const =
      &ctx->Const.Program[MESA_SHADER_FRAGMENT];
   enum pipe_error ret = PIPE_OK;

   assert(numInputs <= ARRAY_SIZE(t->inputs));
   assert(numOutputs <= ARRAY_SIZE(t->outputs));

   ASSERT_BITFIELD_SIZE(st_src_reg, type, GLSL_TYPE_ERROR);
   ASSERT_BITFIELD_SIZE(st_dst_reg, type, GLSL_TYPE_ERROR);
   ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, tex_type, GLSL_TYPE_ERROR);
   ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, image_format, PIPE_FORMAT_COUNT);
   ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, tex_target,
                        (gl_texture_index) (NUM_TEXTURE_TARGETS - 1));
   ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, image_format,
                        (enum pipe_format) (PIPE_FORMAT_COUNT - 1));
   ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, op,
                        (enum tgsi_opcode) (TGSI_OPCODE_LAST - 1));

   t = CALLOC_STRUCT(st_translate);
   if (!t) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out;
   }

   t->procType = procType;
   t->need_uarl = !screen->get_param(screen, PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS);
   t->inputMapping = inputMapping;
   t->outputMapping = outputMapping;
   t->ureg = ureg;
   t->num_temp_arrays = program->next_array;
   if (t->num_temp_arrays)
      t->arrays = (struct ureg_dst*)
                  calloc(t->num_temp_arrays, sizeof(t->arrays[0]));

   /*
    * Declare input attributes.
    */
   switch (procType) {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_TESS_CTRL:
      sort_inout_decls_by_slot(program->inputs, program->num_inputs, inputMapping);

      for (i = 0; i < program->num_inputs; ++i) {
         struct inout_decl *decl = &program->inputs[i];
         unsigned slot = inputMapping[decl->mesa_index];
         struct ureg_src src;
         ubyte tgsi_usage_mask = decl->usage_mask;
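         /* 64-bit types occupy two 32-bit channels per component, so a GLSL
          * usage mask of the first or second component widens to .xy or .zw
          * here (and to .xyzw for anything larger).
          */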
         if (glsl_base_type_is_64bit(decl->base_type)) {
            if (tgsi_usage_mask == 1)
               tgsi_usage_mask = TGSI_WRITEMASK_XY;
            else if (tgsi_usage_mask == 2)
               tgsi_usage_mask = TGSI_WRITEMASK_ZW;
            else
               tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
         }

         enum tgsi_interpolate_mode interp_mode = TGSI_INTERPOLATE_CONSTANT;
         enum tgsi_interpolate_loc interp_location = TGSI_INTERPOLATE_LOC_CENTER;
         if (procType == PIPE_SHADER_FRAGMENT) {
            interp_mode = interpMode[slot] != TGSI_INTERPOLATE_COUNT ?
               (enum tgsi_interpolate_mode) interpMode[slot] :
               st_translate_interp(decl->interp, inputSlotToAttr[slot]);

            interp_location = (enum tgsi_interpolate_loc) decl->interp_loc;
         }

         src = ureg_DECL_fs_input_cyl_centroid_layout(ureg,
                  (enum tgsi_semantic) inputSemanticName[slot],
                  inputSemanticIndex[slot],
                  interp_mode, 0, interp_location, slot, tgsi_usage_mask,
                  decl->array_id, decl->size);

         for (unsigned j = 0; j < decl->size; ++j) {
            if (t->inputs[slot + j].File != TGSI_FILE_INPUT) {
               /* The ArrayID is set up in dst_register */
               t->inputs[slot + j] = src;
               t->inputs[slot + j].ArrayID = 0;
               t->inputs[slot + j].Index += j;
            }
         }
      }
      break;
   case PIPE_SHADER_VERTEX:
      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_vs_input(ureg, i);
      }
      break;
   case PIPE_SHADER_COMPUTE:
      break;
   default:
      assert(0);
   }

   /*
    * Declare output attributes.
    */
   switch (procType) {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_COMPUTE:
      break;
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_VERTEX:
      sort_inout_decls_by_slot(program->outputs, program->num_outputs, outputMapping);

      for (i = 0; i < program->num_outputs; ++i) {
         struct inout_decl *decl = &program->outputs[i];
         unsigned slot = outputMapping[decl->mesa_index];
         struct ureg_dst dst;
         ubyte tgsi_usage_mask = decl->usage_mask;

         if (glsl_base_type_is_64bit(decl->base_type)) {
            if (tgsi_usage_mask == 1)
               tgsi_usage_mask = TGSI_WRITEMASK_XY;
            else if (tgsi_usage_mask == 2)
               tgsi_usage_mask = TGSI_WRITEMASK_ZW;
            else
               tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
         }

         dst = ureg_DECL_output_layout(ureg,
                  (enum tgsi_semantic) outputSemanticName[slot],
                  outputSemanticIndex[slot],
                  decl->gs_out_streams,
                  slot, tgsi_usage_mask, decl->array_id, decl->size);

         for (unsigned j = 0; j < decl->size; ++j) {
            if (t->outputs[slot + j].File != TGSI_FILE_OUTPUT) {
               /* The ArrayID is set up in dst_register */
               t->outputs[slot + j] = dst;
               t->outputs[slot + j].ArrayID = 0;
               t->outputs[slot + j].Index += j;
            }
         }
      }
      break;
   default:
      assert(0);
   }

   if (procType == PIPE_SHADER_FRAGMENT) {
      if (program->shader->Program->info.fs.early_fragment_tests ||
          program->shader->Program->info.fs.post_depth_coverage) {
         ureg_property(ureg, TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL, 1);

         if (program->shader->Program->info.fs.post_depth_coverage)
            ureg_property(ureg, TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE, 1);
      }

      if (proginfo->info.inputs_read & VARYING_BIT_POS) {
         /* Must do this after setting up t->inputs. */
         emit_wpos(st_context(ctx), t, proginfo, ureg,
                   program->wpos_transform_const);
      }

      if (proginfo->info.inputs_read & VARYING_BIT_FACE)
         emit_face_var(ctx, t);

      for (i = 0; i < numOutputs; i++) {
         switch (outputSemanticName[i]) {
         case TGSI_SEMANTIC_POSITION:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_POSITION, /* Z/Depth */
                                             outputSemanticIndex[i]);
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Z);
            break;
         case TGSI_SEMANTIC_STENCIL:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_STENCIL, /* Stencil */
                                             outputSemanticIndex[i]);
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Y);
            break;
         case TGSI_SEMANTIC_COLOR:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_COLOR,
                                             outputSemanticIndex[i]);
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_SAMPLEMASK,
                                             outputSemanticIndex[i]);
            /* TODO: If we ever support more than 32 samples, this will have
             * to become an array.
             */
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
            break;
         default:
            assert(!"fragment shader outputs must be POSITION/STENCIL/COLOR");
            ret = PIPE_ERROR_BAD_INPUT;
            goto out;
         }
      }
   }
   else if (procType == PIPE_SHADER_VERTEX) {
      for (i = 0; i < numOutputs; i++) {
         if (outputSemanticName[i] == TGSI_SEMANTIC_FOG) {
            /* force register to contain a fog coordinate in the form (F, 0, 0, 1). */
            ureg_MOV(ureg,
                     ureg_writemask(t->outputs[i], TGSI_WRITEMASK_YZW),
                     ureg_imm4f(ureg, 0.0f, 0.0f, 0.0f, 1.0f));
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
         }
      }
   }

   if (procType == PIPE_SHADER_COMPUTE) {
      emit_compute_block_size(proginfo, ureg);
   }

   /* Declare address register.
    */
   if (program->num_address_regs > 0) {
      assert(program->num_address_regs <= 3);
      for (int i = 0; i < program->num_address_regs; i++)
         t->address[i] = ureg_DECL_address(ureg);
   }

   /* Declare misc input registers
    */
   {
      GLbitfield64 sysInputs = proginfo->info.system_values_read;

      for (i = 0; sysInputs; i++) {
         if (sysInputs & (1ull << i)) {
            enum tgsi_semantic semName = _mesa_sysval_to_semantic(i);

            t->systemValues[i] = ureg_DECL_system_value(ureg, semName, 0);

            if (semName == TGSI_SEMANTIC_INSTANCEID ||
                semName == TGSI_SEMANTIC_VERTEXID) {
               /* From Gallium perspective, these system values are always
                * integer, and require native integer support.  However, if
                * native integer is supported on the vertex stage but not the
                * pixel stage (e.g, i915g + draw), Mesa will generate IR that
                * assumes these system values are floats. To resolve the
                * inconsistency, we insert a U2F.
                */
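               /* Roughly: U2F TEMP[n].x, SV[i]; later reads of the system
                * value then use TEMP[n].xxxx instead of the raw register.
                */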
               struct st_context *st = st_context(ctx);
               struct pipe_screen *pscreen = st->pipe->screen;
               assert(procType == PIPE_SHADER_VERTEX);
               assert(pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_INTEGERS));
               (void) pscreen;
               if (!ctx->Const.NativeIntegers) {
                  struct ureg_dst temp = ureg_DECL_local_temporary(t->ureg);
                  ureg_U2F(t->ureg, ureg_writemask(temp, TGSI_WRITEMASK_X),
                           t->systemValues[i]);
                  t->systemValues[i] = ureg_scalar(ureg_src(temp), 0);
               }
            }

            if (procType == PIPE_SHADER_FRAGMENT &&
                semName == TGSI_SEMANTIC_POSITION)
               emit_wpos(st_context(ctx), t, proginfo, ureg,
                         program->wpos_transform_const);

            sysInputs &= ~(1ull << i);
         }
      }
   }

   t->array_sizes = program->array_sizes;
   t->input_decls = program->inputs;
   t->num_input_decls = program->num_inputs;
   t->output_decls = program->outputs;
   t->num_output_decls = program->num_outputs;

   /* Emit constants and uniforms.  TGSI uses a single index space for these,
    * so we put all the translated regs in t->constants.
    */
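   /* PROGRAM_STATE_VAR and PROGRAM_UNIFORM entries stay addressable as
    * CONST[i]; plain PROGRAM_CONSTANT values can instead be folded into the
    * immediate file unless the shader addresses the constant buffer
    * indirectly (see the switch below).
    */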
   if (proginfo->Parameters) {
      t->constants = (struct ureg_src *)
         calloc(proginfo->Parameters->NumParameters, sizeof(t->constants[0]));
      if (t->constants == NULL) {
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto out;
      }
      t->num_constants = proginfo->Parameters->NumParameters;

      for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
         unsigned pvo = proginfo->Parameters->ParameterValueOffset[i];

         switch (proginfo->Parameters->Parameters[i].Type) {
         case PROGRAM_STATE_VAR:
         case PROGRAM_UNIFORM:
            t->constants[i] = ureg_DECL_constant(ureg, i);
            break;

         /* Emit immediates for PROGRAM_CONSTANT only when there's no indirect
          * addressing of the const buffer.
          * FIXME: Be smarter and recognize param arrays:
          * indirect addressing is only valid within the referenced
          * array.
          */
         case PROGRAM_CONSTANT:
            if (program->indirect_addr_consts)
               t->constants[i] = ureg_DECL_constant(ureg, i);
            else
               t->constants[i] = emit_immediate(t,
                                                proginfo->Parameters->ParameterValues + pvo,
                                                proginfo->Parameters->Parameters[i].DataType,
                                                proginfo->Parameters->Parameters[i].Size);
            break;
         default:
            break;
         }
      }
   }

   for (i = 0; i < proginfo->info.num_ubos; i++) {
      unsigned size = proginfo->sh.UniformBlocks[i]->UniformBufferSize;
      unsigned num_const_vecs = (size + 15) / 16;
      unsigned first, last;
      assert(num_const_vecs > 0);
      first = 0;
      last = num_const_vecs > 0 ? num_const_vecs - 1 : 0;
      ureg_DECL_constant2D(t->ureg, first, last, i + 1);
   }

   /* Emit immediate values.
    */
   t->immediates = (struct ureg_src *)
      calloc(program->num_immediates, sizeof(struct ureg_src));
   if (t->immediates == NULL) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out;
   }
   t->num_immediates = program->num_immediates;

   i = 0;
   foreach_in_list(immediate_storage, imm, &program->immediates) {
      assert(i < program->num_immediates);
      t->immediates[i++] = emit_immediate(t, imm->values, imm->type, imm->size32);
   }
   assert(i == program->num_immediates);

   /* texture samplers */
   for (i = 0; i < frag_const->MaxTextureImageUnits; i++) {
      if (program->samplers_used & (1u << i)) {
         enum tgsi_return_type type =
            st_translate_texture_type(program->sampler_types[i]);

         t->samplers[i] = ureg_DECL_sampler(ureg, i);

         ureg_DECL_sampler_view(ureg, i, program->sampler_targets[i],
                                type, type, type, type);
      }
   }

   /* Declare atomic and shader storage buffers. */
   {
      struct gl_program *prog = program->prog;

      if (!st_context(ctx)->has_hw_atomics) {
         for (i = 0; i < prog->info.num_abos; i++) {
            unsigned index = prog->sh.AtomicBuffers[i]->Binding;
            assert(index < frag_const->MaxAtomicBuffers);
            t->buffers[index] = ureg_DECL_buffer(ureg, index, true);
         }
      } else {
         for (i = 0; i < program->num_atomics; i++) {
            struct hwatomic_decl *ainfo = &program->atomic_info[i];
            gl_uniform_storage *uni_storage = &prog->sh.data->UniformStorage[ainfo->location];
            int base = uni_storage->offset / ATOMIC_COUNTER_SIZE;
            ureg_DECL_hw_atomic(ureg, base, base + ainfo->size - 1, ainfo->binding,
                                ainfo->array_id);
         }
      }

      assert(prog->info.num_ssbos <= frag_const->MaxShaderStorageBlocks);
      for (i = 0; i < prog->info.num_ssbos; i++) {
         unsigned index = i;
         if (!st_context(ctx)->has_hw_atomics)
            index += frag_const->MaxAtomicBuffers;

         t->buffers[index] = ureg_DECL_buffer(ureg, index, false);
      }
   }

   if (program->use_shared_memory)
      t->shared_memory = ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);

   for (i = 0; i < program->shader->Program->info.num_images; i++) {
      if (program->images_used & (1 << i)) {
         t->images[i] = ureg_DECL_image(ureg, i,
                                        program->image_targets[i],
                                        program->image_formats[i],
                                        true, false);
      }
   }

   /* Emit each instruction in turn:
    */
   foreach_in_list(glsl_to_tgsi_instruction, inst, &program->instructions)
      compile_tgsi_instruction(t, inst);

   /* Set the next shader stage hint for VS and TES. */
   switch (procType) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_TESS_EVAL:
      if (program->shader_program->SeparateShader)
         break;

      for (i = program->shader->Stage + 1; i <= MESA_SHADER_FRAGMENT; i++) {
         if (program->shader_program->_LinkedShaders[i]) {
            ureg_set_next_shader_processor(
                  ureg, pipe_shader_type_from_mesa((gl_shader_stage)i));
            break;
         }
      }
      break;
   default:
      ; /* nothing - silence compiler warning */
   }

out:
   if (t) {
      free(t->arrays);
      free(t->temps);
      free(t->constants);
      t->num_constants = 0;
      free(t->immediates);
      t->num_immediates = 0;
      FREE(t);
   }
   return ret;
}
/* ----------------------------- End TGSI code ------------------------------ */
/**
 * Convert a shader's GLSL IR into a Mesa gl_program, although without
 * generating Mesa IR.
 */
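/* Outline: run the glsl_to_tgsi_visitor over the linked IR, apply the
 * visitor-level optimizations (copy propagation, dead code elimination,
 * register merging and renumbering), then stash the visitor on the
 * corresponding st_*_program so the TGSI can be emitted later via
 * st_translate_program() above.
 */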
static struct gl_program *
get_mesa_program_tgsi(struct gl_context *ctx,
                      struct gl_shader_program *shader_program,
                      struct gl_linked_shader *shader)
{
   glsl_to_tgsi_visitor *v;
   struct gl_program *prog;
   struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];
   struct pipe_screen *pscreen = ctx->st->pipe->screen;
   enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(shader->Stage);
   unsigned skip_merge_registers;

   validate_ir_tree(shader->ir);

   prog = shader->Program;

   prog->Parameters = _mesa_new_parameter_list();
   v = new glsl_to_tgsi_visitor();
   v->ctx = ctx;
   v->prog = prog;
   v->shader_program = shader_program;
   v->shader = shader;
   v->options = options;
   v->native_integers = ctx->Const.NativeIntegers;

   v->have_sqrt = pscreen->get_shader_param(pscreen, ptarget,
                                            PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED);
   v->have_fma = pscreen->get_shader_param(pscreen, ptarget,
                                           PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED);
   v->has_tex_txf_lz = pscreen->get_param(pscreen,
                                          PIPE_CAP_TGSI_TEX_TXF_LZ);
   v->need_uarl = !pscreen->get_param(pscreen, PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS);

   v->variables = _mesa_hash_table_create(v->mem_ctx, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   skip_merge_registers =
      pscreen->get_shader_param(pscreen, ptarget,
                                PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS);

   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                               prog->Parameters);

   /* Remove reads from output registers. */
   if (!pscreen->get_param(pscreen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
      lower_output_reads(shader->Stage, shader->ir);

   /* Emit intermediate IR for main(). */
   visit_exec_list(shader->ir, v);

#if 0
   /* Print out some information (for debugging purposes) used by the
    * optimization passes. */
   {
      int i;
      int *first_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
      int *first_reads = ralloc_array(v->mem_ctx, int, v->next_temp);
      int *last_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
      int *last_reads = ralloc_array(v->mem_ctx, int, v->next_temp);

      for (i = 0; i < v->next_temp; i++) {
         first_writes[i] = -1;
         first_reads[i] = -1;
         last_writes[i] = -1;
         last_reads[i] = -1;
      }
      v->get_first_temp_read(first_reads);
      v->get_last_temp_read_first_temp_write(last_reads, first_writes);
      v->get_last_temp_write(last_writes);
      for (i = 0; i < v->next_temp; i++)
         printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, first_reads[i],
                first_writes[i], last_reads[i], last_writes[i]);
      ralloc_free(first_writes);
      ralloc_free(first_reads);
      ralloc_free(last_writes);
      ralloc_free(last_reads);
   }
#endif

   /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
   v->copy_propagate();

   while (v->eliminate_dead_code());

   v->merge_two_dsts();
   if (!skip_merge_registers)
      v->merge_registers();
   v->renumber_registers();

   /* Write the END instruction. */
   v->emit_asm(NULL, TGSI_OPCODE_END);

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("GLSL IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(shader->Stage),
                shader_program->Name);
      _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
      _mesa_log("\n\n");
   }

   do_set_program_inouts(shader->ir, prog, shader->Stage);
   _mesa_copy_linked_program_data(shader_program, shader);
   shrink_array_declarations(v->inputs, v->num_inputs,
                             &prog->info.inputs_read,
                             prog->info.vs.double_inputs_read,
                             &prog->info.patch_inputs_read);
   shrink_array_declarations(v->outputs, v->num_outputs,
                             &prog->info.outputs_written, 0ULL,
                             &prog->info.patch_outputs_written);
   count_resources(v, prog);

   /* The GLSL IR won't be needed anymore. */
   ralloc_free(shader->ir);
   shader->ir = NULL;

   /* This must be done before the uniform storage is associated. */
   if (shader->Stage == MESA_SHADER_FRAGMENT &&
       (prog->info.inputs_read & VARYING_BIT_POS ||
        prog->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD))) {
      static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
         STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
      };

      v->wpos_transform_const = _mesa_add_state_reference(prog->Parameters,
                                                          wposTransformState);
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_reserve_parameter_storage(prog->Parameters, 8);

   /* This has to be done last.  Any operation that can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(ctx, shader_program, prog, true);
   if (!shader_program->data->LinkStatus) {
      free_glsl_to_tgsi_visitor(v);
      _mesa_reference_program(ctx, &shader->Program, NULL);
      return NULL;
   }

   struct st_vertex_program *stvp;
   struct st_fragment_program *stfp;
   struct st_common_program *stp;
   struct st_compute_program *stcp;

   switch (shader->Stage) {
   case MESA_SHADER_VERTEX:
      stvp = (struct st_vertex_program *)prog;
      stvp->glsl_to_tgsi = v;
      break;
   case MESA_SHADER_FRAGMENT:
      stfp = (struct st_fragment_program *)prog;
      stfp->glsl_to_tgsi = v;
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      stp = st_common_program(prog);
      stp->glsl_to_tgsi = v;
      break;
   case MESA_SHADER_COMPUTE:
      stcp = (struct st_compute_program *)prog;
      stcp->glsl_to_tgsi = v;
      break;
   default:
      assert(!"should not be reached");
      return NULL;
   }

   return prog;
}
/* See if there are unsupported control flow statements. */
class ir_control_flow_info_visitor : public ir_hierarchical_visitor {
   const struct gl_shader_compiler_options *options;

public:
   ir_control_flow_info_visitor(const struct gl_shader_compiler_options *options)
      : options(options),
        unsupported(false)
   {
   }

   virtual ir_visitor_status visit_enter(ir_function *ir)
   {
      /* Other functions are skipped (same as glsl_to_tgsi). */
      if (strcmp(ir->name, "main") == 0)
         return visit_continue;

      return visit_continue_with_parent;
   }

   virtual ir_visitor_status visit_enter(ir_call *ir)
   {
      if (!ir->callee->is_intrinsic()) {
         unsupported = true; /* it's a function call */
         return visit_stop;
      }
      return visit_continue;
   }

   virtual ir_visitor_status visit_enter(ir_return *ir)
   {
      if (options->EmitNoMainReturn) {
         unsupported = true;
         return visit_stop;
      }
      return visit_continue;
   }

   bool unsupported;
};

static bool
has_unsupported_control_flow(exec_list *ir,
                             const struct gl_shader_compiler_options *options)
{
   ir_control_flow_info_visitor visitor(options);
   visit_list_elements(&visitor, ir);
   return visitor.unsupported;
}
/**
 * Called via ctx->Driver.LinkShader()
 * This actually involves converting GLSL IR into an intermediate TGSI-like IR
 * with code lowering and other optimizations.
 */
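/* Two back ends hang off this entry point: when the driver prefers NIR the
 * work is handed to st_link_nir(); otherwise each linked stage is lowered
 * here (indirect addressing, packing builtins, instruction lowering,
 * if-flattening) and then run through get_mesa_program_tgsi().
 */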
GLboolean
st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct pipe_screen *pscreen = ctx->st->pipe->screen;

   enum pipe_shader_ir preferred_ir = (enum pipe_shader_ir)
      pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX,
                                PIPE_SHADER_CAP_PREFERRED_IR);
   bool use_nir = preferred_ir == PIPE_SHADER_IR_NIR;

   /* Return early if we are loading the shader from on-disk cache */
   if (st_load_ir_from_disk_cache(ctx, prog, use_nir)) {
      return GL_TRUE;
   }

   assert(prog->data->LinkStatus);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      struct gl_linked_shader *shader = prog->_LinkedShaders[i];
      exec_list *ir = shader->ir;
      gl_shader_stage stage = shader->Stage;
      const struct gl_shader_compiler_options *options =
         &ctx->Const.ShaderCompilerOptions[stage];
      enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
      bool have_dround = pscreen->get_shader_param(pscreen, ptarget,
                                                   PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED);
      bool have_dfrexp = pscreen->get_shader_param(pscreen, ptarget,
                                                   PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED);
      bool have_ldexp = pscreen->get_shader_param(pscreen, ptarget,
                                                  PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED);
      unsigned if_threshold = pscreen->get_shader_param(pscreen, ptarget,
                                                        PIPE_SHADER_CAP_LOWER_IF_THRESHOLD);

      /* If there are forms of indirect addressing that the driver
       * cannot handle, perform the lowering pass.
       */
      if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput ||
          options->EmitNoIndirectTemp || options->EmitNoIndirectUniform) {
         lower_variable_index_to_cond_assign(stage, ir,
                                             options->EmitNoIndirectInput,
                                             options->EmitNoIndirectOutput,
                                             options->EmitNoIndirectTemp,
                                             options->EmitNoIndirectUniform);
      }

      if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
         lower_64bit_integer_instructions(ir, DIV64 | MOD64);

      if (ctx->Extensions.ARB_shading_language_packing) {
         unsigned lower_inst = LOWER_PACK_SNORM_2x16 |
                               LOWER_UNPACK_SNORM_2x16 |
                               LOWER_PACK_UNORM_2x16 |
                               LOWER_UNPACK_UNORM_2x16 |
                               LOWER_PACK_SNORM_4x8 |
                               LOWER_UNPACK_SNORM_4x8 |
                               LOWER_UNPACK_UNORM_4x8 |
                               LOWER_PACK_UNORM_4x8;

         if (ctx->Extensions.ARB_gpu_shader5)
            lower_inst |= LOWER_PACK_USE_BFI |
                          LOWER_PACK_USE_BFE;
         if (!ctx->st->has_half_float_packing)
            lower_inst |= LOWER_PACK_HALF_2x16 |
                          LOWER_UNPACK_HALF_2x16;

         lower_packing_builtins(ir, lower_inst);
      }

      if (!pscreen->get_param(pscreen, PIPE_CAP_TEXTURE_GATHER_OFFSETS))
         lower_offset_arrays(ir);
      do_mat_op_to_vec(ir);

      if (stage == MESA_SHADER_FRAGMENT)
         lower_blend_equation_advanced(
            shader, ctx->Extensions.KHR_blend_equation_advanced_coherent);

      lower_instructions(ir,
                         (have_ldexp ? 0 : LDEXP_TO_ARITH) |
                         (have_dfrexp ? 0 : DFREXP_DLDEXP_TO_ARITH) |
                         (have_dround ? 0 : DOPS_TO_DFRAC) |
                         (options->EmitNoPow ? POW_TO_EXP2 : 0) |
                         (!ctx->Const.NativeIntegers ? INT_DIV_TO_MUL_RCP : 0) |
                         (options->EmitNoSat ? SAT_TO_CLAMP : 0) |
                         (ctx->Const.ForceGLSLAbsSqrt ? SQRT_TO_ABS_SQRT : 0) |
                         /* Assume that if ARB_gpu_shader5 is not supported
                          * then all of the extended integer functions need
                          * lowering.  It may be necessary to add some caps
                          * for individual instructions.
                          */
                         (!ctx->Extensions.ARB_gpu_shader5
                          ? BIT_COUNT_TO_MATH |
                            FIND_LSB_TO_FLOAT_CAST |
                            FIND_MSB_TO_FLOAT_CAST |
                            IMUL_HIGH_TO_MUL
                          : 0));

      do_vec_index_to_cond_assign(ir);
      lower_vector_insert(ir, true);
      lower_quadop_vector(ir, false);
      if (options->MaxIfDepth == 0) {
         lower_discard(ir);
      }

      if (ctx->Const.GLSLOptimizeConservatively) {
         /* Do it once and repeat only if there's unsupported control flow. */
         do {
            do_common_optimization(ir, true, true, options,
                                   ctx->Const.NativeIntegers);
            lower_if_to_cond_assign((gl_shader_stage)i, ir,
                                    options->MaxIfDepth, if_threshold);
         } while (has_unsupported_control_flow(ir, options));
      } else {
         /* Repeat it until it stops making changes. */
         bool progress;
         do {
            progress = do_common_optimization(ir, true, true, options,
                                              ctx->Const.NativeIntegers);
            progress |= lower_if_to_cond_assign((gl_shader_stage)i, ir,
                                                options->MaxIfDepth, if_threshold);
         } while (progress);
      }

      /* Do this again to lower ir_binop_vector_extract introduced
       * by optimization passes.
       */
      do_vec_index_to_cond_assign(ir);

      validate_ir_tree(ir);
   }

   build_program_resource_list(ctx, prog);

   if (use_nir)
      return st_link_nir(ctx, prog);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = prog->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      struct gl_program *linked_prog =
         get_mesa_program_tgsi(ctx, prog, shader);
      st_set_prog_affected_state_flags(linked_prog);

      if (linked_prog) {
         if (!ctx->Driver.ProgramStringNotify(ctx,
                                              _mesa_shader_stage_to_program(i),
                                              linked_prog)) {
            _mesa_reference_program(ctx, &shader->Program, NULL);
            return GL_FALSE;
         }
      }
   }

   return GL_TRUE;
}

extern "C" void
st_translate_stream_output_info(glsl_to_tgsi_visitor *glsl_to_tgsi,
                                const ubyte outputMapping[],
                                struct pipe_stream_output_info *so)
{
   if (!glsl_to_tgsi->shader_program->last_vert_prog)
      return;

   struct gl_transform_feedback_info *info =
      glsl_to_tgsi->shader_program->last_vert_prog->sh.LinkedTransformFeedback;
   st_translate_stream_output_info2(info, outputMapping, so);
}

extern "C" void
st_translate_stream_output_info2(struct gl_transform_feedback_info *info,
                                 const ubyte outputMapping[],
                                 struct pipe_stream_output_info *so)
{
   unsigned i;

   for (i = 0; i < info->NumOutputs; i++) {
      so->output[i].register_index =
         outputMapping[info->Outputs[i].OutputRegister];
      so->output[i].start_component = info->Outputs[i].ComponentOffset;
      so->output[i].num_components = info->Outputs[i].NumComponents;
      so->output[i].output_buffer = info->Outputs[i].OutputBuffer;
      so->output[i].dst_offset = info->Outputs[i].DstOffset;
      so->output[i].stream = info->Outputs[i].StreamId;
   }

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      so->stride[i] = info->Buffers[i].Stride;
   }
   so->num_outputs = info->NumOutputs;
}