/*
 * Copyright (C) 2005-2007  Brian Paul   All Rights Reserved.
 * Copyright (C) 2008  VMware, Inc.   All Rights Reserved.
 * Copyright © 2010 Intel Corporation
 * Copyright © 2011 Bryan Cain
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file glsl_to_tgsi.cpp
 *
 * Translate GLSL IR to TGSI.
 */
33 #include "st_glsl_to_tgsi.h"
35 #include "compiler/glsl/glsl_parser_extras.h"
36 #include "compiler/glsl/ir_optimization.h"
37 #include "compiler/glsl/program.h"
39 #include "main/errors.h"
40 #include "main/shaderobj.h"
41 #include "main/uniforms.h"
42 #include "main/shaderapi.h"
43 #include "main/shaderimage.h"
44 #include "program/prog_instruction.h"
46 #include "pipe/p_context.h"
47 #include "pipe/p_screen.h"
48 #include "tgsi/tgsi_ureg.h"
49 #include "tgsi/tgsi_info.h"
50 #include "util/u_math.h"
51 #include "util/u_memory.h"
52 #include "st_glsl_types.h"
53 #include "st_program.h"
54 #include "st_mesa_to_tgsi.h"
55 #include "st_format.h"
57 #include "st_shader_cache.h"
58 #include "st_glsl_to_tgsi_temprename.h"
60 #include "util/hash_table.h"
#define PROGRAM_ANY_CONST ((1 << PROGRAM_STATE_VAR) |    \
                           (1 << PROGRAM_CONSTANT) |     \
                           (1 << PROGRAM_UNIFORM))

#define MAX_GLSL_TEXTURE_OFFSET 4
static unsigned is_precise(const ir_variable *ir)
{
   return ir->data.precise || ir->data.invariant;
}
class variable_storage
{
   DECLARE_RZALLOC_CXX_OPERATORS(variable_storage)

public:
   variable_storage(ir_variable *var, gl_register_file file, int index,
                    unsigned array_id = 0)
      : file(file), index(index), component(0), var(var), array_id(array_id)
   {
      assert(file != PROGRAM_ARRAY || array_id != 0);
   }

   gl_register_file file;
   int index;

   /* Explicit component location.  This is given in terms of the GLSL-style
    * swizzles where each double is a single component, i.e. for 64-bit types
    * it can only be 0 or 1.
    */
   unsigned component;
   ir_variable *var; /* variable that maps to this, if any */
   unsigned array_id;
};
class immediate_storage : public exec_node {
public:
   immediate_storage(gl_constant_value *values, int size32, int type)
   {
      memcpy(this->values, values, size32 * sizeof(gl_constant_value));
      this->size32 = size32;
      this->type = type;
   }

   /* doubles are stored across 2 gl_constant_values */
   gl_constant_value values[4];
   int size32; /**< Number of 32-bit components (1-4) */
   int type; /**< GL_DOUBLE, GL_FLOAT, GL_INT, GL_BOOL, or GL_UNSIGNED_INT */
};
static const st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
static const st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
struct inout_decl {
   unsigned array_id; /* TGSI ArrayID; 1-based: 0 means not an array */
   unsigned gs_out_streams;
   enum glsl_interp_mode interp;
   enum glsl_base_type base_type;
   ubyte usage_mask; /* GLSL-style usage-mask,  i.e. single bit per double */
};
static struct inout_decl *
find_inout_array(struct inout_decl *decls, unsigned count, unsigned array_id)
{
   assert(array_id != 0);

   for (unsigned i = 0; i < count; i++) {
      struct inout_decl *decl = &decls[i];

      if (array_id == decl->array_id) {
         return decl;
      }
   }

   return NULL;
}
static enum glsl_base_type
find_array_type(struct inout_decl *decls, unsigned count, unsigned array_id)
{
   if (!array_id)
      return GLSL_TYPE_ERROR;
   struct inout_decl *decl = find_inout_array(decls, count, array_id);
   if (decl)
      return decl->base_type;
   return GLSL_TYPE_ERROR;
}
struct glsl_to_tgsi_visitor : public ir_visitor {
public:
   glsl_to_tgsi_visitor();
   ~glsl_to_tgsi_visitor();

   struct gl_context *ctx;
   struct gl_program *prog;
   struct gl_shader_program *shader_program;
   struct gl_linked_shader *shader;
   struct gl_shader_compiler_options *options;

   unsigned *array_sizes;
   unsigned max_num_arrays;

   struct inout_decl inputs[4 * PIPE_MAX_SHADER_INPUTS];
   unsigned num_input_arrays;
   struct inout_decl outputs[4 * PIPE_MAX_SHADER_OUTPUTS];
   unsigned num_outputs;
   unsigned num_output_arrays;

   int num_address_regs;
   uint32_t samplers_used;
   glsl_base_type sampler_types[PIPE_MAX_SAMPLERS];
   int sampler_targets[PIPE_MAX_SAMPLERS];   /**< One of TGSI_TEXTURE_* */
   int image_targets[PIPE_MAX_SHADER_IMAGES];
   unsigned image_formats[PIPE_MAX_SHADER_IMAGES];
   bool indirect_addr_consts;
   int wpos_transform_const;

   bool native_integers;
   bool use_shared_memory;

   variable_storage *find_variable_storage(ir_variable *var);

   int add_constant(gl_register_file file, gl_constant_value values[8],
                    int size, int datatype, uint16_t *swizzle_out);

   st_src_reg get_temp(const glsl_type *type);
   void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);

   st_src_reg st_src_reg_for_double(double val);
   st_src_reg st_src_reg_for_float(float val);
   st_src_reg st_src_reg_for_int(int val);
   st_src_reg st_src_reg_for_int64(int64_t val);
   st_src_reg st_src_reg_for_type(enum glsl_base_type type, int val);

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction.  Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   virtual void visit(ir_barrier *);

   void visit_expression(ir_expression *, st_src_reg *) ATTRIBUTE_NOINLINE;

   void visit_atomic_counter_intrinsic(ir_call *);
   void visit_ssbo_intrinsic(ir_call *);
   void visit_membar_intrinsic(ir_call *);
   void visit_shared_intrinsic(ir_call *);
   void visit_image_intrinsic(ir_call *);
   void visit_generic_intrinsic(ir_call *, unsigned op);

   /** List of variable_storage */
   struct hash_table *variables;

   /** List of immediate_storage */
   exec_list immediates;
   unsigned num_immediates;

   /** List of glsl_to_tgsi_instruction */
   exec_list instructions;

   glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, unsigned op,
                                      st_dst_reg dst = undef_dst,
                                      st_src_reg src0 = undef_src,
                                      st_src_reg src1 = undef_src,
                                      st_src_reg src2 = undef_src,
                                      st_src_reg src3 = undef_src);

   glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, unsigned op,
                                      st_dst_reg dst, st_dst_reg dst1,
                                      st_src_reg src0 = undef_src,
                                      st_src_reg src1 = undef_src,
                                      st_src_reg src2 = undef_src,
                                      st_src_reg src3 = undef_src);

   unsigned get_opcode(unsigned op,
                       st_dst_reg dst,
                       st_src_reg src0, st_src_reg src1);

   /**
    * Emit the correct dot-product instruction for the type of arguments
    */
   glsl_to_tgsi_instruction *emit_dp(ir_instruction *ir,
                                     st_dst_reg dst,
                                     st_src_reg src0,
                                     st_src_reg src1,
                                     unsigned elements);

   void emit_scalar(ir_instruction *ir, unsigned op,
                    st_dst_reg dst, st_src_reg src0);

   void emit_scalar(ir_instruction *ir, unsigned op,
                    st_dst_reg dst, st_src_reg src0, st_src_reg src1);

   void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);

   void get_deref_offsets(ir_dereference *ir,
                          unsigned *array_size,
                          unsigned *base,
                          uint16_t *index,
                          st_src_reg *reladdr,
                          bool opaque);
   void calc_deref_offsets(ir_dereference *tail,
                           unsigned *array_elements,
                           uint16_t *index,
                           st_src_reg *indirect,
                           unsigned *location);
   st_src_reg canonicalize_gather_offset(st_src_reg offset);

   bool try_emit_mad(ir_expression *ir,
                     int mul_operand);
   bool try_emit_mad_for_and_not(ir_expression *ir,
                                 int try_operand);

   void emit_swz(ir_expression *ir);

   bool process_move_condition(ir_rvalue *ir);

   void simplify_cmp(void);

   void rename_temp_registers(struct rename_reg_pair *renames);
   void get_first_temp_read(int *first_reads);
   void get_first_temp_write(int *first_writes);
   void get_last_temp_read_first_temp_write(int *last_reads, int *first_writes);
   void get_last_temp_write(int *last_writes);

   void copy_propagate(void);
   int eliminate_dead_code(void);

   void merge_two_dsts(void);
   void merge_registers(void);
   void renumber_registers(void);

   void emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
                       st_dst_reg *l, st_src_reg *r,
                       st_src_reg *cond, bool cond_swap);
};
static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT, 0);
static st_dst_reg address_reg2 = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT, 1);
static st_dst_reg sampler_reladdr = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT, 2);
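/* Added commentary (not from the original source): these three statics name
 * the address-register slots used for indirect addressing below.  address_reg
 * (ADDR[0]) receives the primary relative index, address_reg2 (ADDR[1]) the
 * secondary one (reladdr2), and sampler_reladdr (ADDR[2]) is loaded when a
 * sampler or constant-buffer index is itself indirect.
 */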
static void
fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3);

static void
fail_link(struct gl_shader_program *prog, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);
   ralloc_vasprintf_append(&prog->data->InfoLog, fmt, args);
   va_end(args);

   prog->data->LinkStatus = linking_failure;
}
static unsigned
swizzle_for_size(int size)
{
   static const int size_swizzles[4] = {
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
   };

   assert((size >= 1) && (size <= 4));
   return size_swizzles[size - 1];
}
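/* Worked example (added commentary, not from the original source):
 * swizzle_for_size(2) returns MAKE_SWIZZLE4(X, Y, Y, Y), i.e. a vec2 value
 * replicates its last real channel (Y) into the unused Z/W slots, while
 * swizzle_for_size(4) is the identity XYZW swizzle.
 */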
glsl_to_tgsi_instruction *
glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, unsigned op,
                               st_dst_reg dst, st_dst_reg dst1,
                               st_src_reg src0, st_src_reg src1,
                               st_src_reg src2, st_src_reg src3)
{
   glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
   int num_reladdr = 0, i, j;
   bool dst_is_64bit[2];

   op = get_opcode(op, dst, src0, src1);

   /* If we have to do relative addressing, we want to load the ARL
    * reg directly for one of the regs, and preload the other reladdr
    * sources into temps.
    */
   num_reladdr += dst.reladdr != NULL || dst.reladdr2;
   num_reladdr += dst1.reladdr != NULL || dst1.reladdr2;
   num_reladdr += src0.reladdr != NULL || src0.reladdr2 != NULL;
   num_reladdr += src1.reladdr != NULL || src1.reladdr2 != NULL;
   num_reladdr += src2.reladdr != NULL || src2.reladdr2 != NULL;
   num_reladdr += src3.reladdr != NULL || src3.reladdr2 != NULL;

   reladdr_to_temp(ir, &src3, &num_reladdr);
   reladdr_to_temp(ir, &src2, &num_reladdr);
   reladdr_to_temp(ir, &src1, &num_reladdr);
   reladdr_to_temp(ir, &src0, &num_reladdr);

   if (dst.reladdr || dst.reladdr2) {
      if (dst.reladdr)
         emit_arl(ir, address_reg, *dst.reladdr);
      if (dst.reladdr2)
         emit_arl(ir, address_reg2, *dst.reladdr2);
      num_reladdr--;
   }
   if (dst1.reladdr) {
      emit_arl(ir, address_reg, *dst1.reladdr);
      num_reladdr--;
   }
   assert(num_reladdr == 0);

   /* inst->op has only 8 bits. */
   STATIC_ASSERT(TGSI_OPCODE_LAST <= 255);

   inst->op = op;
   inst->precise = this->precise;
   inst->info = tgsi_get_opcode_info(op);
   inst->dst[0] = dst;
   inst->dst[1] = dst1;
   inst->src[0] = src0;
   inst->src[1] = src1;
   inst->src[2] = src2;
   inst->src[3] = src3;
   inst->is_64bit_expanded = false;
   inst->ir = ir;

   inst->tex_offsets = NULL;
   inst->tex_offset_num_offset = 0;
   inst->tex_shadow = 0;
   /* default to float, for paths where this is not initialized
    * (since 0==UINT which is likely wrong):
    */
   inst->tex_type = GLSL_TYPE_FLOAT;

   /* Update indirect addressing status used by TGSI */
   if (dst.reladdr || dst.reladdr2) {
      switch (dst.file) {
      case PROGRAM_STATE_VAR:
      case PROGRAM_CONSTANT:
      case PROGRAM_UNIFORM:
         this->indirect_addr_consts = true;
         break;
      case PROGRAM_IMMEDIATE:
         assert(!"immediates should not have indirect addressing");
         break;
      default:
         break;
      }
   } else {
      for (i = 0; i < 4; i++) {
         if (inst->src[i].reladdr) {
            switch (inst->src[i].file) {
            case PROGRAM_STATE_VAR:
            case PROGRAM_CONSTANT:
            case PROGRAM_UNIFORM:
               this->indirect_addr_consts = true;
               break;
            case PROGRAM_IMMEDIATE:
               assert(!"immediates should not have indirect addressing");
               break;
            default:
               break;
            }
         }
      }
   }

   /*
    * This section contains the double processing.
    * GLSL just represents doubles as single channel values,
    * however most HW and TGSI represent doubles as pairs of register channels.
    *
    * so we have to fixup destination writemask/index and src swizzle/indexes.
    * dest writemasks need to translate from single channel write mask
    * to a dual-channel writemask, but also need to modify the index,
    * if we are touching the Z,W fields in the pre-translated writemask.
    *
    * src channels have similiar index modifications along with swizzle
    * changes to we pick the XY, ZW pairs from the correct index.
    *
    * GLSL [0].x -> TGSI [0].xy
    * GLSL [0].y -> TGSI [0].zw
    * GLSL [0].z -> TGSI [1].xy
    * GLSL [0].w -> TGSI [1].zw
    */
   for (j = 0; j < 2; j++) {
      dst_is_64bit[j] = glsl_base_type_is_64bit(inst->dst[j].type);
      if (!dst_is_64bit[j] && inst->dst[j].file == PROGRAM_OUTPUT &&
          inst->dst[j].type == GLSL_TYPE_ARRAY) {
         enum glsl_base_type type = find_array_type(this->outputs, this->num_outputs,
                                                    inst->dst[j].array_id);
         if (glsl_base_type_is_64bit(type))
            dst_is_64bit[j] = true;
      }
   }

   if (dst_is_64bit[0] || dst_is_64bit[1] ||
       glsl_base_type_is_64bit(inst->src[0].type)) {
      glsl_to_tgsi_instruction *dinst = NULL;
      int initial_src_swz[4], initial_src_idx[4];
      int initial_dst_idx[2], initial_dst_writemask[2];
      /* select the writemask for dst0 or dst1 */
      unsigned writemask = inst->dst[1].file == PROGRAM_UNDEFINED ?
         inst->dst[0].writemask : inst->dst[1].writemask;

      /* copy out the writemask, index and swizzles for all src/dsts. */
      for (j = 0; j < 2; j++) {
         initial_dst_writemask[j] = inst->dst[j].writemask;
         initial_dst_idx[j] = inst->dst[j].index;
      }

      for (j = 0; j < 4; j++) {
         initial_src_swz[j] = inst->src[j].swizzle;
         initial_src_idx[j] = inst->src[j].index;
      }

      /*
       * scan all the components in the dst writemask
       * generate an instruction for each of them if required.
       */
      st_src_reg addr;
      while (writemask) {

         int i = u_bit_scan(&writemask);

         /* before emitting the instruction, see if we have to adjust
          * load / store address */
         if (i > 1 && (inst->op == TGSI_OPCODE_LOAD || inst->op == TGSI_OPCODE_STORE) &&
             addr.file == PROGRAM_UNDEFINED) {
            /* We have to advance the buffer address by 16 */
            addr = get_temp(glsl_type::uint_type);
            emit_asm(ir, TGSI_OPCODE_UADD, st_dst_reg(addr),
                     inst->src[0], st_src_reg_for_int(16));
         }

         /* first time use previous instruction */
         if (dinst == NULL) {
            dinst = inst;
         } else {
            /* create a new instructions for subsequent attempts */
            dinst = new(mem_ctx) glsl_to_tgsi_instruction();
            *dinst = *inst;
            dinst->next = NULL;
            dinst->prev = NULL;
         }
         this->instructions.push_tail(dinst);
         dinst->is_64bit_expanded = true;

         /* modify the destination if we are splitting */
         for (j = 0; j < 2; j++) {
            if (dst_is_64bit[j]) {
               dinst->dst[j].writemask = (i & 1) ? WRITEMASK_ZW : WRITEMASK_XY;
               dinst->dst[j].index = initial_dst_idx[j];
               if (i > 1) {
                  if (dinst->op == TGSI_OPCODE_LOAD || dinst->op == TGSI_OPCODE_STORE)
                     dinst->src[0] = addr;
                  if (dinst->op != TGSI_OPCODE_STORE)
                     dinst->dst[j].index++;
               }
            } else {
               /* if we aren't writing to a double, just get the bit of the
                * initial writemask for this channel */
               dinst->dst[j].writemask = initial_dst_writemask[j] & (1 << i);
            }
         }

         /* modify the src registers */
         for (j = 0; j < 4; j++) {
            int swz = GET_SWZ(initial_src_swz[j], i);

            if (glsl_base_type_is_64bit(dinst->src[j].type)) {
               dinst->src[j].index = initial_src_idx[j];
               if (swz > 1) {
                  dinst->src[j].double_reg2 = true;
                  dinst->src[j].index++;
               }

               if (swz & 1)
                  dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W,
                                                        SWIZZLE_Z, SWIZZLE_W);
               else
                  dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
                                                        SWIZZLE_X, SWIZZLE_Y);
            } else {
               /* some opcodes are special case in what they use as sources
                * - [FUI]2D/[UI]2I64 is a float/[u]int src0,
                *   (D)LDEXP is integer src1 */
               if (op == TGSI_OPCODE_F2D || op == TGSI_OPCODE_U2D || op == TGSI_OPCODE_I2D ||
                   op == TGSI_OPCODE_I2I64 || op == TGSI_OPCODE_U2I64 ||
                   op == TGSI_OPCODE_DLDEXP || op == TGSI_OPCODE_LDEXP ||
                   (op == TGSI_OPCODE_UCMP && dst_is_64bit[0])) {
                  dinst->src[j].swizzle = MAKE_SWIZZLE4(swz, swz, swz, swz);
               }
            }
         }
      }
      inst = dinst;
   } else {
      this->instructions.push_tail(inst);
   }

   return inst;
}
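/* Added commentary (not from the original source): an example of the 64-bit
 * splitting above.  A double add whose GLSL-level writemask is .xy is
 * re-emitted once per enabled GLSL channel: channel .x writes TGSI
 * TEMP[n].xy and channel .y writes TEMP[n].zw, with each 64-bit source
 * swizzle rewritten to the matching XY or ZW register pair (and the register
 * index advanced for GLSL channels .z/.w, as the mapping table in the
 * comment describes).
 */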
glsl_to_tgsi_instruction *
glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, unsigned op,
                               st_dst_reg dst,
                               st_src_reg src0, st_src_reg src1,
                               st_src_reg src2, st_src_reg src3)
{
   return emit_asm(ir, op, dst, undef_dst, src0, src1, src2, src3);
}
/**
 * Determines whether to use an integer, unsigned integer, or float opcode
 * based on the operands and input opcode, then emits the result.
 */
unsigned
glsl_to_tgsi_visitor::get_opcode(unsigned op,
                                 st_dst_reg dst,
                                 st_src_reg src0, st_src_reg src1)
{
   enum glsl_base_type type = GLSL_TYPE_FLOAT;

   if (op == TGSI_OPCODE_MOV)
      return op;

   assert(src0.type != GLSL_TYPE_ARRAY);
   assert(src0.type != GLSL_TYPE_STRUCT);
   assert(src1.type != GLSL_TYPE_ARRAY);
   assert(src1.type != GLSL_TYPE_STRUCT);

   if (is_resource_instruction(op))
      type = src1.type;
   else if (src0.type == GLSL_TYPE_INT64 || src1.type == GLSL_TYPE_INT64)
      type = GLSL_TYPE_INT64;
   else if (src0.type == GLSL_TYPE_UINT64 || src1.type == GLSL_TYPE_UINT64)
      type = GLSL_TYPE_UINT64;
   else if (src0.type == GLSL_TYPE_DOUBLE || src1.type == GLSL_TYPE_DOUBLE)
      type = GLSL_TYPE_DOUBLE;
   else if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
      type = GLSL_TYPE_FLOAT;
   else if (native_integers)
      type = src0.type == GLSL_TYPE_BOOL ? GLSL_TYPE_INT : src0.type;

#define case7(c, f, i, u, d, i64, ui64)             \
   case TGSI_OPCODE_##c:                            \
      if (type == GLSL_TYPE_UINT64)                 \
         op = TGSI_OPCODE_##ui64;                   \
      else if (type == GLSL_TYPE_INT64)             \
         op = TGSI_OPCODE_##i64;                    \
      else if (type == GLSL_TYPE_DOUBLE)            \
         op = TGSI_OPCODE_##d;                      \
      else if (type == GLSL_TYPE_INT)               \
         op = TGSI_OPCODE_##i;                      \
      else if (type == GLSL_TYPE_UINT)              \
         op = TGSI_OPCODE_##u;                      \
      else                                          \
         op = TGSI_OPCODE_##f;                      \
      break;

#define casecomp(c, f, i, u, d, i64, ui64)          \
   case TGSI_OPCODE_##c:                            \
      if (type == GLSL_TYPE_INT64)                  \
         op = TGSI_OPCODE_##i64;                    \
      else if (type == GLSL_TYPE_UINT64)            \
         op = TGSI_OPCODE_##ui64;                   \
      else if (type == GLSL_TYPE_DOUBLE)            \
         op = TGSI_OPCODE_##d;                      \
      else if (type == GLSL_TYPE_INT || type == GLSL_TYPE_SUBROUTINE) \
         op = TGSI_OPCODE_##i;                      \
      else if (type == GLSL_TYPE_UINT)              \
         op = TGSI_OPCODE_##u;                      \
      else if (native_integers)                     \
         op = TGSI_OPCODE_##f;                      \
      else                                          \
         op = TGSI_OPCODE_##c;                      \
      break;

   switch (op) {
      /* Some instructions are initially selected without considering the type.
       * This fixes the type:
       *
       *    INIT     FLOAT SINT     UINT     DOUBLE   SINT64   UINT64
       */
      case7(ADD,     ADD,  UADD,    UADD,    DADD,    U64ADD,  U64ADD);
      case7(CEIL,    CEIL, LAST,    LAST,    DCEIL,   LAST,    LAST);
      case7(DIV,     DIV,  IDIV,    UDIV,    DDIV,    I64DIV,  U64DIV);
      case7(FMA,     FMA,  UMAD,    UMAD,    DFMA,    LAST,    LAST);
      case7(FLR,     FLR,  LAST,    LAST,    DFLR,    LAST,    LAST);
      case7(FRC,     FRC,  LAST,    LAST,    DFRAC,   LAST,    LAST);
      case7(MUL,     MUL,  UMUL,    UMUL,    DMUL,    U64MUL,  U64MUL);
      case7(MAD,     MAD,  UMAD,    UMAD,    DMAD,    LAST,    LAST);
      case7(MAX,     MAX,  IMAX,    UMAX,    DMAX,    I64MAX,  U64MAX);
      case7(MIN,     MIN,  IMIN,    UMIN,    DMIN,    I64MIN,  U64MIN);
      case7(RCP,     RCP,  LAST,    LAST,    DRCP,    LAST,    LAST);
      case7(ROUND,   ROUND,LAST,    LAST,    DROUND,  LAST,    LAST);
      case7(RSQ,     RSQ,  LAST,    LAST,    DRSQ,    LAST,    LAST);
      case7(SQRT,    SQRT, LAST,    LAST,    DSQRT,   LAST,    LAST);
      case7(SSG,     SSG,  ISSG,    ISSG,    DSSG,    I64SSG,  I64SSG);
      case7(TRUNC,   TRUNC,LAST,    LAST,    DTRUNC,  LAST,    LAST);

      case7(MOD,     LAST, MOD,     UMOD,    LAST,    I64MOD,  U64MOD);
      case7(SHL,     LAST, SHL,     SHL,     LAST,    U64SHL,  U64SHL);
      case7(IBFE,    LAST, IBFE,    UBFE,    LAST,    LAST,    LAST);
      case7(IMSB,    LAST, IMSB,    UMSB,    LAST,    LAST,    LAST);
      case7(IMUL_HI, LAST, IMUL_HI, UMUL_HI, LAST,    LAST,    LAST);
      case7(ISHR,    LAST, ISHR,    USHR,    LAST,    I64SHR,  U64SHR);
      case7(ATOMIMAX,LAST, ATOMIMAX,ATOMUMAX,LAST,    LAST,    LAST);
      case7(ATOMIMIN,LAST, ATOMIMIN,ATOMUMIN,LAST,    LAST,    LAST);

      casecomp(SEQ, FSEQ, USEQ, USEQ, DSEQ, U64SEQ, U64SEQ);
      casecomp(SNE, FSNE, USNE, USNE, DSNE, U64SNE, U64SNE);
      casecomp(SGE, FSGE, ISGE, USGE, DSGE, I64SGE, U64SGE);
      casecomp(SLT, FSLT, ISLT, USLT, DSLT, I64SLT, U64SLT);

      default:
         break;
   }

   assert(op != TGSI_OPCODE_LAST);
   return op;
}
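/* Added commentary (not from the original source): with the tables above, an
 * ADD whose operands were inferred as GLSL_TYPE_DOUBLE becomes
 * TGSI_OPCODE_DADD, an int ADD becomes UADD, and a 64-bit integer ADD becomes
 * U64ADD; entries marked LAST have no variant for that type and would trip
 * the assert at the end of get_opcode() if ever selected.
 */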
glsl_to_tgsi_instruction *
glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
                              st_dst_reg dst, st_src_reg src0, st_src_reg src1,
                              unsigned elements)
{
   static const unsigned dot_opcodes[] = {
      TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
   };

   return emit_asm(ir, dot_opcodes[elements - 2], dst, src0, src1);
}
/**
 * Emits TGSI scalar opcodes to produce unique answers across channels.
 *
 * Some TGSI opcodes are scalar-only, like ARB_fp/vp.  The src X
 * channel determines the result across all channels.  So to do a vec4
 * of this operation, we want to emit a scalar per source channel used
 * to produce dest channels.
 */
void
glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
                                  st_dst_reg dst,
                                  st_src_reg orig_src0, st_src_reg orig_src1)
{
   int i, j;
   int done_mask = ~dst.writemask;

   /* TGSI RCP is a scalar operation splatting results to all channels,
    * like ARB_fp/vp.  So emit as many RCPs as necessary to cover our
    * dst channels.
    */
   for (i = 0; i < 4; i++) {
      GLuint this_mask = (1 << i);
      st_src_reg src0 = orig_src0;
      st_src_reg src1 = orig_src1;

      if (done_mask & this_mask)
         continue;

      GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
      GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
      for (j = i + 1; j < 4; j++) {
         /* If there is another enabled component in the destination that is
          * derived from the same inputs, generate its value on this pass as
          * well.
          */
         if (!(done_mask & (1 << j)) &&
             GET_SWZ(src0.swizzle, j) == src0_swiz &&
             GET_SWZ(src1.swizzle, j) == src1_swiz) {
            this_mask |= (1 << j);
         }
      }
      src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
                                   src0_swiz, src0_swiz);
      src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
                                   src1_swiz, src1_swiz);

      dst.writemask = this_mask;
      emit_asm(ir, op, dst, src0, src1);
      done_mask |= this_mask;
   }
}
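/* Added commentary (not from the original source): e.g. for a scalar op with
 * destination writemask .xyz and source swizzle .xxy, the loop above emits
 * only two instructions, one covering .xy (both read the same source
 * component X) and one covering .z (reading Y), instead of one per channel.
 */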
void
glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
                                  st_dst_reg dst, st_src_reg src0)
{
   st_src_reg undef = undef_src;

   undef.swizzle = SWIZZLE_XXXX;

   emit_scalar(ir, op, dst, src0, undef);
}
void
glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
                               st_dst_reg dst, st_src_reg src0)
{
   int op = TGSI_OPCODE_ARL;

   if (src0.type == GLSL_TYPE_INT || src0.type == GLSL_TYPE_UINT)
      op = TGSI_OPCODE_UARL;

   assert(dst.file == PROGRAM_ADDRESS);
   if (dst.index >= this->num_address_regs)
      this->num_address_regs = dst.index + 1;

   emit_asm(NULL, op, dst, src0);
}
int
glsl_to_tgsi_visitor::add_constant(gl_register_file file,
                                   gl_constant_value values[8], int size,
                                   int datatype, uint16_t *swizzle_out)
{
   if (file == PROGRAM_CONSTANT) {
      GLuint swizzle = swizzle_out ? *swizzle_out : 0;
      int result = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
                                                    values, size, datatype,
                                                    &swizzle);
      if (swizzle_out)
         *swizzle_out = swizzle;
      return result;
   }

   assert(file == PROGRAM_IMMEDIATE);

   int index = 0;
   immediate_storage *entry;
   int size32 = size * ((datatype == GL_DOUBLE ||
                         datatype == GL_INT64_ARB ||
                         datatype == GL_UNSIGNED_INT64_ARB) ? 2 : 1);
   int i;

   /* Search immediate storage to see if we already have an identical
    * immediate that we can use instead of adding a duplicate entry.
    */
   foreach_in_list(immediate_storage, entry, &this->immediates) {
      immediate_storage *tmp = entry;

      for (i = 0; i * 4 < size32; i++) {
         int slot_size = MIN2(size32 - (i * 4), 4);
         if (tmp->type != datatype || tmp->size32 != slot_size)
            break;
         if (memcmp(tmp->values, &values[i * 4],
                    slot_size * sizeof(gl_constant_value)))
            break;

         /* Everything matches, keep going until the full size is matched */
         tmp = (immediate_storage *)tmp->next;
      }

      /* The full value matched */
      if (i * 4 >= size32)
         return index;

      index++;
   }

   for (i = 0; i * 4 < size32; i++) {
      int slot_size = MIN2(size32 - (i * 4), 4);
      /* Add this immediate to the list. */
      entry = new(mem_ctx) immediate_storage(&values[i * 4], slot_size, datatype);
      this->immediates.push_tail(entry);
      this->num_immediates++;
   }

   return index;
}
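/* Added commentary (not from the original source): immediates are
 * deduplicated by the search loop above, so repeated uses of the same
 * constant resolve to a single PROGRAM_IMMEDIATE entry; GL_DOUBLE and the
 * 64-bit integer types take two 32-bit components per element, which is why
 * size32 is doubled for them.
 */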
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_FLOAT);
   union gl_constant_value uval;

   uval.f = val;
   src.index = add_constant(src.file, &uval, 1, GL_FLOAT, &src.swizzle);

   return src;
}
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_double(double val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_DOUBLE);
   union gl_constant_value uval[2];

   memcpy(uval, &val, sizeof(uval));
   src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
   src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);

   return src;
}
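/* Added commentary (not from the original source): a double immediate
 * occupies two 32-bit gl_constant_value slots, so the forced XYXY swizzle
 * above makes both 64-bit channels of the source read the same X/Y pair;
 * st_src_reg_for_int64() below uses the same layout.
 */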
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT);
   union gl_constant_value uval;

   assert(native_integers);

   uval.i = val;
   src.index = add_constant(src.file, &uval, 1, GL_INT, &src.swizzle);

   return src;
}
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_int64(int64_t val)
{
   st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT64);
   union gl_constant_value uval[2];

   memcpy(uval, &val, sizeof(uval));
   src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
   src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);

   return src;
}
st_src_reg
glsl_to_tgsi_visitor::st_src_reg_for_type(enum glsl_base_type type, int val)
{
   if (native_integers)
      return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
                                       st_src_reg_for_int(val);
   else
      return st_src_reg_for_float(val);
}
static int
attrib_type_size(const struct glsl_type *type, bool is_vs_input)
{
   return type->count_attribute_slots(is_vs_input);
}

static int
type_size(const struct glsl_type *type)
{
   return type->count_attribute_slots(false);
}
static void
add_buffer_to_load_and_stores(glsl_to_tgsi_instruction *inst, st_src_reg *buf,
                              exec_list *instructions, ir_constant *access)
{
   /**
    * emit_asm() might have actually split the op into pieces, e.g. for
    * double stores. We have to go back and fix up all the generated ops.
    */
   unsigned op = inst->op;
   do {
      inst->resource = *buf;
      if (access)
         inst->buffer_access = access->value.u[0];

      if (inst == instructions->get_head_raw())
         break;
      inst = (glsl_to_tgsi_instruction *)inst->get_prev();

      if (inst->op == TGSI_OPCODE_UADD) {
         if (inst == instructions->get_head_raw())
            break;
         inst = (glsl_to_tgsi_instruction *)inst->get_prev();
      }
   } while (inst->op == op && inst->resource.file == PROGRAM_UNDEFINED);
}
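/* Added commentary (not from the original source): the backwards walk above
 * revisits the extra instructions that emit_asm() created when it expanded a
 * 64-bit load/store into per-channel-pair copies (plus the UADD it inserted
 * to advance the buffer address by 16 bytes), tagging each of them with the
 * same resource register and access qualifiers.
 */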
/**
 * If the given GLSL type is an array or matrix or a structure containing
 * an array/matrix member, return true. Else return false.
 *
 * This is used to determine which kind of temp storage (PROGRAM_TEMPORARY
 * or PROGRAM_ARRAY) should be used for variables of this type.  Anytime
 * we have an array that might be indexed with a variable, we need to use
 * the later storage type.
 */
static bool
type_has_array_or_matrix(const glsl_type *type)
{
   if (type->is_array() || type->is_matrix())
      return true;

   if (type->is_record()) {
      for (unsigned i = 0; i < type->length; i++) {
         if (type_has_array_or_matrix(type->fields.structure[i].type)) {
            return true;
         }
      }
   }

   return false;
}
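/* Added commentary (not from the original source): e.g. a GLSL
 * "struct { float weights[4]; vec3 pos; }" returns true here because of the
 * nested array member, so get_temp() below allocates it in PROGRAM_ARRAY
 * rather than PROGRAM_TEMPORARY.
 */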
/**
 * In the initial pass of codegen, we assign temporary numbers to
 * intermediate results.  (not SSA -- variable assignments will reuse
 * storage).
 */
st_src_reg
glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
{
   st_src_reg src;

   src.type = native_integers ? type->base_type : GLSL_TYPE_FLOAT;
   src.reladdr = NULL;

   if (!options->EmitNoIndirectTemp && type_has_array_or_matrix(type)) {
      if (next_array >= max_num_arrays) {
         max_num_arrays += 32;
         array_sizes = (unsigned*)
            realloc(array_sizes, sizeof(array_sizes[0]) * max_num_arrays);
      }

      src.file = PROGRAM_ARRAY;
      src.index = 0;
      src.array_id = next_array + 1;
      array_sizes[next_array] = type_size(type);
      ++next_array;
   } else {
      src.file = PROGRAM_TEMPORARY;
      src.index = next_temp;
      next_temp += type_size(type);
   }

   if (type->is_array() || type->is_record()) {
      src.swizzle = SWIZZLE_NOOP;
   } else {
      src.swizzle = swizzle_for_size(type->vector_elements);
   }

   return src;
}
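/* Added commentary (not from the original source): temporaries that may be
 * indirectly addressed (arrays, matrices, structs containing them) get a
 * 1-based array_id in PROGRAM_ARRAY, while everything else simply takes the
 * next PROGRAM_TEMPORARY index; next_temp advances by type_size(type), so a
 * struct of two vec4s, for example, reserves two consecutive registers.
 */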
variable_storage *
glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
{
   struct hash_entry *entry;

   entry = _mesa_hash_table_search(this->variables, var);
   if (!entry)
      return NULL;

   return (variable_storage *)entry->data;
}
void
glsl_to_tgsi_visitor::visit(ir_variable *ir)
{
   if (strcmp(ir->name, "gl_FragCoord") == 0) {
      this->prog->OriginUpperLeft = ir->data.origin_upper_left;
      this->prog->PixelCenterInteger = ir->data.pixel_center_integer;
   }

   if (ir->data.mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
      unsigned int i;
      const ir_state_slot *const slots = ir->get_state_slots();
      assert(slots != NULL);

      /* Check if this statevar's setup in the STATE file exactly
       * matches how we'll want to reference it as a
       * struct/array/whatever.  If not, then we need to move it into
       * temporary storage and hope that it'll get copy-propagated
       * out.
       */
      for (i = 0; i < ir->get_num_state_slots(); i++) {
         if (slots[i].swizzle != SWIZZLE_XYZW) {
            break;
         }
      }

      variable_storage *storage;
      st_dst_reg dst;
      if (i == ir->get_num_state_slots()) {
         /* We'll set the index later. */
         storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);

         _mesa_hash_table_insert(this->variables, ir, storage);

         dst = undef_dst;
      } else {
         /* The variable_storage constructor allocates slots based on the size
          * of the type.  However, this had better match the number of state
          * elements that we're going to copy into the new temporary.
          */
         assert((int) ir->get_num_state_slots() == type_size(ir->type));

         dst = st_dst_reg(get_temp(ir->type));

         storage = new(mem_ctx) variable_storage(ir, dst.file, dst.index,
                                                 dst.array_id);

         _mesa_hash_table_insert(this->variables, ir, storage);
      }

      for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
         int index = _mesa_add_state_reference(this->prog->Parameters,
                                               (gl_state_index *)slots[i].tokens);

         if (storage->file == PROGRAM_STATE_VAR) {
            if (storage->index == -1) {
               storage->index = index;
            } else {
               assert(index == storage->index + (int)i);
            }
         } else {
            /* We use GLSL_TYPE_FLOAT here regardless of the actual type of
             * the data being moved since MOV does not care about the type of
             * data it is moving, and we don't want to declare registers with
             * array or struct types.
             */
            st_src_reg src(PROGRAM_STATE_VAR, index, GLSL_TYPE_FLOAT);
            src.swizzle = slots[i].swizzle;
            emit_asm(ir, TGSI_OPCODE_MOV, dst, src);
            /* even a float takes up a whole vec4 reg in a struct/array. */
            dst.index++;
         }
      }

      if (storage->file == PROGRAM_TEMPORARY &&
          dst.index != storage->index + (int) ir->get_num_state_slots()) {
         fail_link(this->shader_program,
                   "failed to load builtin uniform `%s'  (%d/%d regs loaded)\n",
                   ir->name, dst.index - storage->index,
                   type_size(ir->type));
      }
   }
}
void
glsl_to_tgsi_visitor::visit(ir_loop *ir)
{
   emit_asm(NULL, TGSI_OPCODE_BGNLOOP);

   visit_exec_list(&ir->body_instructions, this);

   emit_asm(NULL, TGSI_OPCODE_ENDLOOP);
}
void
glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit_asm(NULL, TGSI_OPCODE_BRK);
      break;
   case ir_loop_jump::jump_continue:
      emit_asm(NULL, TGSI_OPCODE_CONT);
      break;
   }
}
void
glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
{
   assert(0);
   (void)ir;
}
void
glsl_to_tgsi_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to glsl_to_tgsi.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(NULL, &empty, false);

      assert(sig);

      foreach_in_list(ir_instruction, ir, &sig->body) {
         ir->accept(this);
      }
   }
}
*ir
, int mul_operand
)
1193 int nonmul_operand
= 1 - mul_operand
;
1195 st_dst_reg result_dst
;
1197 ir_expression
*expr
= ir
->operands
[mul_operand
]->as_expression();
1198 if (!expr
|| expr
->operation
!= ir_binop_mul
)
1201 expr
->operands
[0]->accept(this);
1203 expr
->operands
[1]->accept(this);
1205 ir
->operands
[nonmul_operand
]->accept(this);
1208 this->result
= get_temp(ir
->type
);
1209 result_dst
= st_dst_reg(this->result
);
1210 result_dst
.writemask
= (1 << ir
->type
->vector_elements
) - 1;
1211 emit_asm(ir
, TGSI_OPCODE_MAD
, result_dst
, a
, b
, c
);
1217 * Emit MAD(a, -b, a) instead of AND(a, NOT(b))
1219 * The logic values are 1.0 for true and 0.0 for false. Logical-and is
1220 * implemented using multiplication, and logical-or is implemented using
1221 * addition. Logical-not can be implemented as (true - x), or (1.0 - x).
1222 * As result, the logical expression (a & !b) can be rewritten as:
1226 * - (a * 1) - (a * b)
1230 * This final expression can be implemented as a single MAD(a, -b, a)
1234 glsl_to_tgsi_visitor::try_emit_mad_for_and_not(ir_expression
*ir
, int try_operand
)
1236 const int other_operand
= 1 - try_operand
;
1239 ir_expression
*expr
= ir
->operands
[try_operand
]->as_expression();
1240 if (!expr
|| expr
->operation
!= ir_unop_logic_not
)
1243 ir
->operands
[other_operand
]->accept(this);
1245 expr
->operands
[0]->accept(this);
1248 b
.negate
= ~b
.negate
;
1250 this->result
= get_temp(ir
->type
);
1251 emit_asm(ir
, TGSI_OPCODE_MAD
, st_dst_reg(this->result
), a
, b
, a
);
void
glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
                                      st_src_reg *reg, int *num_reladdr)
{
   if (!reg->reladdr && !reg->reladdr2)
      return;

   if (reg->reladdr) emit_arl(ir, address_reg, *reg->reladdr);
   if (reg->reladdr2) emit_arl(ir, address_reg2, *reg->reladdr2);

   if (*num_reladdr != 1) {
      st_src_reg temp = get_temp(reg->type == GLSL_TYPE_DOUBLE ?
                                 glsl_type::dvec4_type : glsl_type::vec4_type);

      emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
      *reg = temp;
   }

   (*num_reladdr)--;
}
void
glsl_to_tgsi_visitor::visit(ir_expression *ir)
{
   st_src_reg op[ARRAY_SIZE(ir->operands)];

   /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
    */
   if (!this->precise && ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 1))
         return;
      if (try_emit_mad(ir, 0))
         return;
   }

   /* Quick peephole: Emit OPCODE_MAD(-a, -b, a) instead of AND(a, NOT(b))
    */
   if (!native_integers && ir->operation == ir_binop_logic_and) {
      if (try_emit_mad_for_and_not(ir, 1))
         return;
      if (try_emit_mad_for_and_not(ir, 0))
         return;
   }

   if (ir->operation == ir_quadop_vector)
      assert(!"ir_quadop_vector should have been lowered");

   for (unsigned int operand = 0; operand < ir->num_operands; operand++) {
      this->result.file = PROGRAM_UNDEFINED;
      ir->operands[operand]->accept(this);
      if (this->result.file == PROGRAM_UNDEFINED) {
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->print();
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   visit_expression(ir, op);
}
1322 /* The non-recursive part of the expression visitor lives in a separate
1323 * function and should be prevented from being inlined, to avoid a stack
1324 * explosion when deeply nested expressions are visited.
1327 glsl_to_tgsi_visitor::visit_expression(ir_expression
* ir
, st_src_reg
*op
)
1329 st_src_reg result_src
;
1330 st_dst_reg result_dst
;
1332 int vector_elements
= ir
->operands
[0]->type
->vector_elements
;
1333 if (ir
->operands
[1]) {
1334 vector_elements
= MAX2(vector_elements
,
1335 ir
->operands
[1]->type
->vector_elements
);
1338 this->result
.file
= PROGRAM_UNDEFINED
;
1340 /* Storage for our result. Ideally for an assignment we'd be using
1341 * the actual storage for the result here, instead.
1343 result_src
= get_temp(ir
->type
);
1344 /* convenience for the emit functions below. */
1345 result_dst
= st_dst_reg(result_src
);
1346 /* Limit writes to the channels that will be used by result_src later.
1347 * This does limit this temp's use as a temporary for multi-instruction
1350 result_dst
.writemask
= (1 << ir
->type
->vector_elements
) - 1;
1352 switch (ir
->operation
) {
1353 case ir_unop_logic_not
:
1354 if (result_dst
.type
!= GLSL_TYPE_FLOAT
)
1355 emit_asm(ir
, TGSI_OPCODE_NOT
, result_dst
, op
[0]);
1357 /* Previously 'SEQ dst, src, 0.0' was used for this. However, many
1358 * older GPUs implement SEQ using multiple instructions (i915 uses two
1359 * SGE instructions and a MUL instruction). Since our logic values are
1360 * 0.0 and 1.0, 1-x also implements !x.
1362 op
[0].negate
= ~op
[0].negate
;
1363 emit_asm(ir
, TGSI_OPCODE_ADD
, result_dst
, op
[0], st_src_reg_for_float(1.0));
1367 if (result_dst
.type
== GLSL_TYPE_INT64
|| result_dst
.type
== GLSL_TYPE_UINT64
)
1368 emit_asm(ir
, TGSI_OPCODE_I64NEG
, result_dst
, op
[0]);
1369 else if (result_dst
.type
== GLSL_TYPE_INT
|| result_dst
.type
== GLSL_TYPE_UINT
)
1370 emit_asm(ir
, TGSI_OPCODE_INEG
, result_dst
, op
[0]);
1371 else if (result_dst
.type
== GLSL_TYPE_DOUBLE
)
1372 emit_asm(ir
, TGSI_OPCODE_DNEG
, result_dst
, op
[0]);
1374 op
[0].negate
= ~op
[0].negate
;
1378 case ir_unop_subroutine_to_int
:
1379 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, op
[0]);
1382 if (result_dst
.type
== GLSL_TYPE_FLOAT
)
1383 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, op
[0].get_abs());
1384 else if (result_dst
.type
== GLSL_TYPE_DOUBLE
)
1385 emit_asm(ir
, TGSI_OPCODE_DABS
, result_dst
, op
[0]);
1386 else if (result_dst
.type
== GLSL_TYPE_INT64
|| result_dst
.type
== GLSL_TYPE_UINT64
)
1387 emit_asm(ir
, TGSI_OPCODE_I64ABS
, result_dst
, op
[0]);
1389 emit_asm(ir
, TGSI_OPCODE_IABS
, result_dst
, op
[0]);
1392 emit_asm(ir
, TGSI_OPCODE_SSG
, result_dst
, op
[0]);
1395 emit_scalar(ir
, TGSI_OPCODE_RCP
, result_dst
, op
[0]);
1399 emit_scalar(ir
, TGSI_OPCODE_EX2
, result_dst
, op
[0]);
1402 assert(!"not reached: should be handled by exp_to_exp2");
1405 assert(!"not reached: should be handled by log_to_log2");
1408 emit_scalar(ir
, TGSI_OPCODE_LG2
, result_dst
, op
[0]);
1411 emit_scalar(ir
, TGSI_OPCODE_SIN
, result_dst
, op
[0]);
1414 emit_scalar(ir
, TGSI_OPCODE_COS
, result_dst
, op
[0]);
1416 case ir_unop_saturate
: {
1417 glsl_to_tgsi_instruction
*inst
;
1418 inst
= emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, op
[0]);
1419 inst
->saturate
= true;
1424 case ir_unop_dFdx_coarse
:
1425 emit_asm(ir
, TGSI_OPCODE_DDX
, result_dst
, op
[0]);
1427 case ir_unop_dFdx_fine
:
1428 emit_asm(ir
, TGSI_OPCODE_DDX_FINE
, result_dst
, op
[0]);
1431 case ir_unop_dFdy_coarse
:
1432 case ir_unop_dFdy_fine
:
1434 /* The X component contains 1 or -1 depending on whether the framebuffer
1435 * is a FBO or the window system buffer, respectively.
1436 * It is then multiplied with the source operand of DDY.
1438 static const gl_state_index transform_y_state
[STATE_LENGTH
]
1439 = { STATE_INTERNAL
, STATE_FB_WPOS_Y_TRANSFORM
};
1441 unsigned transform_y_index
=
1442 _mesa_add_state_reference(this->prog
->Parameters
,
1445 st_src_reg transform_y
= st_src_reg(PROGRAM_STATE_VAR
,
1447 glsl_type::vec4_type
);
1448 transform_y
.swizzle
= SWIZZLE_XXXX
;
1450 st_src_reg temp
= get_temp(glsl_type::vec4_type
);
1452 emit_asm(ir
, TGSI_OPCODE_MUL
, st_dst_reg(temp
), transform_y
, op
[0]);
1453 emit_asm(ir
, ir
->operation
== ir_unop_dFdy_fine
?
1454 TGSI_OPCODE_DDY_FINE
: TGSI_OPCODE_DDY
, result_dst
, temp
);
1458 case ir_unop_frexp_sig
:
1459 emit_asm(ir
, TGSI_OPCODE_DFRACEXP
, result_dst
, undef_dst
, op
[0]);
1462 case ir_unop_frexp_exp
:
1463 emit_asm(ir
, TGSI_OPCODE_DFRACEXP
, undef_dst
, result_dst
, op
[0]);
1466 case ir_unop_noise
: {
1467 /* At some point, a motivated person could add a better
1468 * implementation of noise. Currently not even the nvidia
1469 * binary drivers do anything more than this. In any case, the
1470 * place to do this is in the GL state tracker, not the poor
1473 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, st_src_reg_for_float(0.5));
1478 emit_asm(ir
, TGSI_OPCODE_ADD
, result_dst
, op
[0], op
[1]);
1481 op
[1].negate
= ~op
[1].negate
;
1482 emit_asm(ir
, TGSI_OPCODE_ADD
, result_dst
, op
[0], op
[1]);
1486 emit_asm(ir
, TGSI_OPCODE_MUL
, result_dst
, op
[0], op
[1]);
1489 emit_asm(ir
, TGSI_OPCODE_DIV
, result_dst
, op
[0], op
[1]);
1492 if (result_dst
.type
== GLSL_TYPE_FLOAT
)
1493 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1495 emit_asm(ir
, TGSI_OPCODE_MOD
, result_dst
, op
[0], op
[1]);
1499 emit_asm(ir
, TGSI_OPCODE_SLT
, result_dst
, op
[0], op
[1]);
1501 case ir_binop_greater
:
1502 emit_asm(ir
, TGSI_OPCODE_SLT
, result_dst
, op
[1], op
[0]);
1504 case ir_binop_lequal
:
1505 emit_asm(ir
, TGSI_OPCODE_SGE
, result_dst
, op
[1], op
[0]);
1507 case ir_binop_gequal
:
1508 emit_asm(ir
, TGSI_OPCODE_SGE
, result_dst
, op
[0], op
[1]);
1510 case ir_binop_equal
:
1511 emit_asm(ir
, TGSI_OPCODE_SEQ
, result_dst
, op
[0], op
[1]);
1513 case ir_binop_nequal
:
1514 emit_asm(ir
, TGSI_OPCODE_SNE
, result_dst
, op
[0], op
[1]);
1516 case ir_binop_all_equal
:
1517 /* "==" operator producing a scalar boolean. */
1518 if (ir
->operands
[0]->type
->is_vector() ||
1519 ir
->operands
[1]->type
->is_vector()) {
1520 st_src_reg temp
= get_temp(native_integers
?
1521 glsl_type::uvec4_type
:
1522 glsl_type::vec4_type
);
1524 if (native_integers
) {
1525 st_dst_reg temp_dst
= st_dst_reg(temp
);
1526 st_src_reg temp1
= st_src_reg(temp
), temp2
= st_src_reg(temp
);
1528 if (ir
->operands
[0]->type
->is_boolean() &&
1529 ir
->operands
[1]->as_constant() &&
1530 ir
->operands
[1]->as_constant()->is_one()) {
1531 emit_asm(ir
, TGSI_OPCODE_MOV
, st_dst_reg(temp
), op
[0]);
1533 emit_asm(ir
, TGSI_OPCODE_SEQ
, st_dst_reg(temp
), op
[0], op
[1]);
1536 /* Emit 1-3 AND operations to combine the SEQ results. */
1537 switch (ir
->operands
[0]->type
->vector_elements
) {
1541 temp_dst
.writemask
= WRITEMASK_Y
;
1542 temp1
.swizzle
= SWIZZLE_YYYY
;
1543 temp2
.swizzle
= SWIZZLE_ZZZZ
;
1544 emit_asm(ir
, TGSI_OPCODE_AND
, temp_dst
, temp1
, temp2
);
1547 temp_dst
.writemask
= WRITEMASK_X
;
1548 temp1
.swizzle
= SWIZZLE_XXXX
;
1549 temp2
.swizzle
= SWIZZLE_YYYY
;
1550 emit_asm(ir
, TGSI_OPCODE_AND
, temp_dst
, temp1
, temp2
);
1551 temp_dst
.writemask
= WRITEMASK_Y
;
1552 temp1
.swizzle
= SWIZZLE_ZZZZ
;
1553 temp2
.swizzle
= SWIZZLE_WWWW
;
1554 emit_asm(ir
, TGSI_OPCODE_AND
, temp_dst
, temp1
, temp2
);
1557 temp1
.swizzle
= SWIZZLE_XXXX
;
1558 temp2
.swizzle
= SWIZZLE_YYYY
;
1559 emit_asm(ir
, TGSI_OPCODE_AND
, result_dst
, temp1
, temp2
);
1561 emit_asm(ir
, TGSI_OPCODE_SNE
, st_dst_reg(temp
), op
[0], op
[1]);
1563 /* After the dot-product, the value will be an integer on the
1564 * range [0,4]. Zero becomes 1.0, and positive values become zero.
1566 emit_dp(ir
, result_dst
, temp
, temp
, vector_elements
);
1568 /* Negating the result of the dot-product gives values on the range
1569 * [-4, 0]. Zero becomes 1.0, and negative values become zero.
1570 * This is achieved using SGE.
1572 st_src_reg sge_src
= result_src
;
1573 sge_src
.negate
= ~sge_src
.negate
;
1574 emit_asm(ir
, TGSI_OPCODE_SGE
, result_dst
, sge_src
, st_src_reg_for_float(0.0));
1577 emit_asm(ir
, TGSI_OPCODE_SEQ
, result_dst
, op
[0], op
[1]);
1580 case ir_binop_any_nequal
:
1581 /* "!=" operator producing a scalar boolean. */
1582 if (ir
->operands
[0]->type
->is_vector() ||
1583 ir
->operands
[1]->type
->is_vector()) {
1584 st_src_reg temp
= get_temp(native_integers
?
1585 glsl_type::uvec4_type
:
1586 glsl_type::vec4_type
);
1587 if (ir
->operands
[0]->type
->is_boolean() &&
1588 ir
->operands
[1]->as_constant() &&
1589 ir
->operands
[1]->as_constant()->is_zero()) {
1590 emit_asm(ir
, TGSI_OPCODE_MOV
, st_dst_reg(temp
), op
[0]);
1592 emit_asm(ir
, TGSI_OPCODE_SNE
, st_dst_reg(temp
), op
[0], op
[1]);
1595 if (native_integers
) {
1596 st_dst_reg temp_dst
= st_dst_reg(temp
);
1597 st_src_reg temp1
= st_src_reg(temp
), temp2
= st_src_reg(temp
);
1599 /* Emit 1-3 OR operations to combine the SNE results. */
1600 switch (ir
->operands
[0]->type
->vector_elements
) {
1604 temp_dst
.writemask
= WRITEMASK_Y
;
1605 temp1
.swizzle
= SWIZZLE_YYYY
;
1606 temp2
.swizzle
= SWIZZLE_ZZZZ
;
1607 emit_asm(ir
, TGSI_OPCODE_OR
, temp_dst
, temp1
, temp2
);
1610 temp_dst
.writemask
= WRITEMASK_X
;
1611 temp1
.swizzle
= SWIZZLE_XXXX
;
1612 temp2
.swizzle
= SWIZZLE_YYYY
;
1613 emit_asm(ir
, TGSI_OPCODE_OR
, temp_dst
, temp1
, temp2
);
1614 temp_dst
.writemask
= WRITEMASK_Y
;
1615 temp1
.swizzle
= SWIZZLE_ZZZZ
;
1616 temp2
.swizzle
= SWIZZLE_WWWW
;
1617 emit_asm(ir
, TGSI_OPCODE_OR
, temp_dst
, temp1
, temp2
);
1620 temp1
.swizzle
= SWIZZLE_XXXX
;
1621 temp2
.swizzle
= SWIZZLE_YYYY
;
1622 emit_asm(ir
, TGSI_OPCODE_OR
, result_dst
, temp1
, temp2
);
1624 /* After the dot-product, the value will be an integer on the
1625 * range [0,4]. Zero stays zero, and positive values become 1.0.
1627 glsl_to_tgsi_instruction
*const dp
=
1628 emit_dp(ir
, result_dst
, temp
, temp
, vector_elements
);
1629 if (this->prog
->Target
== GL_FRAGMENT_PROGRAM_ARB
) {
1630 /* The clamping to [0,1] can be done for free in the fragment
1631 * shader with a saturate.
1633 dp
->saturate
= true;
1635 /* Negating the result of the dot-product gives values on the range
1636 * [-4, 0]. Zero stays zero, and negative values become 1.0. This
1637 * achieved using SLT.
1639 st_src_reg slt_src
= result_src
;
1640 slt_src
.negate
= ~slt_src
.negate
;
1641 emit_asm(ir
, TGSI_OPCODE_SLT
, result_dst
, slt_src
, st_src_reg_for_float(0.0));
1645 emit_asm(ir
, TGSI_OPCODE_SNE
, result_dst
, op
[0], op
[1]);
1649 case ir_binop_logic_xor
:
1650 if (native_integers
)
1651 emit_asm(ir
, TGSI_OPCODE_XOR
, result_dst
, op
[0], op
[1]);
1653 emit_asm(ir
, TGSI_OPCODE_SNE
, result_dst
, op
[0], op
[1]);
1656 case ir_binop_logic_or
: {
1657 if (native_integers
) {
1658 /* If integers are used as booleans, we can use an actual "or"
1661 assert(native_integers
);
1662 emit_asm(ir
, TGSI_OPCODE_OR
, result_dst
, op
[0], op
[1]);
1664 /* After the addition, the value will be an integer on the
1665 * range [0,2]. Zero stays zero, and positive values become 1.0.
1667 glsl_to_tgsi_instruction
*add
=
1668 emit_asm(ir
, TGSI_OPCODE_ADD
, result_dst
, op
[0], op
[1]);
1669 if (this->prog
->Target
== GL_FRAGMENT_PROGRAM_ARB
) {
1670 /* The clamping to [0,1] can be done for free in the fragment
1671 * shader with a saturate if floats are being used as boolean values.
1673 add
->saturate
= true;
1675 /* Negating the result of the addition gives values on the range
1676 * [-2, 0]. Zero stays zero, and negative values become 1.0. This
1677 * is achieved using SLT.
1679 st_src_reg slt_src
= result_src
;
1680 slt_src
.negate
= ~slt_src
.negate
;
1681 emit_asm(ir
, TGSI_OPCODE_SLT
, result_dst
, slt_src
, st_src_reg_for_float(0.0));
1687 case ir_binop_logic_and
:
1688 /* If native integers are disabled, the bool args are stored as float 0.0
1689 * or 1.0, so "mul" gives us "and". If they're enabled, just use the
1690 * actual AND opcode.
1692 if (native_integers
)
1693 emit_asm(ir
, TGSI_OPCODE_AND
, result_dst
, op
[0], op
[1]);
1695 emit_asm(ir
, TGSI_OPCODE_MUL
, result_dst
, op
[0], op
[1]);
1699 assert(ir
->operands
[0]->type
->is_vector());
1700 assert(ir
->operands
[0]->type
== ir
->operands
[1]->type
);
1701 emit_dp(ir
, result_dst
, op
[0], op
[1],
1702 ir
->operands
[0]->type
->vector_elements
);
1707 emit_scalar(ir
, TGSI_OPCODE_SQRT
, result_dst
, op
[0]);
1709 /* This is the only instruction sequence that makes the game "Risen"
1710 * render correctly. ABS is not required for the game, but since GLSL
1711 * declares negative values as "undefined", allowing us to do whatever
1712 * we want, I choose to use ABS to match DX9 and pre-GLSL RSQ
1715 emit_scalar(ir
, TGSI_OPCODE_RSQ
, result_dst
, op
[0].get_abs());
1716 emit_scalar(ir
, TGSI_OPCODE_RCP
, result_dst
, result_src
);
1720 emit_scalar(ir
, TGSI_OPCODE_RSQ
, result_dst
, op
[0]);
1723 if (native_integers
) {
1724 emit_asm(ir
, TGSI_OPCODE_I2F
, result_dst
, op
[0]);
1727 /* fallthrough to next case otherwise */
1729 if (native_integers
) {
1730 emit_asm(ir
, TGSI_OPCODE_AND
, result_dst
, op
[0], st_src_reg_for_float(1.0));
1733 /* fallthrough to next case otherwise */
1736 case ir_unop_i642u64
:
1737 case ir_unop_u642i64
:
1738 /* Converting between signed and unsigned integers is a no-op. */
1740 result_src
.type
= result_dst
.type
;
1743 if (native_integers
) {
1744 /* Booleans are stored as integers using ~0 for true and 0 for false.
1745 * GLSL requires that int(bool) return 1 for true and 0 for false.
1746 * This conversion is done with AND, but it could be done with NEG.
1748 emit_asm(ir
, TGSI_OPCODE_AND
, result_dst
, op
[0], st_src_reg_for_int(1));
1750 /* Booleans and integers are both stored as floats when native
1751 * integers are disabled.
1757 if (native_integers
)
1758 emit_asm(ir
, TGSI_OPCODE_F2I
, result_dst
, op
[0]);
1760 emit_asm(ir
, TGSI_OPCODE_TRUNC
, result_dst
, op
[0]);
1763 if (native_integers
)
1764 emit_asm(ir
, TGSI_OPCODE_F2U
, result_dst
, op
[0]);
1766 emit_asm(ir
, TGSI_OPCODE_TRUNC
, result_dst
, op
[0]);
1768 case ir_unop_bitcast_f2i
:
1769 case ir_unop_bitcast_f2u
:
1770 /* Make sure we don't propagate the negate modifier to integer opcodes. */
1771 if (op
[0].negate
|| op
[0].abs
)
1772 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, op
[0]);
1775 result_src
.type
= ir
->operation
== ir_unop_bitcast_f2i
? GLSL_TYPE_INT
:
1778 case ir_unop_bitcast_i2f
:
1779 case ir_unop_bitcast_u2f
:
1781 result_src
.type
= GLSL_TYPE_FLOAT
;
1784 emit_asm(ir
, TGSI_OPCODE_SNE
, result_dst
, op
[0], st_src_reg_for_float(0.0));
1787 emit_asm(ir
, TGSI_OPCODE_SNE
, result_dst
, op
[0], st_src_reg_for_double(0.0));
1790 if (native_integers
)
1791 emit_asm(ir
, TGSI_OPCODE_USNE
, result_dst
, op
[0], st_src_reg_for_int(0));
1793 emit_asm(ir
, TGSI_OPCODE_SNE
, result_dst
, op
[0], st_src_reg_for_float(0.0));
1795 case ir_unop_bitcast_u642d
:
1796 case ir_unop_bitcast_i642d
:
1798 result_src
.type
= GLSL_TYPE_DOUBLE
;
1800 case ir_unop_bitcast_d2i64
:
1802 result_src
.type
= GLSL_TYPE_INT64
;
1804 case ir_unop_bitcast_d2u64
:
1806 result_src
.type
= GLSL_TYPE_UINT64
;
1809 emit_asm(ir
, TGSI_OPCODE_TRUNC
, result_dst
, op
[0]);
1812 emit_asm(ir
, TGSI_OPCODE_CEIL
, result_dst
, op
[0]);
1815 emit_asm(ir
, TGSI_OPCODE_FLR
, result_dst
, op
[0]);
1817 case ir_unop_round_even
:
1818 emit_asm(ir
, TGSI_OPCODE_ROUND
, result_dst
, op
[0]);
1821 emit_asm(ir
, TGSI_OPCODE_FRC
, result_dst
, op
[0]);
1825 emit_asm(ir
, TGSI_OPCODE_MIN
, result_dst
, op
[0], op
[1]);
1828 emit_asm(ir
, TGSI_OPCODE_MAX
, result_dst
, op
[0], op
[1]);
1831 emit_scalar(ir
, TGSI_OPCODE_POW
, result_dst
, op
[0], op
[1]);
1834 case ir_unop_bit_not
:
1835 if (native_integers
) {
1836 emit_asm(ir
, TGSI_OPCODE_NOT
, result_dst
, op
[0]);
1840 if (native_integers
) {
1841 emit_asm(ir
, TGSI_OPCODE_U2F
, result_dst
, op
[0]);
1844 case ir_binop_lshift
:
1845 case ir_binop_rshift
:
1846 if (native_integers
) {
1847 unsigned opcode
= ir
->operation
== ir_binop_lshift
? TGSI_OPCODE_SHL
1851 if (glsl_base_type_is_64bit(op
[0].type
)) {
1852 /* GLSL shift operations have 32-bit shift counts, but TGSI uses
1855 count
= get_temp(glsl_type::u64vec(ir
->operands
[1]->type
->components()));
1856 emit_asm(ir
, TGSI_OPCODE_U2I64
, st_dst_reg(count
), op
[1]);
1861 emit_asm(ir
, opcode
, result_dst
, op
[0], count
);
1864 case ir_binop_bit_and
:
1865 if (native_integers
) {
1866 emit_asm(ir
, TGSI_OPCODE_AND
, result_dst
, op
[0], op
[1]);
1869 case ir_binop_bit_xor
:
1870 if (native_integers
) {
1871 emit_asm(ir
, TGSI_OPCODE_XOR
, result_dst
, op
[0], op
[1]);
1874 case ir_binop_bit_or
:
1875 if (native_integers
) {
1876 emit_asm(ir
, TGSI_OPCODE_OR
, result_dst
, op
[0], op
[1]);
1880 assert(!"GLSL 1.30 features unsupported");
1883 case ir_binop_ubo_load
: {
1884 if (ctx
->Const
.UseSTD430AsDefaultPacking
) {
1885 ir_rvalue
*block
= ir
->operands
[0];
1886 ir_rvalue
*offset
= ir
->operands
[1];
1887 ir_constant
*const_block
= block
->as_constant();
1889 st_src_reg
cbuf(PROGRAM_CONSTANT
,
1890 (const_block
? const_block
->value
.u
[0] + 1 : 1),
1891 ir
->type
->base_type
);
1893 cbuf
.has_index2
= true;
1896 block
->accept(this);
1897 cbuf
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
1898 *cbuf
.reladdr
= this->result
;
1899 emit_arl(ir
, sampler_reladdr
, this->result
);
1902 /* Calculate the surface offset */
1903 offset
->accept(this);
1904 st_src_reg off
= this->result
;
1906 glsl_to_tgsi_instruction
*inst
=
1907 emit_asm(ir
, TGSI_OPCODE_LOAD
, result_dst
, off
);
1909 if (result_dst
.type
== GLSL_TYPE_BOOL
)
1910 emit_asm(ir
, TGSI_OPCODE_USNE
, result_dst
, st_src_reg(result_dst
),
1911 st_src_reg_for_int(0));
1913 add_buffer_to_load_and_stores(inst
, &cbuf
, &this->instructions
,
1916 ir_constant
*const_uniform_block
= ir
->operands
[0]->as_constant();
1917 ir_constant
*const_offset_ir
= ir
->operands
[1]->as_constant();
1918 unsigned const_offset
= const_offset_ir
?
1919 const_offset_ir
->value
.u
[0] : 0;
1920 unsigned const_block
= const_uniform_block
?
1921 const_uniform_block
->value
.u
[0] + 1 : 1;
1922 st_src_reg index_reg
= get_temp(glsl_type::uint_type
);
1925 cbuf
.type
= ir
->type
->base_type
;
1926 cbuf
.file
= PROGRAM_CONSTANT
;
1928 cbuf
.reladdr
= NULL
;
1931 cbuf
.index2D
= const_block
;
1933 assert(ir
->type
->is_vector() || ir
->type
->is_scalar());
1935 if (const_offset_ir
) {
1936 /* Constant index into constant buffer */
1937 cbuf
.reladdr
= NULL
;
1938 cbuf
.index
= const_offset
/ 16;
1940 ir_expression
*offset_expr
= ir
->operands
[1]->as_expression();
1941 st_src_reg offset
= op
[1];
1943 /* The OpenGL spec is written in such a way that accesses with
1944 * non-constant offset are almost always vec4-aligned. The only
1945 * exception to this are members of structs in arrays of structs:
1946 * each struct in an array of structs is at least vec4-aligned,
1947 * but single-element and [ui]vec2 members of the struct may be at
1948 * an offset that is not a multiple of 16 bytes.
1950 * Here, we extract that offset, relying on previous passes to
1951 * always generate offset expressions of the form
1952 * (+ expr constant_offset).
1954 * Note that the std430 layout, which allows more cases of
1955 * alignment less than vec4 in arrays, is not supported for
1956 * uniform blocks, so we do not have to deal with it here.
1958 if (offset_expr
&& offset_expr
->operation
== ir_binop_add
) {
1959 const_offset_ir
= offset_expr
->operands
[1]->as_constant();
1960 if (const_offset_ir
) {
1961 const_offset
= const_offset_ir
->value
.u
[0];
1962 cbuf
.index
= const_offset
/ 16;
1963 offset_expr
->operands
[0]->accept(this);
1964 offset
= this->result
;
1968 /* Relative/variable index into constant buffer */
1969 emit_asm(ir
, TGSI_OPCODE_USHR
, st_dst_reg(index_reg
), offset
,
1970 st_src_reg_for_int(4));
1971 cbuf
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
1972 memcpy(cbuf
.reladdr
, &index_reg
, sizeof(index_reg
));
1975 if (const_uniform_block
) {
1976 /* Constant constant buffer */
1977 cbuf
.reladdr2
= NULL
;
1979 /* Relative/variable constant buffer */
1980 cbuf
.reladdr2
= ralloc(mem_ctx
, st_src_reg
);
1981 memcpy(cbuf
.reladdr2
, &op
[0], sizeof(st_src_reg
));
1983 cbuf
.has_index2
= true;
1985 cbuf
.swizzle
= swizzle_for_size(ir
->type
->vector_elements
);
1986 if (glsl_base_type_is_64bit(cbuf
.type
))
1987 cbuf
.swizzle
+= MAKE_SWIZZLE4(const_offset
% 16 / 8,
1988 const_offset
% 16 / 8,
1989 const_offset
% 16 / 8,
1990 const_offset
% 16 / 8);
1992 cbuf
.swizzle
+= MAKE_SWIZZLE4(const_offset
% 16 / 4,
1993 const_offset
% 16 / 4,
1994 const_offset
% 16 / 4,
1995 const_offset
% 16 / 4);
1997 if (ir
->type
->is_boolean()) {
1998 emit_asm(ir
, TGSI_OPCODE_USNE
, result_dst
, cbuf
,
1999 st_src_reg_for_int(0));
2001 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, cbuf
);
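   /* For example (under the non-std430 path above): a load at byte offset 20
    * from uniform block 2 ends up with cbuf.index2D = 3 (block index + 1),
    * cbuf.index = 20 / 16 = 1 (one vec4-sized constant slot), and a swizzle
    * biased by 20 % 16 / 4 = 1, i.e. the .y component of CONST[3][1].
    * Offsets that are not compile-time constants instead take the
    * USHR-by-4 path, which computes the same vec4 slot index at run time.
    */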
   case ir_triop_lrp:
      /* note: we have to reorder the three args here */
      emit_asm(ir, TGSI_OPCODE_LRP, result_dst, op[2], op[1], op[0]);
      break;
   case ir_triop_csel:
      if (this->ctx->Const.NativeIntegers)
         emit_asm(ir, TGSI_OPCODE_UCMP, result_dst, op[0], op[1], op[2]);
      else {
         op[0].negate = ~op[0].negate;
         emit_asm(ir, TGSI_OPCODE_CMP, result_dst, op[0], op[1], op[2]);
      }
      break;
   case ir_triop_bitfield_extract:
      emit_asm(ir, TGSI_OPCODE_IBFE, result_dst, op[0], op[1], op[2]);
      break;
   case ir_quadop_bitfield_insert:
      emit_asm(ir, TGSI_OPCODE_BFI, result_dst, op[0], op[1], op[2], op[3]);
      break;
   case ir_unop_bitfield_reverse:
      emit_asm(ir, TGSI_OPCODE_BREV, result_dst, op[0]);
      break;
   case ir_unop_bit_count:
      emit_asm(ir, TGSI_OPCODE_POPC, result_dst, op[0]);
      break;
   case ir_unop_find_msb:
      emit_asm(ir, TGSI_OPCODE_IMSB, result_dst, op[0]);
      break;
   case ir_unop_find_lsb:
      emit_asm(ir, TGSI_OPCODE_LSB, result_dst, op[0]);
      break;
   case ir_binop_imul_high:
      emit_asm(ir, TGSI_OPCODE_IMUL_HI, result_dst, op[0], op[1]);
      break;
   case ir_triop_fma:
      /* In theory, MAD is incorrect here. */
      if (have_fma)
         emit_asm(ir, TGSI_OPCODE_FMA, result_dst, op[0], op[1], op[2]);
      else
         emit_asm(ir, TGSI_OPCODE_MAD, result_dst, op[0], op[1], op[2]);
      break;
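   /* Regarding the "MAD is incorrect" note above: GLSL's fma() requires a
    * single rounding step, while MAD may be implemented as a multiply
    * followed by a separately rounded add, so MAD is only used as a fallback
    * when the driver does not expose native FMA support (the have_fma check).
    */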
   case ir_unop_interpolate_at_centroid:
      emit_asm(ir, TGSI_OPCODE_INTERP_CENTROID, result_dst, op[0]);
      break;
   case ir_binop_interpolate_at_offset: {
      /* The y coordinate needs to be flipped for the default fb */
      static const gl_state_index transform_y_state[STATE_LENGTH]
         = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM };

      unsigned transform_y_index =
         _mesa_add_state_reference(this->prog->Parameters,
                                   transform_y_state);

      st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
                                          transform_y_index,
                                          glsl_type::vec4_type);
      transform_y.swizzle = SWIZZLE_XXXX;

      st_src_reg temp = get_temp(glsl_type::vec2_type);
      st_dst_reg temp_dst = st_dst_reg(temp);

      emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[1]);
      temp_dst.writemask = WRITEMASK_Y;
      emit_asm(ir, TGSI_OPCODE_MUL, temp_dst, transform_y, op[1]);
      emit_asm(ir, TGSI_OPCODE_INTERP_OFFSET, result_dst, op[0], temp);
      break;
   }
   case ir_binop_interpolate_at_sample:
      emit_asm(ir, TGSI_OPCODE_INTERP_SAMPLE, result_dst, op[0], op[1]);
      break;
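   /* In the interpolateAtOffset case above, the offset is copied into a
    * temporary and its y component is scaled by the STATE_FB_WPOS_Y_TRANSFORM
    * state value (its x component, roughly +1 or -1 depending on whether the
    * default framebuffer or an FBO is bound), so the user-supplied offset is
    * expressed in the window-space orientation the driver actually uses.
    */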
   case ir_unop_d2f:
      emit_asm(ir, TGSI_OPCODE_D2F, result_dst, op[0]);
      break;
   case ir_unop_f2d:
      emit_asm(ir, TGSI_OPCODE_F2D, result_dst, op[0]);
      break;
   case ir_unop_d2i:
      emit_asm(ir, TGSI_OPCODE_D2I, result_dst, op[0]);
      break;
   case ir_unop_i2d:
      emit_asm(ir, TGSI_OPCODE_I2D, result_dst, op[0]);
      break;
   case ir_unop_d2u:
      emit_asm(ir, TGSI_OPCODE_D2U, result_dst, op[0]);
      break;
   case ir_unop_u2d:
      emit_asm(ir, TGSI_OPCODE_U2D, result_dst, op[0]);
      break;
   case ir_unop_unpack_double_2x32:
   case ir_unop_pack_double_2x32:
   case ir_unop_unpack_int_2x32:
   case ir_unop_pack_int_2x32:
   case ir_unop_unpack_uint_2x32:
   case ir_unop_pack_uint_2x32:
   case ir_unop_unpack_sampler_2x32:
   case ir_unop_pack_sampler_2x32:
   case ir_unop_unpack_image_2x32:
   case ir_unop_pack_image_2x32:
      emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
      break;
   case ir_binop_ldexp:
      if (ir->operands[0]->type->is_double()) {
         emit_asm(ir, TGSI_OPCODE_DLDEXP, result_dst, op[0], op[1]);
      } else if (ir->operands[0]->type->is_float()) {
         emit_asm(ir, TGSI_OPCODE_LDEXP, result_dst, op[0], op[1]);
      } else {
         assert(!"Invalid ldexp for non-double opcode in glsl_to_tgsi_visitor::visit()");
      }
      break;

   case ir_unop_pack_half_2x16:
      emit_asm(ir, TGSI_OPCODE_PK2H, result_dst, op[0]);
      break;
   case ir_unop_unpack_half_2x16:
      emit_asm(ir, TGSI_OPCODE_UP2H, result_dst, op[0]);
      break;
   case ir_unop_get_buffer_size: {
      ir_constant *const_offset = ir->operands[0]->as_constant();
      st_src_reg buffer(
            PROGRAM_BUFFER,
            ctx->Const.Program[shader->Stage].MaxAtomicBuffers +
            (const_offset ? const_offset->value.u[0] : 0),
            GLSL_TYPE_UINT);
      if (!const_offset) {
         buffer.reladdr = ralloc(mem_ctx, st_src_reg);
         *buffer.reladdr = op[0];
         emit_arl(ir, sampler_reladdr, op[0]);
      }
      emit_asm(ir, TGSI_OPCODE_RESQ, result_dst)->resource = buffer;
      break;
   }
2142 case ir_unop_b2i64
: {
2143 st_src_reg temp
= get_temp(glsl_type::uvec4_type
);
2144 st_dst_reg temp_dst
= st_dst_reg(temp
);
2145 unsigned orig_swz
= op
[0].swizzle
;
2147 * To convert unsigned to 64-bit:
2148 * zero Y channel, copy X channel.
2150 temp_dst
.writemask
= WRITEMASK_Y
;
2151 if (vector_elements
> 1)
2152 temp_dst
.writemask
|= WRITEMASK_W
;
2153 emit_asm(ir
, TGSI_OPCODE_MOV
, temp_dst
, st_src_reg_for_int(0));
2154 temp_dst
.writemask
= WRITEMASK_X
;
2155 if (vector_elements
> 1)
2156 temp_dst
.writemask
|= WRITEMASK_Z
;
2157 op
[0].swizzle
= MAKE_SWIZZLE4(GET_SWZ(orig_swz
, 0), GET_SWZ(orig_swz
, 0),
2158 GET_SWZ(orig_swz
, 1), GET_SWZ(orig_swz
, 1));
2159 if (ir
->operation
== ir_unop_u2i64
|| ir
->operation
== ir_unop_u2u64
)
2160 emit_asm(ir
, TGSI_OPCODE_MOV
, temp_dst
, op
[0]);
2162 emit_asm(ir
, TGSI_OPCODE_AND
, temp_dst
, op
[0], st_src_reg_for_int(1));
2164 result_src
.type
= GLSL_TYPE_UINT64
;
2165 if (vector_elements
> 2) {
2166 /* Subtle: We rely on the fact that get_temp here returns the next
2167 * TGSI temporary register directly after the temp register used for
2168 * the first two components, so that the result gets picked up
2171 st_src_reg temp
= get_temp(glsl_type::uvec4_type
);
2172 st_dst_reg temp_dst
= st_dst_reg(temp
);
2173 temp_dst
.writemask
= WRITEMASK_Y
;
2174 if (vector_elements
> 3)
2175 temp_dst
.writemask
|= WRITEMASK_W
;
2176 emit_asm(ir
, TGSI_OPCODE_MOV
, temp_dst
, st_src_reg_for_int(0));
2178 temp_dst
.writemask
= WRITEMASK_X
;
2179 if (vector_elements
> 3)
2180 temp_dst
.writemask
|= WRITEMASK_Z
;
2181 op
[0].swizzle
= MAKE_SWIZZLE4(GET_SWZ(orig_swz
, 2), GET_SWZ(orig_swz
, 2),
2182 GET_SWZ(orig_swz
, 3), GET_SWZ(orig_swz
, 3));
2183 if (ir
->operation
== ir_unop_u2i64
|| ir
->operation
== ir_unop_u2u64
)
2184 emit_asm(ir
, TGSI_OPCODE_MOV
, temp_dst
, op
[0]);
2186 emit_asm(ir
, TGSI_OPCODE_AND
, temp_dst
, op
[0], st_src_reg_for_int(1));
2193 case ir_unop_i642u
: {
2194 st_src_reg temp
= get_temp(glsl_type::uvec4_type
);
2195 st_dst_reg temp_dst
= st_dst_reg(temp
);
2196 unsigned orig_swz
= op
[0].swizzle
;
2197 unsigned orig_idx
= op
[0].index
;
2199 temp_dst
.writemask
= WRITEMASK_X
;
2201 for (el
= 0; el
< vector_elements
; el
++) {
2202 unsigned swz
= GET_SWZ(orig_swz
, el
);
2204 op
[0].swizzle
= MAKE_SWIZZLE4(SWIZZLE_Z
, SWIZZLE_Z
, SWIZZLE_Z
, SWIZZLE_Z
);
2206 op
[0].swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
);
2208 op
[0].index
= orig_idx
+ 1;
2209 op
[0].type
= GLSL_TYPE_UINT
;
2210 temp_dst
.writemask
= WRITEMASK_X
<< el
;
2211 emit_asm(ir
, TGSI_OPCODE_MOV
, temp_dst
, op
[0]);
2214 if (ir
->operation
== ir_unop_u642u
|| ir
->operation
== ir_unop_i642u
)
2215 result_src
.type
= GLSL_TYPE_UINT
;
2217 result_src
.type
= GLSL_TYPE_INT
;
2221 emit_asm(ir
, TGSI_OPCODE_U64SNE
, result_dst
, op
[0], st_src_reg_for_int64(0));
2224 emit_asm(ir
, TGSI_OPCODE_I642F
, result_dst
, op
[0]);
2227 emit_asm(ir
, TGSI_OPCODE_U642F
, result_dst
, op
[0]);
2230 emit_asm(ir
, TGSI_OPCODE_I642D
, result_dst
, op
[0]);
2233 emit_asm(ir
, TGSI_OPCODE_U642D
, result_dst
, op
[0]);
2236 emit_asm(ir
, TGSI_OPCODE_I2I64
, result_dst
, op
[0]);
2239 emit_asm(ir
, TGSI_OPCODE_F2I64
, result_dst
, op
[0]);
2242 emit_asm(ir
, TGSI_OPCODE_D2I64
, result_dst
, op
[0]);
2245 emit_asm(ir
, TGSI_OPCODE_I2I64
, result_dst
, op
[0]);
2248 emit_asm(ir
, TGSI_OPCODE_F2U64
, result_dst
, op
[0]);
2251 emit_asm(ir
, TGSI_OPCODE_D2U64
, result_dst
, op
[0]);
2253 /* these might be needed */
2254 case ir_unop_pack_snorm_2x16
:
2255 case ir_unop_pack_unorm_2x16
:
2256 case ir_unop_pack_snorm_4x8
:
2257 case ir_unop_pack_unorm_4x8
:
2259 case ir_unop_unpack_snorm_2x16
:
2260 case ir_unop_unpack_unorm_2x16
:
2261 case ir_unop_unpack_snorm_4x8
:
2262 case ir_unop_unpack_unorm_4x8
:
2264 case ir_quadop_vector
:
2265 case ir_binop_vector_extract
:
2266 case ir_triop_vector_insert
:
2267 case ir_binop_carry
:
2268 case ir_binop_borrow
:
2269 case ir_unop_ssbo_unsized_array_length
:
2270 /* This operation is not supported, or should have already been handled.
2272 assert(!"Invalid ir opcode in glsl_to_tgsi_visitor::visit()");
2276 this->result
= result_src
;
void
glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
{
   st_src_reg src;
   int i;
   int swizzle[4];

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */

   ir->val->accept(this);
   src = this->result;
   assert(src.file != PROGRAM_UNDEFINED);
   assert(ir->type->vector_elements > 0);

   for (i = 0; i < 4; i++) {
      if (i < ir->type->vector_elements) {
         switch (i) {
         case 0:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
            break;
         case 1:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
            break;
         case 2:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
            break;
         case 3:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
            break;
         }
      } else {
         /* If the type is smaller than a vec4, replicate the last
          * channel out.
          */
         swizzle[i] = swizzle[ir->type->vector_elements - 1];
      }
   }

   src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}
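/* For example, if the child expression already produced a source with
 * swizzle .wzyx and this ir_swizzle asks for .zy, the lookup above yields
 * GET_SWZ(.wzyx, 2) = y and GET_SWZ(.wzyx, 1) = z, and the last channel is
 * replicated to fill the vec4: the composed swizzle becomes .yzzz, so no
 * extra instruction is needed to apply the second swizzle.
 */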
/* Test if the variable is an array. Note that geometry and
 * tessellation shader inputs and outputs are always arrays (except
 * for patch inputs), so only the array element type is considered.
 */
static bool
is_inout_array(unsigned stage, ir_variable *var, bool *remove_array)
{
   const glsl_type *type = var->type;

   *remove_array = false;

   if ((stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
       (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out))
      return false;

   if (((stage == MESA_SHADER_GEOMETRY && var->data.mode == ir_var_shader_in) ||
        (stage == MESA_SHADER_TESS_EVAL && var->data.mode == ir_var_shader_in) ||
        stage == MESA_SHADER_TESS_CTRL) &&
       !var->data.patch) {
      if (!var->type->is_array())
         return false; /* a system value probably */

      type = var->type->fields.array;
      *remove_array = true;
   }

   return type->is_array() || type->is_matrix();
}
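/* For example, a geometry shader input declared "in vec4 foo[3][2]" has its
 * per-vertex dimension stripped (remove_array = true) and still counts as an
 * array here because the remaining element type vec4[2] is itself an array,
 * while "in vec4 bar[3]" does not, since its element type is a plain vector.
 */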
static unsigned
st_translate_interp_loc(ir_variable *var)
{
   if (var->data.centroid)
      return TGSI_INTERPOLATE_LOC_CENTROID;
   else if (var->data.sample)
      return TGSI_INTERPOLATE_LOC_SAMPLE;
   else
      return TGSI_INTERPOLATE_LOC_CENTER;
}
2367 glsl_to_tgsi_visitor::visit(ir_dereference_variable
*ir
)
2369 variable_storage
*entry
= find_variable_storage(ir
->var
);
2370 ir_variable
*var
= ir
->var
;
2374 switch (var
->data
.mode
) {
2375 case ir_var_uniform
:
2376 entry
= new(mem_ctx
) variable_storage(var
, PROGRAM_UNIFORM
,
2377 var
->data
.param_index
);
2378 _mesa_hash_table_insert(this->variables
, var
, entry
);
2380 case ir_var_shader_in
: {
      /* The linker assigns locations for varyings and attributes,
       * including deprecated builtins (like gl_Color), user-assigned
       * generic attributes (glBindAttribLocation), and
       * user-defined varyings.
       */
2386 assert(var
->data
.location
!= -1);
2388 const glsl_type
*type_without_array
= var
->type
->without_array();
2389 struct inout_decl
*decl
= &inputs
[num_inputs
];
2390 unsigned component
= var
->data
.location_frac
;
2391 unsigned num_components
;
2394 if (type_without_array
->is_64bit())
2395 component
= component
/ 2;
2396 if (type_without_array
->vector_elements
)
2397 num_components
= type_without_array
->vector_elements
;
2401 decl
->mesa_index
= var
->data
.location
;
2402 decl
->interp
= (glsl_interp_mode
) var
->data
.interpolation
;
2403 decl
->interp_loc
= st_translate_interp_loc(var
);
2404 decl
->base_type
= type_without_array
->base_type
;
2405 decl
->usage_mask
= u_bit_consecutive(component
, num_components
);
2407 if (is_inout_array(shader
->Stage
, var
, &remove_array
)) {
2408 decl
->array_id
= num_input_arrays
+ 1;
2415 decl
->size
= type_size(var
->type
->fields
.array
);
2417 decl
->size
= type_size(var
->type
);
2419 entry
= new(mem_ctx
) variable_storage(var
,
2423 entry
->component
= component
;
2425 _mesa_hash_table_insert(this->variables
, var
, entry
);
2429 case ir_var_shader_out
: {
2430 assert(var
->data
.location
!= -1);
2432 const glsl_type
*type_without_array
= var
->type
->without_array();
2433 struct inout_decl
*decl
= &outputs
[num_outputs
];
2434 unsigned component
= var
->data
.location_frac
;
2435 unsigned num_components
;
2438 if (type_without_array
->is_64bit())
2439 component
= component
/ 2;
2440 if (type_without_array
->vector_elements
)
2441 num_components
= type_without_array
->vector_elements
;
2445 decl
->mesa_index
= var
->data
.location
+ FRAG_RESULT_MAX
* var
->data
.index
;
2446 decl
->base_type
= type_without_array
->base_type
;
2447 decl
->usage_mask
= u_bit_consecutive(component
, num_components
);
2448 if (var
->data
.stream
& (1u << 31)) {
2449 decl
->gs_out_streams
= var
->data
.stream
& ~(1u << 31);
2451 assert(var
->data
.stream
< 4);
2452 decl
->gs_out_streams
= 0;
2453 for (unsigned i
= 0; i
< num_components
; ++i
)
2454 decl
->gs_out_streams
|= var
->data
.stream
<< (2 * (component
+ i
));
2457 if (is_inout_array(shader
->Stage
, var
, &remove_array
)) {
2458 decl
->array_id
= num_output_arrays
+ 1;
2459 num_output_arrays
++;
2465 decl
->size
= type_size(var
->type
->fields
.array
);
2467 decl
->size
= type_size(var
->type
);
2469 if (var
->data
.fb_fetch_output
) {
2470 st_dst_reg dst
= st_dst_reg(get_temp(var
->type
));
2471 st_src_reg src
= st_src_reg(PROGRAM_OUTPUT
, decl
->mesa_index
,
2472 var
->type
, component
, decl
->array_id
);
2473 emit_asm(NULL
, TGSI_OPCODE_FBFETCH
, dst
, src
);
2474 entry
= new(mem_ctx
) variable_storage(var
, dst
.file
, dst
.index
,
2477 entry
= new(mem_ctx
) variable_storage(var
,
2482 entry
->component
= component
;
2484 _mesa_hash_table_insert(this->variables
, var
, entry
);
2488 case ir_var_system_value
:
2489 entry
= new(mem_ctx
) variable_storage(var
,
2490 PROGRAM_SYSTEM_VALUE
,
2491 var
->data
.location
);
2494 case ir_var_temporary
:
2495 st_src_reg src
= get_temp(var
->type
);
2497 entry
= new(mem_ctx
) variable_storage(var
, src
.file
, src
.index
,
2499 _mesa_hash_table_insert(this->variables
, var
, entry
);
2505 printf("Failed to make storage for %s\n", var
->name
);
2510 this->result
= st_src_reg(entry
->file
, entry
->index
, var
->type
,
2511 entry
->component
, entry
->array_id
);
2512 if (this->shader
->Stage
== MESA_SHADER_VERTEX
&&
2513 var
->data
.mode
== ir_var_shader_in
&&
2514 var
->type
->without_array()->is_double())
2515 this->result
.is_double_vertex_input
= true;
2516 if (!native_integers
)
2517 this->result
.type
= GLSL_TYPE_FLOAT
;
2521 shrink_array_declarations(struct inout_decl
*decls
, unsigned count
,
2522 GLbitfield64
* usage_mask
,
2523 GLbitfield64 double_usage_mask
,
2524 GLbitfield
* patch_usage_mask
)
2529 /* Fix array declarations by removing unused array elements at both ends
2530 * of the arrays. For example, mat4[3] where only mat[1] is used.
2532 for (i
= 0; i
< count
; i
++) {
2533 struct inout_decl
*decl
= &decls
[i
];
2534 if (!decl
->array_id
)
2537 /* Shrink the beginning. */
2538 for (j
= 0; j
< (int)decl
->size
; j
++) {
2539 if (decl
->mesa_index
>= VARYING_SLOT_PATCH0
) {
2540 if (*patch_usage_mask
&
2541 BITFIELD64_BIT(decl
->mesa_index
- VARYING_SLOT_PATCH0
+ j
))
2545 if (*usage_mask
& BITFIELD64_BIT(decl
->mesa_index
+j
))
2547 if (double_usage_mask
& BITFIELD64_BIT(decl
->mesa_index
+j
-1))
2556 /* Shrink the end. */
2557 for (j
= decl
->size
-1; j
>= 0; j
--) {
2558 if (decl
->mesa_index
>= VARYING_SLOT_PATCH0
) {
2559 if (*patch_usage_mask
&
2560 BITFIELD64_BIT(decl
->mesa_index
- VARYING_SLOT_PATCH0
+ j
))
2564 if (*usage_mask
& BITFIELD64_BIT(decl
->mesa_index
+j
))
2566 if (double_usage_mask
& BITFIELD64_BIT(decl
->mesa_index
+j
-1))
2573 /* When not all entries of an array are accessed, we mark them as used
2574 * here anyway, to ensure that the input/output mapping logic doesn't get
2577 * TODO This happens when an array isn't used via indirect access, which
2578 * some game ports do (at least eON-based). There is an optimization
2579 * opportunity here by replacing the array declaration with non-array
2580 * declarations of those slots that are actually used.
2582 for (j
= 1; j
< (int)decl
->size
; ++j
) {
2583 if (decl
->mesa_index
>= VARYING_SLOT_PATCH0
)
2584 *patch_usage_mask
|= BITFIELD64_BIT(decl
->mesa_index
- VARYING_SLOT_PATCH0
+ j
);
2586 *usage_mask
|= BITFIELD64_BIT(decl
->mesa_index
+ j
);
2592 glsl_to_tgsi_visitor::visit(ir_dereference_array
*ir
)
2597 ir_variable
*var
= ir
->variable_referenced();
2599 /* We only need the logic provided by st_glsl_storage_type_size()
2600 * for arrays of structs. Indirect sampler and image indexing is handled
2603 int element_size
= ir
->type
->without_array()->is_record() ?
2604 st_glsl_storage_type_size(ir
->type
, var
->data
.bindless
) :
2605 type_size(ir
->type
);
2607 index
= ir
->array_index
->constant_expression_value(ralloc_parent(ir
));
2609 ir
->array
->accept(this);
2612 if (!src
.has_index2
) {
2613 switch (this->prog
->Target
) {
2614 case GL_TESS_CONTROL_PROGRAM_NV
:
2615 is_2D
= (src
.file
== PROGRAM_INPUT
|| src
.file
== PROGRAM_OUTPUT
) &&
2616 !ir
->variable_referenced()->data
.patch
;
2618 case GL_TESS_EVALUATION_PROGRAM_NV
:
2619 is_2D
= src
.file
== PROGRAM_INPUT
&&
2620 !ir
->variable_referenced()->data
.patch
;
2622 case GL_GEOMETRY_PROGRAM_NV
:
2623 is_2D
= src
.file
== PROGRAM_INPUT
;
2633 if (this->prog
->Target
== GL_VERTEX_PROGRAM_ARB
&&
2634 src
.file
== PROGRAM_INPUT
)
2635 element_size
= attrib_type_size(ir
->type
, true);
2637 src
.index2D
= index
->value
.i
[0];
2638 src
.has_index2
= true;
2640 src
.index
+= index
->value
.i
[0] * element_size
;
2642 /* Variable index array dereference. It eats the "vec4" of the
2643 * base of the array and an index that offsets the TGSI register
2646 ir
->array_index
->accept(this);
2648 st_src_reg index_reg
;
2650 if (element_size
== 1) {
2651 index_reg
= this->result
;
2653 index_reg
= get_temp(native_integers
?
2654 glsl_type::int_type
: glsl_type::float_type
);
2656 emit_asm(ir
, TGSI_OPCODE_MUL
, st_dst_reg(index_reg
),
2657 this->result
, st_src_reg_for_type(index_reg
.type
, element_size
));
2660 /* If there was already a relative address register involved, add the
2661 * new and the old together to get the new offset.
2663 if (!is_2D
&& src
.reladdr
!= NULL
) {
2664 st_src_reg accum_reg
= get_temp(native_integers
?
2665 glsl_type::int_type
: glsl_type::float_type
);
2667 emit_asm(ir
, TGSI_OPCODE_ADD
, st_dst_reg(accum_reg
),
2668 index_reg
, *src
.reladdr
);
2670 index_reg
= accum_reg
;
2674 src
.reladdr2
= ralloc(mem_ctx
, st_src_reg
);
2675 memcpy(src
.reladdr2
, &index_reg
, sizeof(index_reg
));
2677 src
.has_index2
= true;
2679 src
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
2680 memcpy(src
.reladdr
, &index_reg
, sizeof(index_reg
));
2684 /* Change the register type to the element type of the array. */
2685 src
.type
= ir
->type
->base_type
;
void
glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   ir_variable *var = ir->record->variable_referenced();
   int offset = 0;

   ir->record->accept(this);

   assert(ir->field_idx >= 0);
   for (i = 0; i < struct_type->length; i++) {
      if (i == (unsigned) ir->field_idx)
         break;
      const glsl_type *member_type = struct_type->fields.structure[i].type;
      offset += st_glsl_storage_type_size(member_type, var->data.bindless);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = SWIZZLE_NOOP;

   this->result.index += offset;
   this->result.type = ir->type->base_type;
}
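/* For example, dereferencing field c of "struct { vec4 a; float b[2]; vec3 c; }"
 * sums st_glsl_storage_type_size() for a and b and advances this->result.index
 * by that many slots (3, assuming the float[2] member takes one vec4-sized
 * slot per element) before the vec3 swizzle is applied.
 */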
/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static st_dst_reg
get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v, int *component)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array)
      assert(!deref_array->array->type->is_vector());

   /* Use the rvalue deref handler for the most part.  We write swizzles using
    * the writemask, but we do extract the base component for enhanced layouts
    * from the source swizzle.
    */
   ir->accept(v);
   *component = GET_SWZ(v->result.swizzle, 0);
   return st_dst_reg(v->result);
}
/**
 * Process the condition of a conditional assignment
 *
 * Examines the condition of a conditional assignment to generate the optimal
 * first operand of a \c CMP instruction.  If the condition is a relational
 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
 * used as the source for the \c CMP instruction.  Otherwise the comparison
 * is processed to a boolean result, and the boolean result is used as the
 * operand to the CMP instruction.
 */
bool
glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
{
   ir_rvalue *src_ir = ir;
   bool negate = true;
   bool switch_order = false;

   ir_expression *const expr = ir->as_expression();

   if (native_integers) {
      if ((expr != NULL) && (expr->num_operands == 2)) {
         enum glsl_base_type type = expr->operands[0]->type->base_type;
         if (type == GLSL_TYPE_INT || type == GLSL_TYPE_UINT ||
             type == GLSL_TYPE_BOOL) {
            if (expr->operation == ir_binop_equal) {
               if (expr->operands[0]->is_zero()) {
                  src_ir = expr->operands[1];
                  switch_order = true;
               }
               else if (expr->operands[1]->is_zero()) {
                  src_ir = expr->operands[0];
                  switch_order = true;
               }
            }
            else if (expr->operation == ir_binop_nequal) {
               if (expr->operands[0]->is_zero()) {
                  src_ir = expr->operands[1];
               }
               else if (expr->operands[1]->is_zero()) {
                  src_ir = expr->operands[0];
               }
            }
         }
      }

      src_ir->accept(this);
      return switch_order;
   }

   if ((expr != NULL) && (expr->num_operands == 2)) {
      bool zero_on_left = false;

      if (expr->operands[0]->is_zero()) {
         src_ir = expr->operands[1];
         zero_on_left = true;
      } else if (expr->operands[1]->is_zero()) {
         src_ir = expr->operands[0];
         zero_on_left = false;
      }

      /*      a is -  0  +            -  0  +
       * (a <  0)  T  F  F  ( a < 0)  T  F  F
       * (0 <  a)  F  F  T  (-a < 0)  F  F  T
       * (a <= 0)  T  T  F  (-a < 0)  F  F  T  (swap order of other operands)
       * (0 <= a)  F  T  T  ( a < 0)  T  F  F  (swap order of other operands)
       * (a >  0)  F  F  T  (-a < 0)  F  F  T
       * (0 >  a)  T  F  F  ( a < 0)  T  F  F
       * (a >= 0)  F  T  T  ( a < 0)  T  F  F  (swap order of other operands)
       * (0 >= a)  T  T  F  (-a < 0)  F  F  T  (swap order of other operands)
       *
       * Note that exchanging the order of 0 and 'a' in the comparison simply
       * means that the value of 'a' should be negated.
       */
      if (src_ir != ir) {
         switch (expr->operation) {
         case ir_binop_less:
            switch_order = false;
            negate = zero_on_left;
            break;

         case ir_binop_greater:
            switch_order = false;
            negate = !zero_on_left;
            break;

         case ir_binop_lequal:
            switch_order = true;
            negate = !zero_on_left;
            break;

         case ir_binop_gequal:
            switch_order = true;
            negate = zero_on_left;
            break;

         default:
            /* This isn't the right kind of comparison after all, so make sure
             * the whole condition is visited.
             */
            src_ir = ir;
            break;
         }
      }
   }

   src_ir->accept(this);

   /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
    * condition we produced is 0.0 or 1.0.  By flipping the sign, we can
    * choose which value TGSI_OPCODE_CMP produces without an extra instruction
    * computing the condition.
    */
   if (negate)
      this->result.negate = ~this->result.negate;

   return switch_order;
}
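/* For example, a conditional assignment guarded by (a >= 0.0) hits the
 * ir_binop_gequal case with the zero on the right: the condition source
 * stays 'a' (negate ends up false) and switch_order comes back true, so
 * emit_block_mov() ends up emitting CMP dst, a, old_dst, rhs, which keeps
 * the old value when a < 0 and writes the new one otherwise, without first
 * materializing the boolean result of the comparison.
 */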
2865 glsl_to_tgsi_visitor::emit_block_mov(ir_assignment
*ir
, const struct glsl_type
*type
,
2866 st_dst_reg
*l
, st_src_reg
*r
,
2867 st_src_reg
*cond
, bool cond_swap
)
2869 if (type
->is_record()) {
2870 for (unsigned int i
= 0; i
< type
->length
; i
++) {
2871 emit_block_mov(ir
, type
->fields
.structure
[i
].type
, l
, r
,
2877 if (type
->is_array()) {
2878 for (unsigned int i
= 0; i
< type
->length
; i
++) {
2879 emit_block_mov(ir
, type
->fields
.array
, l
, r
, cond
, cond_swap
);
2884 if (type
->is_matrix()) {
2885 const struct glsl_type
*vec_type
;
2887 vec_type
= glsl_type::get_instance(type
->is_double() ? GLSL_TYPE_DOUBLE
: GLSL_TYPE_FLOAT
,
2888 type
->vector_elements
, 1);
2890 for (int i
= 0; i
< type
->matrix_columns
; i
++) {
2891 emit_block_mov(ir
, vec_type
, l
, r
, cond
, cond_swap
);
2896 assert(type
->is_scalar() || type
->is_vector());
2898 l
->type
= type
->base_type
;
2899 r
->type
= type
->base_type
;
2901 st_src_reg l_src
= st_src_reg(*l
);
2903 if (l_src
.file
== PROGRAM_OUTPUT
&&
2904 this->prog
->Target
== GL_FRAGMENT_PROGRAM_ARB
&&
2905 (l_src
.index
== FRAG_RESULT_DEPTH
|| l_src
.index
== FRAG_RESULT_STENCIL
)) {
2906 /* This is a special case because the source swizzles will be shifted
2907 * later to account for the difference between GLSL (where they're
2908 * plain floats) and TGSI (where they're Z and Y components). */
2909 l_src
.swizzle
= SWIZZLE_XXXX
;
2912 if (native_integers
) {
2913 emit_asm(ir
, TGSI_OPCODE_UCMP
, *l
, *cond
,
2914 cond_swap
? l_src
: *r
,
2915 cond_swap
? *r
: l_src
);
2917 emit_asm(ir
, TGSI_OPCODE_CMP
, *l
, *cond
,
2918 cond_swap
? l_src
: *r
,
2919 cond_swap
? *r
: l_src
);
2922 emit_asm(ir
, TGSI_OPCODE_MOV
, *l
, *r
);
2926 if (type
->is_dual_slot()) {
2928 if (r
->is_double_vertex_input
== false)
2934 glsl_to_tgsi_visitor::visit(ir_assignment
*ir
)
   /* all generated instructions need to be flagged as precise */
2941 this->precise
= is_precise(ir
->lhs
->variable_referenced());
2942 ir
->rhs
->accept(this);
2945 l
= get_assignment_lhs(ir
->lhs
, this, &dst_component
);
2949 int first_enabled_chan
= 0;
2951 ir_variable
*variable
= ir
->lhs
->variable_referenced();
2953 if (shader
->Stage
== MESA_SHADER_FRAGMENT
&&
2954 variable
->data
.mode
== ir_var_shader_out
&&
2955 (variable
->data
.location
== FRAG_RESULT_DEPTH
||
2956 variable
->data
.location
== FRAG_RESULT_STENCIL
)) {
2957 assert(ir
->lhs
->type
->is_scalar());
2958 assert(ir
->write_mask
== WRITEMASK_X
);
2960 if (variable
->data
.location
== FRAG_RESULT_DEPTH
)
2961 l
.writemask
= WRITEMASK_Z
;
2963 assert(variable
->data
.location
== FRAG_RESULT_STENCIL
);
2964 l
.writemask
= WRITEMASK_Y
;
2966 } else if (ir
->write_mask
== 0) {
2967 assert(!ir
->lhs
->type
->is_scalar() && !ir
->lhs
->type
->is_vector());
2969 unsigned num_elements
= ir
->lhs
->type
->without_array()->vector_elements
;
2972 l
.writemask
= u_bit_consecutive(0, num_elements
);
2974 /* The type is a struct or an array of (array of) structs. */
2975 l
.writemask
= WRITEMASK_XYZW
;
2978 l
.writemask
= ir
->write_mask
;
2981 for (int i
= 0; i
< 4; i
++) {
2982 if (l
.writemask
& (1 << i
)) {
2983 first_enabled_chan
= GET_SWZ(r
.swizzle
, i
);
2988 l
.writemask
= l
.writemask
<< dst_component
;
2990 /* Swizzle a small RHS vector into the channels being written.
2992 * glsl ir treats write_mask as dictating how many channels are
2993 * present on the RHS while TGSI treats write_mask as just
2994 * showing which channels of the vec4 RHS get written.
2996 for (int i
= 0; i
< 4; i
++) {
2997 if (l
.writemask
& (1 << i
))
2998 swizzles
[i
] = GET_SWZ(r
.swizzle
, rhs_chan
++);
3000 swizzles
[i
] = first_enabled_chan
;
3002 r
.swizzle
= MAKE_SWIZZLE4(swizzles
[0], swizzles
[1],
3003 swizzles
[2], swizzles
[3]);
3006 assert(l
.file
!= PROGRAM_UNDEFINED
);
3007 assert(r
.file
!= PROGRAM_UNDEFINED
);
3009 if (ir
->condition
) {
3010 const bool switch_order
= this->process_move_condition(ir
->condition
);
3011 st_src_reg condition
= this->result
;
3013 emit_block_mov(ir
, ir
->lhs
->type
, &l
, &r
, &condition
, switch_order
);
3014 } else if (ir
->rhs
->as_expression() &&
3015 this->instructions
.get_tail() &&
3016 ir
->rhs
== ((glsl_to_tgsi_instruction
*)this->instructions
.get_tail())->ir
&&
3017 !((glsl_to_tgsi_instruction
*)this->instructions
.get_tail())->is_64bit_expanded
&&
3018 type_size(ir
->lhs
->type
) == 1 &&
3019 l
.writemask
== ((glsl_to_tgsi_instruction
*)this->instructions
.get_tail())->dst
[0].writemask
) {
3020 /* To avoid emitting an extra MOV when assigning an expression to a
3021 * variable, emit the last instruction of the expression again, but
3022 * replace the destination register with the target of the assignment.
3023 * Dead code elimination will remove the original instruction.
3025 glsl_to_tgsi_instruction
*inst
, *new_inst
;
3026 inst
= (glsl_to_tgsi_instruction
*)this->instructions
.get_tail();
3027 new_inst
= emit_asm(ir
, inst
->op
, l
, inst
->src
[0], inst
->src
[1], inst
->src
[2], inst
->src
[3]);
3028 new_inst
->saturate
= inst
->saturate
;
3029 new_inst
->resource
= inst
->resource
;
3030 inst
->dead_mask
= inst
->dst
[0].writemask
;
3032 emit_block_mov(ir
, ir
->rhs
->type
, &l
, &r
, NULL
, false);
3039 glsl_to_tgsi_visitor::visit(ir_constant
*ir
)
3042 GLdouble stack_vals
[4] = { 0 };
3043 gl_constant_value
*values
= (gl_constant_value
*) stack_vals
;
3044 GLenum gl_type
= GL_NONE
;
3046 static int in_array
= 0;
3047 gl_register_file file
= in_array
? PROGRAM_CONSTANT
: PROGRAM_IMMEDIATE
;
3049 /* Unfortunately, 4 floats is all we can get into
3050 * _mesa_add_typed_unnamed_constant. So, make a temp to store an
3051 * aggregate constant and move each constant value into it. If we
3052 * get lucky, copy propagation will eliminate the extra moves.
3054 if (ir
->type
->is_record()) {
3055 st_src_reg temp_base
= get_temp(ir
->type
);
3056 st_dst_reg temp
= st_dst_reg(temp_base
);
3058 for (i
= 0; i
< ir
->type
->length
; i
++) {
3059 ir_constant
*const field_value
= ir
->get_record_field(i
);
3060 int size
= type_size(field_value
->type
);
3064 field_value
->accept(this);
3067 for (unsigned j
= 0; j
< (unsigned int)size
; j
++) {
3068 emit_asm(ir
, TGSI_OPCODE_MOV
, temp
, src
);
3074 this->result
= temp_base
;
3078 if (ir
->type
->is_array()) {
3079 st_src_reg temp_base
= get_temp(ir
->type
);
3080 st_dst_reg temp
= st_dst_reg(temp_base
);
3081 int size
= type_size(ir
->type
->fields
.array
);
3086 for (i
= 0; i
< ir
->type
->length
; i
++) {
3087 ir
->const_elements
[i
]->accept(this);
3089 for (int j
= 0; j
< size
; j
++) {
3090 emit_asm(ir
, TGSI_OPCODE_MOV
, temp
, src
);
3096 this->result
= temp_base
;
3101 if (ir
->type
->is_matrix()) {
3102 st_src_reg mat
= get_temp(ir
->type
);
3103 st_dst_reg mat_column
= st_dst_reg(mat
);
3105 for (i
= 0; i
< ir
->type
->matrix_columns
; i
++) {
3106 switch (ir
->type
->base_type
) {
3107 case GLSL_TYPE_FLOAT
:
3108 values
= (gl_constant_value
*) &ir
->value
.f
[i
* ir
->type
->vector_elements
];
3110 src
= st_src_reg(file
, -1, ir
->type
->base_type
);
3111 src
.index
= add_constant(file
,
3113 ir
->type
->vector_elements
,
3116 emit_asm(ir
, TGSI_OPCODE_MOV
, mat_column
, src
);
3118 case GLSL_TYPE_DOUBLE
:
3119 values
= (gl_constant_value
*) &ir
->value
.d
[i
* ir
->type
->vector_elements
];
3120 src
= st_src_reg(file
, -1, ir
->type
->base_type
);
3121 src
.index
= add_constant(file
,
3123 ir
->type
->vector_elements
,
3126 if (ir
->type
->vector_elements
>= 2) {
3127 mat_column
.writemask
= WRITEMASK_XY
;
3128 src
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_X
, SWIZZLE_Y
);
3129 emit_asm(ir
, TGSI_OPCODE_MOV
, mat_column
, src
);
3131 mat_column
.writemask
= WRITEMASK_X
;
3132 src
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
);
3133 emit_asm(ir
, TGSI_OPCODE_MOV
, mat_column
, src
);
3136 if (ir
->type
->vector_elements
> 2) {
3137 if (ir
->type
->vector_elements
== 4) {
3138 mat_column
.writemask
= WRITEMASK_ZW
;
3139 src
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_X
, SWIZZLE_Y
);
3140 emit_asm(ir
, TGSI_OPCODE_MOV
, mat_column
, src
);
3142 mat_column
.writemask
= WRITEMASK_Z
;
3143 src
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_Y
, SWIZZLE_Y
, SWIZZLE_Y
, SWIZZLE_Y
);
3144 emit_asm(ir
, TGSI_OPCODE_MOV
, mat_column
, src
);
3145 mat_column
.writemask
= WRITEMASK_XYZW
;
3146 src
.swizzle
= SWIZZLE_XYZW
;
3152 unreachable("Illegal matrix constant type.\n");
3161 switch (ir
->type
->base_type
) {
3162 case GLSL_TYPE_FLOAT
:
3164 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3165 values
[i
].f
= ir
->value
.f
[i
];
3168 case GLSL_TYPE_DOUBLE
:
3169 gl_type
= GL_DOUBLE
;
3170 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3171 memcpy(&values
[i
* 2], &ir
->value
.d
[i
], sizeof(double));
3174 case GLSL_TYPE_INT64
:
3175 gl_type
= GL_INT64_ARB
;
3176 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3177 memcpy(&values
[i
* 2], &ir
->value
.d
[i
], sizeof(int64_t));
3180 case GLSL_TYPE_UINT64
:
3181 gl_type
= GL_UNSIGNED_INT64_ARB
;
3182 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3183 memcpy(&values
[i
* 2], &ir
->value
.d
[i
], sizeof(uint64_t));
3186 case GLSL_TYPE_UINT
:
3187 gl_type
= native_integers
? GL_UNSIGNED_INT
: GL_FLOAT
;
3188 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3189 if (native_integers
)
3190 values
[i
].u
= ir
->value
.u
[i
];
3192 values
[i
].f
= ir
->value
.u
[i
];
3196 gl_type
= native_integers
? GL_INT
: GL_FLOAT
;
3197 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3198 if (native_integers
)
3199 values
[i
].i
= ir
->value
.i
[i
];
3201 values
[i
].f
= ir
->value
.i
[i
];
3204 case GLSL_TYPE_BOOL
:
3205 gl_type
= native_integers
? GL_BOOL
: GL_FLOAT
;
3206 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
3207 values
[i
].u
= ir
->value
.b
[i
] ? ctx
->Const
.UniformBooleanTrue
: 0;
3211 assert(!"Non-float/uint/int/bool constant");
3214 this->result
= st_src_reg(file
, -1, ir
->type
);
3215 this->result
.index
= add_constant(file
,
3217 ir
->type
->vector_elements
,
3219 &this->result
.swizzle
);
3223 glsl_to_tgsi_visitor::visit_atomic_counter_intrinsic(ir_call
*ir
)
3225 exec_node
*param
= ir
->actual_parameters
.get_head();
3226 ir_dereference
*deref
= static_cast<ir_dereference
*>(param
);
3227 ir_variable
*location
= deref
->variable_referenced();
3230 PROGRAM_BUFFER
, location
->data
.binding
, GLSL_TYPE_ATOMIC_UINT
);
3232 /* Calculate the surface offset */
3234 unsigned array_size
= 0, base
= 0;
3237 get_deref_offsets(deref
, &array_size
, &base
, &index
, &offset
, false);
3239 if (offset
.file
!= PROGRAM_UNDEFINED
) {
3240 emit_asm(ir
, TGSI_OPCODE_MUL
, st_dst_reg(offset
),
3241 offset
, st_src_reg_for_int(ATOMIC_COUNTER_SIZE
));
3242 emit_asm(ir
, TGSI_OPCODE_ADD
, st_dst_reg(offset
),
3243 offset
, st_src_reg_for_int(location
->data
.offset
+ index
* ATOMIC_COUNTER_SIZE
));
3245 offset
= st_src_reg_for_int(location
->data
.offset
+ index
* ATOMIC_COUNTER_SIZE
);
3248 ir
->return_deref
->accept(this);
3249 st_dst_reg
dst(this->result
);
3250 dst
.writemask
= WRITEMASK_X
;
3252 glsl_to_tgsi_instruction
*inst
;
3254 if (ir
->callee
->intrinsic_id
== ir_intrinsic_atomic_counter_read
) {
3255 inst
= emit_asm(ir
, TGSI_OPCODE_LOAD
, dst
, offset
);
3256 } else if (ir
->callee
->intrinsic_id
== ir_intrinsic_atomic_counter_increment
) {
3257 inst
= emit_asm(ir
, TGSI_OPCODE_ATOMUADD
, dst
, offset
,
3258 st_src_reg_for_int(1));
3259 } else if (ir
->callee
->intrinsic_id
== ir_intrinsic_atomic_counter_predecrement
) {
3260 inst
= emit_asm(ir
, TGSI_OPCODE_ATOMUADD
, dst
, offset
,
3261 st_src_reg_for_int(-1));
3262 emit_asm(ir
, TGSI_OPCODE_ADD
, dst
, this->result
, st_src_reg_for_int(-1));
3264 param
= param
->get_next();
3265 ir_rvalue
*val
= ((ir_instruction
*)param
)->as_rvalue();
3268 st_src_reg data
= this->result
, data2
= undef_src
;
3270 switch (ir
->callee
->intrinsic_id
) {
3271 case ir_intrinsic_atomic_counter_add
:
3272 opcode
= TGSI_OPCODE_ATOMUADD
;
3274 case ir_intrinsic_atomic_counter_min
:
3275 opcode
= TGSI_OPCODE_ATOMIMIN
;
3277 case ir_intrinsic_atomic_counter_max
:
3278 opcode
= TGSI_OPCODE_ATOMIMAX
;
3280 case ir_intrinsic_atomic_counter_and
:
3281 opcode
= TGSI_OPCODE_ATOMAND
;
3283 case ir_intrinsic_atomic_counter_or
:
3284 opcode
= TGSI_OPCODE_ATOMOR
;
3286 case ir_intrinsic_atomic_counter_xor
:
3287 opcode
= TGSI_OPCODE_ATOMXOR
;
3289 case ir_intrinsic_atomic_counter_exchange
:
3290 opcode
= TGSI_OPCODE_ATOMXCHG
;
3292 case ir_intrinsic_atomic_counter_comp_swap
: {
3293 opcode
= TGSI_OPCODE_ATOMCAS
;
3294 param
= param
->get_next();
3295 val
= ((ir_instruction
*)param
)->as_rvalue();
3297 data2
= this->result
;
3301 assert(!"Unexpected intrinsic");
3305 inst
= emit_asm(ir
, opcode
, dst
, offset
, data
, data2
);
3308 inst
->resource
= buffer
;
3312 glsl_to_tgsi_visitor::visit_ssbo_intrinsic(ir_call
*ir
)
3314 exec_node
*param
= ir
->actual_parameters
.get_head();
3316 ir_rvalue
*block
= ((ir_instruction
*)param
)->as_rvalue();
3318 param
= param
->get_next();
3319 ir_rvalue
*offset
= ((ir_instruction
*)param
)->as_rvalue();
3321 ir_constant
*const_block
= block
->as_constant();
3325 ctx
->Const
.Program
[shader
->Stage
].MaxAtomicBuffers
+
3326 (const_block
? const_block
->value
.u
[0] : 0),
3330 block
->accept(this);
3331 buffer
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
3332 *buffer
.reladdr
= this->result
;
3333 emit_arl(ir
, sampler_reladdr
, this->result
);
3336 /* Calculate the surface offset */
3337 offset
->accept(this);
3338 st_src_reg off
= this->result
;
3340 st_dst_reg dst
= undef_dst
;
3341 if (ir
->return_deref
) {
3342 ir
->return_deref
->accept(this);
3343 dst
= st_dst_reg(this->result
);
3344 dst
.writemask
= (1 << ir
->return_deref
->type
->vector_elements
) - 1;
3347 glsl_to_tgsi_instruction
*inst
;
3349 if (ir
->callee
->intrinsic_id
== ir_intrinsic_ssbo_load
) {
3350 inst
= emit_asm(ir
, TGSI_OPCODE_LOAD
, dst
, off
);
3351 if (dst
.type
== GLSL_TYPE_BOOL
)
3352 emit_asm(ir
, TGSI_OPCODE_USNE
, dst
, st_src_reg(dst
), st_src_reg_for_int(0));
3353 } else if (ir
->callee
->intrinsic_id
== ir_intrinsic_ssbo_store
) {
3354 param
= param
->get_next();
3355 ir_rvalue
*val
= ((ir_instruction
*)param
)->as_rvalue();
3358 param
= param
->get_next();
3359 ir_constant
*write_mask
= ((ir_instruction
*)param
)->as_constant();
3361 dst
.writemask
= write_mask
->value
.u
[0];
3363 dst
.type
= this->result
.type
;
3364 inst
= emit_asm(ir
, TGSI_OPCODE_STORE
, dst
, off
, this->result
);
3366 param
= param
->get_next();
3367 ir_rvalue
*val
= ((ir_instruction
*)param
)->as_rvalue();
3370 st_src_reg data
= this->result
, data2
= undef_src
;
3372 switch (ir
->callee
->intrinsic_id
) {
3373 case ir_intrinsic_ssbo_atomic_add
:
3374 opcode
= TGSI_OPCODE_ATOMUADD
;
3376 case ir_intrinsic_ssbo_atomic_min
:
3377 opcode
= TGSI_OPCODE_ATOMIMIN
;
3379 case ir_intrinsic_ssbo_atomic_max
:
3380 opcode
= TGSI_OPCODE_ATOMIMAX
;
3382 case ir_intrinsic_ssbo_atomic_and
:
3383 opcode
= TGSI_OPCODE_ATOMAND
;
3385 case ir_intrinsic_ssbo_atomic_or
:
3386 opcode
= TGSI_OPCODE_ATOMOR
;
3388 case ir_intrinsic_ssbo_atomic_xor
:
3389 opcode
= TGSI_OPCODE_ATOMXOR
;
3391 case ir_intrinsic_ssbo_atomic_exchange
:
3392 opcode
= TGSI_OPCODE_ATOMXCHG
;
3394 case ir_intrinsic_ssbo_atomic_comp_swap
:
3395 opcode
= TGSI_OPCODE_ATOMCAS
;
3396 param
= param
->get_next();
3397 val
= ((ir_instruction
*)param
)->as_rvalue();
3399 data2
= this->result
;
3402 assert(!"Unexpected intrinsic");
3406 inst
= emit_asm(ir
, opcode
, dst
, off
, data
, data2
);
3409 param
= param
->get_next();
3410 ir_constant
*access
= NULL
;
3411 if (!param
->is_tail_sentinel()) {
3412 access
= ((ir_instruction
*)param
)->as_constant();
3416 add_buffer_to_load_and_stores(inst
, &buffer
, &this->instructions
, access
);
void
glsl_to_tgsi_visitor::visit_membar_intrinsic(ir_call *ir)
{
   switch (ir->callee->intrinsic_id) {
   case ir_intrinsic_memory_barrier:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
                                  TGSI_MEMBAR_ATOMIC_BUFFER |
                                  TGSI_MEMBAR_SHADER_IMAGE |
                                  TGSI_MEMBAR_SHARED));
      break;
   case ir_intrinsic_memory_barrier_atomic_counter:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_ATOMIC_BUFFER));
      break;
   case ir_intrinsic_memory_barrier_buffer:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER));
      break;
   case ir_intrinsic_memory_barrier_image:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_IMAGE));
      break;
   case ir_intrinsic_memory_barrier_shared:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHARED));
      break;
   case ir_intrinsic_group_memory_barrier:
      emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
               st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
                                  TGSI_MEMBAR_ATOMIC_BUFFER |
                                  TGSI_MEMBAR_SHADER_IMAGE |
                                  TGSI_MEMBAR_SHARED |
                                  TGSI_MEMBAR_THREAD_GROUP));
      break;
   default:
      assert(!"Unexpected memory barrier intrinsic");
   }
}
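/* The mapping above mirrors the GLSL barrier built-ins: e.g.
 * memoryBarrierBuffer() becomes a MEMBAR with only TGSI_MEMBAR_SHADER_BUFFER
 * set, while groupMemoryBarrier() sets all of the buffer/atomic/image/shared
 * bits plus TGSI_MEMBAR_THREAD_GROUP.
 */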
3460 glsl_to_tgsi_visitor::visit_shared_intrinsic(ir_call
*ir
)
3462 exec_node
*param
= ir
->actual_parameters
.get_head();
3464 ir_rvalue
*offset
= ((ir_instruction
*)param
)->as_rvalue();
3466 st_src_reg
buffer(PROGRAM_MEMORY
, 0, GLSL_TYPE_UINT
);
3468 /* Calculate the surface offset */
3469 offset
->accept(this);
3470 st_src_reg off
= this->result
;
3472 st_dst_reg dst
= undef_dst
;
3473 if (ir
->return_deref
) {
3474 ir
->return_deref
->accept(this);
3475 dst
= st_dst_reg(this->result
);
3476 dst
.writemask
= (1 << ir
->return_deref
->type
->vector_elements
) - 1;
3479 glsl_to_tgsi_instruction
*inst
;
3481 if (ir
->callee
->intrinsic_id
== ir_intrinsic_shared_load
) {
3482 inst
= emit_asm(ir
, TGSI_OPCODE_LOAD
, dst
, off
);
3483 inst
->resource
= buffer
;
3484 } else if (ir
->callee
->intrinsic_id
== ir_intrinsic_shared_store
) {
3485 param
= param
->get_next();
3486 ir_rvalue
*val
= ((ir_instruction
*)param
)->as_rvalue();
3489 param
= param
->get_next();
3490 ir_constant
*write_mask
= ((ir_instruction
*)param
)->as_constant();
3492 dst
.writemask
= write_mask
->value
.u
[0];
3494 dst
.type
= this->result
.type
;
3495 inst
= emit_asm(ir
, TGSI_OPCODE_STORE
, dst
, off
, this->result
);
3496 inst
->resource
= buffer
;
3498 param
= param
->get_next();
3499 ir_rvalue
*val
= ((ir_instruction
*)param
)->as_rvalue();
3502 st_src_reg data
= this->result
, data2
= undef_src
;
3504 switch (ir
->callee
->intrinsic_id
) {
3505 case ir_intrinsic_shared_atomic_add
:
3506 opcode
= TGSI_OPCODE_ATOMUADD
;
3508 case ir_intrinsic_shared_atomic_min
:
3509 opcode
= TGSI_OPCODE_ATOMIMIN
;
3511 case ir_intrinsic_shared_atomic_max
:
3512 opcode
= TGSI_OPCODE_ATOMIMAX
;
3514 case ir_intrinsic_shared_atomic_and
:
3515 opcode
= TGSI_OPCODE_ATOMAND
;
3517 case ir_intrinsic_shared_atomic_or
:
3518 opcode
= TGSI_OPCODE_ATOMOR
;
3520 case ir_intrinsic_shared_atomic_xor
:
3521 opcode
= TGSI_OPCODE_ATOMXOR
;
3523 case ir_intrinsic_shared_atomic_exchange
:
3524 opcode
= TGSI_OPCODE_ATOMXCHG
;
3526 case ir_intrinsic_shared_atomic_comp_swap
:
3527 opcode
= TGSI_OPCODE_ATOMCAS
;
3528 param
= param
->get_next();
3529 val
= ((ir_instruction
*)param
)->as_rvalue();
3531 data2
= this->result
;
3534 assert(!"Unexpected intrinsic");
3538 inst
= emit_asm(ir
, opcode
, dst
, off
, data
, data2
);
3539 inst
->resource
= buffer
;
3544 get_image_qualifiers(ir_dereference
*ir
, const glsl_type
**type
,
3545 bool *memory_coherent
, bool *memory_volatile
,
3546 bool *memory_restrict
, unsigned *image_format
)
3549 switch (ir
->ir_type
) {
3550 case ir_type_dereference_record
: {
3551 ir_dereference_record
*deref_record
= ir
->as_dereference_record();
3552 const glsl_type
*struct_type
= deref_record
->record
->type
;
3553 int fild_idx
= deref_record
->field_idx
;
3555 *type
= struct_type
->fields
.structure
[fild_idx
].type
->without_array();
3557 struct_type
->fields
.structure
[fild_idx
].memory_coherent
;
3559 struct_type
->fields
.structure
[fild_idx
].memory_volatile
;
3561 struct_type
->fields
.structure
[fild_idx
].memory_restrict
;
3563 struct_type
->fields
.structure
[fild_idx
].image_format
;
3567 case ir_type_dereference_array
: {
3568 ir_dereference_array
*deref_arr
= ir
->as_dereference_array();
3569 get_image_qualifiers((ir_dereference
*)deref_arr
->array
, type
,
3570 memory_coherent
, memory_volatile
, memory_restrict
,
3575 case ir_type_dereference_variable
: {
3576 ir_variable
*var
= ir
->variable_referenced();
3578 *type
= var
->type
->without_array();
3579 *memory_coherent
= var
->data
.memory_coherent
;
3580 *memory_volatile
= var
->data
.memory_volatile
;
3581 *memory_restrict
= var
->data
.memory_restrict
;
3582 *image_format
= var
->data
.image_format
;
3592 glsl_to_tgsi_visitor::visit_image_intrinsic(ir_call
*ir
)
3594 exec_node
*param
= ir
->actual_parameters
.get_head();
3596 ir_dereference
*img
= (ir_dereference
*)param
;
3597 const ir_variable
*imgvar
= img
->variable_referenced();
3598 unsigned sampler_array_size
= 1, sampler_base
= 0;
3599 bool memory_coherent
= false, memory_volatile
= false, memory_restrict
= false;
3600 unsigned image_format
= 0;
3601 const glsl_type
*type
= NULL
;
3603 get_image_qualifiers(img
, &type
, &memory_coherent
, &memory_volatile
,
3604 &memory_restrict
, &image_format
);
3607 st_src_reg
image(PROGRAM_IMAGE
, 0, GLSL_TYPE_UINT
);
3609 get_deref_offsets(img
, &sampler_array_size
, &sampler_base
,
3610 &index
, &reladdr
, !imgvar
->contains_bindless());
3612 image
.index
= index
;
3613 if (reladdr
.file
!= PROGRAM_UNDEFINED
) {
3614 image
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
3615 *image
.reladdr
= reladdr
;
3616 emit_arl(ir
, sampler_reladdr
, reladdr
);
3619 st_dst_reg dst
= undef_dst
;
3620 if (ir
->return_deref
) {
3621 ir
->return_deref
->accept(this);
3622 dst
= st_dst_reg(this->result
);
3623 dst
.writemask
= (1 << ir
->return_deref
->type
->vector_elements
) - 1;
3626 glsl_to_tgsi_instruction
*inst
;
3628 st_src_reg bindless
;
3629 if (imgvar
->contains_bindless()) {
3631 bindless
= this->result
;
3634 if (ir
->callee
->intrinsic_id
== ir_intrinsic_image_size
) {
3635 dst
.writemask
= WRITEMASK_XYZ
;
3636 inst
= emit_asm(ir
, TGSI_OPCODE_RESQ
, dst
);
3637 } else if (ir
->callee
->intrinsic_id
== ir_intrinsic_image_samples
) {
3638 st_src_reg res
= get_temp(glsl_type::ivec4_type
);
3639 st_dst_reg dstres
= st_dst_reg(res
);
3640 dstres
.writemask
= WRITEMASK_W
;
3641 inst
= emit_asm(ir
, TGSI_OPCODE_RESQ
, dstres
);
3642 res
.swizzle
= SWIZZLE_WWWW
;
3643 emit_asm(ir
, TGSI_OPCODE_MOV
, dst
, res
);
3645 st_src_reg arg1
= undef_src
, arg2
= undef_src
;
3647 st_dst_reg coord_dst
;
3648 coord
= get_temp(glsl_type::ivec4_type
);
3649 coord_dst
= st_dst_reg(coord
);
3650 coord_dst
.writemask
= (1 << type
->coordinate_components()) - 1;
3651 param
= param
->get_next();
3652 ((ir_dereference
*)param
)->accept(this);
3653 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, this->result
);
3654 coord
.swizzle
= SWIZZLE_XXXX
;
3655 switch (type
->coordinate_components()) {
3656 case 4: assert(!"unexpected coord count");
3658 case 3: coord
.swizzle
|= SWIZZLE_Z
<< 6;
3660 case 2: coord
.swizzle
|= SWIZZLE_Y
<< 3;
3663 if (type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_MS
) {
3664 param
= param
->get_next();
3665 ((ir_dereference
*)param
)->accept(this);
3666 st_src_reg sample
= this->result
;
3667 sample
.swizzle
= SWIZZLE_XXXX
;
3668 coord_dst
.writemask
= WRITEMASK_W
;
3669 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, sample
);
3670 coord
.swizzle
|= SWIZZLE_W
<< 9;
3673 param
= param
->get_next();
3674 if (!param
->is_tail_sentinel()) {
3675 ((ir_dereference
*)param
)->accept(this);
3676 arg1
= this->result
;
3677 param
= param
->get_next();
3680 if (!param
->is_tail_sentinel()) {
3681 ((ir_dereference
*)param
)->accept(this);
3682 arg2
= this->result
;
3683 param
= param
->get_next();
3686 assert(param
->is_tail_sentinel());
3689 switch (ir
->callee
->intrinsic_id
) {
3690 case ir_intrinsic_image_load
:
3691 opcode
= TGSI_OPCODE_LOAD
;
3693 case ir_intrinsic_image_store
:
3694 opcode
= TGSI_OPCODE_STORE
;
3696 case ir_intrinsic_image_atomic_add
:
3697 opcode
= TGSI_OPCODE_ATOMUADD
;
3699 case ir_intrinsic_image_atomic_min
:
3700 opcode
= TGSI_OPCODE_ATOMIMIN
;
3702 case ir_intrinsic_image_atomic_max
:
3703 opcode
= TGSI_OPCODE_ATOMIMAX
;
3705 case ir_intrinsic_image_atomic_and
:
3706 opcode
= TGSI_OPCODE_ATOMAND
;
3708 case ir_intrinsic_image_atomic_or
:
3709 opcode
= TGSI_OPCODE_ATOMOR
;
3711 case ir_intrinsic_image_atomic_xor
:
3712 opcode
= TGSI_OPCODE_ATOMXOR
;
3714 case ir_intrinsic_image_atomic_exchange
:
3715 opcode
= TGSI_OPCODE_ATOMXCHG
;
3717 case ir_intrinsic_image_atomic_comp_swap
:
3718 opcode
= TGSI_OPCODE_ATOMCAS
;
3721 assert(!"Unexpected intrinsic");
3725 inst
= emit_asm(ir
, opcode
, dst
, coord
, arg1
, arg2
);
3726 if (opcode
== TGSI_OPCODE_STORE
)
3727 inst
->dst
[0].writemask
= WRITEMASK_XYZW
;
3730 if (imgvar
->contains_bindless()) {
3731 inst
->resource
= bindless
;
3732 inst
->resource
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
,
3733 SWIZZLE_X
, SWIZZLE_Y
);
3735 inst
->resource
= image
;
3736 inst
->sampler_array_size
= sampler_array_size
;
3737 inst
->sampler_base
= sampler_base
;
3740 inst
->tex_target
= type
->sampler_index();
3741 inst
->image_format
= st_mesa_format_to_pipe_format(st_context(ctx
),
3742 _mesa_get_shader_image_format(image_format
));
3744 if (memory_coherent
)
3745 inst
->buffer_access
|= TGSI_MEMORY_COHERENT
;
3746 if (memory_restrict
)
3747 inst
->buffer_access
|= TGSI_MEMORY_RESTRICT
;
3748 if (memory_volatile
)
3749 inst
->buffer_access
|= TGSI_MEMORY_VOLATILE
;
void
glsl_to_tgsi_visitor::visit_generic_intrinsic(ir_call *ir, unsigned op)
{
   ir->return_deref->accept(this);
   st_dst_reg dst = st_dst_reg(this->result);

   dst.writemask = u_bit_consecutive(0, ir->return_deref->var->type->vector_elements);

   st_src_reg src[4] = { undef_src, undef_src, undef_src, undef_src };
   unsigned num_src = 0;
   foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
      assert(num_src < ARRAY_SIZE(src));

      this->result.file = PROGRAM_UNDEFINED;
      param->accept(this);
      assert(this->result.file != PROGRAM_UNDEFINED);

      src[num_src] = this->result;
      num_src++;
   }

   emit_asm(ir, op, dst, src[0], src[1], src[2], src[3]);
}
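/* For example, a ballotARB() call reaches this helper with
 * TGSI_OPCODE_BALLOT from the intrinsic dispatcher below: the return
 * dereference becomes the destination, the single argument becomes src[0],
 * and the remaining sources stay undef_src.
 */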
3777 glsl_to_tgsi_visitor::visit(ir_call
*ir
)
3779 ir_function_signature
*sig
= ir
->callee
;
3781 /* Filter out intrinsics */
3782 switch (sig
->intrinsic_id
) {
3783 case ir_intrinsic_atomic_counter_read
:
3784 case ir_intrinsic_atomic_counter_increment
:
3785 case ir_intrinsic_atomic_counter_predecrement
:
3786 case ir_intrinsic_atomic_counter_add
:
3787 case ir_intrinsic_atomic_counter_min
:
3788 case ir_intrinsic_atomic_counter_max
:
3789 case ir_intrinsic_atomic_counter_and
:
3790 case ir_intrinsic_atomic_counter_or
:
3791 case ir_intrinsic_atomic_counter_xor
:
3792 case ir_intrinsic_atomic_counter_exchange
:
3793 case ir_intrinsic_atomic_counter_comp_swap
:
3794 visit_atomic_counter_intrinsic(ir
);
3797 case ir_intrinsic_ssbo_load
:
3798 case ir_intrinsic_ssbo_store
:
3799 case ir_intrinsic_ssbo_atomic_add
:
3800 case ir_intrinsic_ssbo_atomic_min
:
3801 case ir_intrinsic_ssbo_atomic_max
:
3802 case ir_intrinsic_ssbo_atomic_and
:
3803 case ir_intrinsic_ssbo_atomic_or
:
3804 case ir_intrinsic_ssbo_atomic_xor
:
3805 case ir_intrinsic_ssbo_atomic_exchange
:
3806 case ir_intrinsic_ssbo_atomic_comp_swap
:
3807 visit_ssbo_intrinsic(ir
);
3810 case ir_intrinsic_memory_barrier
:
3811 case ir_intrinsic_memory_barrier_atomic_counter
:
3812 case ir_intrinsic_memory_barrier_buffer
:
3813 case ir_intrinsic_memory_barrier_image
:
3814 case ir_intrinsic_memory_barrier_shared
:
3815 case ir_intrinsic_group_memory_barrier
:
3816 visit_membar_intrinsic(ir
);
3819 case ir_intrinsic_shared_load
:
3820 case ir_intrinsic_shared_store
:
3821 case ir_intrinsic_shared_atomic_add
:
3822 case ir_intrinsic_shared_atomic_min
:
3823 case ir_intrinsic_shared_atomic_max
:
3824 case ir_intrinsic_shared_atomic_and
:
3825 case ir_intrinsic_shared_atomic_or
:
3826 case ir_intrinsic_shared_atomic_xor
:
3827 case ir_intrinsic_shared_atomic_exchange
:
3828 case ir_intrinsic_shared_atomic_comp_swap
:
3829 visit_shared_intrinsic(ir
);
3832 case ir_intrinsic_image_load
:
3833 case ir_intrinsic_image_store
:
3834 case ir_intrinsic_image_atomic_add
:
3835 case ir_intrinsic_image_atomic_min
:
3836 case ir_intrinsic_image_atomic_max
:
3837 case ir_intrinsic_image_atomic_and
:
3838 case ir_intrinsic_image_atomic_or
:
3839 case ir_intrinsic_image_atomic_xor
:
3840 case ir_intrinsic_image_atomic_exchange
:
3841 case ir_intrinsic_image_atomic_comp_swap
:
3842 case ir_intrinsic_image_size
:
3843 case ir_intrinsic_image_samples
:
3844 visit_image_intrinsic(ir
);
3847 case ir_intrinsic_shader_clock
:
3848 visit_generic_intrinsic(ir
, TGSI_OPCODE_CLOCK
);
3851 case ir_intrinsic_vote_all
:
3852 visit_generic_intrinsic(ir
, TGSI_OPCODE_VOTE_ALL
);
3854 case ir_intrinsic_vote_any
:
3855 visit_generic_intrinsic(ir
, TGSI_OPCODE_VOTE_ANY
);
3857 case ir_intrinsic_vote_eq
:
3858 visit_generic_intrinsic(ir
, TGSI_OPCODE_VOTE_EQ
);
3860 case ir_intrinsic_ballot
:
3861 visit_generic_intrinsic(ir
, TGSI_OPCODE_BALLOT
);
3863 case ir_intrinsic_read_first_invocation
:
3864 visit_generic_intrinsic(ir
, TGSI_OPCODE_READ_FIRST
);
3866 case ir_intrinsic_read_invocation
:
3867 visit_generic_intrinsic(ir
, TGSI_OPCODE_READ_INVOC
);
3870 case ir_intrinsic_invalid
:
3871 case ir_intrinsic_generic_load
:
3872 case ir_intrinsic_generic_store
:
3873 case ir_intrinsic_generic_atomic_add
:
3874 case ir_intrinsic_generic_atomic_and
:
3875 case ir_intrinsic_generic_atomic_or
:
3876 case ir_intrinsic_generic_atomic_xor
:
3877 case ir_intrinsic_generic_atomic_min
:
3878 case ir_intrinsic_generic_atomic_max
:
3879 case ir_intrinsic_generic_atomic_exchange
:
3880 case ir_intrinsic_generic_atomic_comp_swap
:
3881 unreachable("Invalid intrinsic");
3886 glsl_to_tgsi_visitor::calc_deref_offsets(ir_dereference
*tail
,
3887 unsigned *array_elements
,
3889 st_src_reg
*indirect
,
3892 switch (tail
->ir_type
) {
3893 case ir_type_dereference_record
: {
3894 ir_dereference_record
*deref_record
= tail
->as_dereference_record();
3895 const glsl_type
*struct_type
= deref_record
->record
->type
;
3896 int field_index
= deref_record
->field_idx
;
3898 calc_deref_offsets(deref_record
->record
->as_dereference(), array_elements
, index
, indirect
, location
);
3900 assert(field_index
>= 0);
3901 *location
+= struct_type
->record_location_offset(field_index
);
3905 case ir_type_dereference_array
: {
3906 ir_dereference_array
*deref_arr
= tail
->as_dereference_array();
3908 void *mem_ctx
= ralloc_parent(deref_arr
);
3909 ir_constant
*array_index
=
3910 deref_arr
->array_index
->constant_expression_value(mem_ctx
);
3913 st_src_reg temp_reg
;
3914 st_dst_reg temp_dst
;
3916 temp_reg
= get_temp(glsl_type::uint_type
);
3917 temp_dst
= st_dst_reg(temp_reg
);
3918 temp_dst
.writemask
= 1;
3920 deref_arr
->array_index
->accept(this);
3921 if (*array_elements
!= 1)
3922 emit_asm(NULL
, TGSI_OPCODE_MUL
, temp_dst
, this->result
, st_src_reg_for_int(*array_elements
));
3924 emit_asm(NULL
, TGSI_OPCODE_MOV
, temp_dst
, this->result
);
3926 if (indirect
->file
== PROGRAM_UNDEFINED
)
3927 *indirect
= temp_reg
;
3929 temp_dst
= st_dst_reg(*indirect
);
3930 temp_dst
.writemask
= 1;
3931 emit_asm(NULL
, TGSI_OPCODE_ADD
, temp_dst
, *indirect
, temp_reg
);
3934 *index
+= array_index
->value
.u
[0] * *array_elements
;
3936 *array_elements
*= deref_arr
->array
->type
->length
;
3938 calc_deref_offsets(deref_arr
->array
->as_dereference(), array_elements
, index
, indirect
, location
);
void
glsl_to_tgsi_visitor::get_deref_offsets(ir_dereference *ir,
                                        unsigned *array_size,
                                        unsigned *base,
                                        uint16_t *index,
                                        st_src_reg *reladdr,
                                        bool opaque)
{
   GLuint shader = _mesa_program_enum_to_shader_stage(this->prog->Target);
   unsigned location = 0;
   ir_variable *var = ir->variable_referenced();

   memset(reladdr, 0, sizeof(*reladdr));
   reladdr->file = PROGRAM_UNDEFINED;

   *base = 0;
   *array_size = 1;

   assert(var);
   location = var->data.location;
   calc_deref_offsets(ir, array_size, index, reladdr, &location);

   /*
    * If we end up with no indirect then adjust the base to the index,
    * and set the array size to 1.
    */
   if (reladdr->file == PROGRAM_UNDEFINED) {
      *base = *index;
      *index = 0;
      *array_size = 1;
   }

   if (opaque) {
      assert(location != 0xffffffff);
      *base += this->shader_program->data->UniformStorage[location].opaque[shader].index;
      *index += this->shader_program->data->UniformStorage[location].opaque[shader].index;
   }
}
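/* Note that for opaque types the GLSL location found above is only an index
 * into UniformStorage; the adjustment of *base and *index by
 * opaque[shader].index maps it to the per-stage opaque (sampler/image) slot
 * assigned by the linker for this shader stage.
 */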
st_src_reg
glsl_to_tgsi_visitor::canonicalize_gather_offset(st_src_reg offset)
{
   if (offset.reladdr || offset.reladdr2) {
      st_src_reg tmp = get_temp(glsl_type::ivec2_type);
      st_dst_reg tmp_dst = st_dst_reg(tmp);
      tmp_dst.writemask = WRITEMASK_XY;
      emit_asm(NULL, TGSI_OPCODE_MOV, tmp_dst, offset);
      return tmp;
   }

   return offset;
}
3999 glsl_to_tgsi_visitor::visit(ir_texture
*ir
)
4001 st_src_reg result_src
, coord
, cube_sc
, lod_info
, projector
, dx
, dy
;
4002 st_src_reg offset
[MAX_GLSL_TEXTURE_OFFSET
], sample_index
, component
;
4003 st_src_reg levels_src
, reladdr
;
4004 st_dst_reg result_dst
, coord_dst
, cube_sc_dst
;
4005 glsl_to_tgsi_instruction
*inst
= NULL
;
4006 unsigned opcode
= TGSI_OPCODE_NOP
;
4007 const glsl_type
*sampler_type
= ir
->sampler
->type
;
4008 unsigned sampler_array_size
= 1, sampler_base
= 0;
4009 bool is_cube_array
= false, is_cube_shadow
= false;
4010 ir_variable
*var
= ir
->sampler
->variable_referenced();
4013 /* if we are a cube array sampler or a cube shadow */
4014 if (sampler_type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_CUBE
) {
4015 is_cube_array
= sampler_type
->sampler_array
;
4016 is_cube_shadow
= sampler_type
->sampler_shadow
;
4019 if (ir
->coordinate
) {
4020 ir
->coordinate
->accept(this);
4022 /* Put our coords in a temp. We'll need to modify them for shadow,
4023 * projection, or LOD, so the only case we'd use it as-is is if
4024 * we're doing plain old texturing. The optimization passes on
4025 * glsl_to_tgsi_visitor should handle cleaning up our mess in that case.
4027 coord
= get_temp(glsl_type::vec4_type
);
4028 coord_dst
= st_dst_reg(coord
);
4029 coord_dst
.writemask
= (1 << ir
->coordinate
->type
->vector_elements
) - 1;
4030 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, this->result
);
4033 if (ir
->projector
) {
4034 ir
->projector
->accept(this);
4035 projector
= this->result
;
4038 /* Storage for our result. Ideally for an assignment we'd be using
4039 * the actual storage for the result here, instead.
4041 result_src
= get_temp(ir
->type
);
4042 result_dst
= st_dst_reg(result_src
);
4043 result_dst
.writemask
= (1 << ir
->type
->vector_elements
) - 1;
4047 opcode
= (is_cube_array
&& ir
->shadow_comparator
) ? TGSI_OPCODE_TEX2
: TGSI_OPCODE_TEX
;
4049 ir
->offset
->accept(this);
4050 offset
[0] = this->result
;
4054 if (is_cube_array
|| is_cube_shadow
) {
4055 opcode
= TGSI_OPCODE_TXB2
;
4058 opcode
= TGSI_OPCODE_TXB
;
4060 ir
->lod_info
.bias
->accept(this);
4061 lod_info
= this->result
;
4063 ir
->offset
->accept(this);
4064 offset
[0] = this->result
;
4068 if (this->has_tex_txf_lz
&& ir
->lod_info
.lod
->is_zero()) {
4069 opcode
= TGSI_OPCODE_TEX_LZ
;
4071 opcode
= is_cube_array
? TGSI_OPCODE_TXL2
: TGSI_OPCODE_TXL
;
4072 ir
->lod_info
.lod
->accept(this);
4073 lod_info
= this->result
;
4076 ir
->offset
->accept(this);
4077 offset
[0] = this->result
;
4081 opcode
= TGSI_OPCODE_TXD
;
4082 ir
->lod_info
.grad
.dPdx
->accept(this);
4084 ir
->lod_info
.grad
.dPdy
->accept(this);
4087 ir
->offset
->accept(this);
4088 offset
[0] = this->result
;
4092 opcode
= TGSI_OPCODE_TXQ
;
4093 ir
->lod_info
.lod
->accept(this);
4094 lod_info
= this->result
;
4096 case ir_query_levels
:
4097 opcode
= TGSI_OPCODE_TXQ
;
4098 lod_info
= undef_src
;
4099 levels_src
= get_temp(ir
->type
);
4102 if (this->has_tex_txf_lz
&& ir
->lod_info
.lod
->is_zero()) {
4103 opcode
= TGSI_OPCODE_TXF_LZ
;
4105 opcode
= TGSI_OPCODE_TXF
;
4106 ir
->lod_info
.lod
->accept(this);
4107 lod_info
= this->result
;
4110 ir
->offset
->accept(this);
4111 offset
[0] = this->result
;
4115 opcode
= TGSI_OPCODE_TXF
;
4116 ir
->lod_info
.sample_index
->accept(this);
4117 sample_index
= this->result
;
4120 opcode
= TGSI_OPCODE_TG4
;
4121 ir
->lod_info
.component
->accept(this);
4122 component
= this->result
;
4124 ir
->offset
->accept(this);
4125 if (ir
->offset
->type
->is_array()) {
4126 const glsl_type
*elt_type
= ir
->offset
->type
->fields
.array
;
4127 for (i
= 0; i
< ir
->offset
->type
->length
; i
++) {
4128 offset
[i
] = this->result
;
4129 offset
[i
].index
+= i
* type_size(elt_type
);
4130 offset
[i
].type
= elt_type
->base_type
;
4131 offset
[i
].swizzle
= swizzle_for_size(elt_type
->vector_elements
);
4132 offset
[i
] = canonicalize_gather_offset(offset
[i
]);
4135 offset
[0] = canonicalize_gather_offset(this->result
);
4140 opcode
= TGSI_OPCODE_LODQ
;
4142 case ir_texture_samples
:
4143 opcode
= TGSI_OPCODE_TXQS
;
4145 case ir_samples_identical
:
4146 unreachable("Unexpected ir_samples_identical opcode");
4149 if (ir
->projector
) {
4150 if (opcode
== TGSI_OPCODE_TEX
) {
4151 /* Slot the projector in as the last component of the coord. */
4152 coord_dst
.writemask
= WRITEMASK_W
;
4153 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, projector
);
4154 coord_dst
.writemask
= WRITEMASK_XYZW
;
4155 opcode
= TGSI_OPCODE_TXP
;
4157 st_src_reg coord_w
= coord
;
4158 coord_w
.swizzle
= SWIZZLE_WWWW
;
4160 /* For the other TEX opcodes there's no projective version
4161 * since the last slot is taken up by LOD info. Do the
4162 * projective divide now.
4164 coord_dst
.writemask
= WRITEMASK_W
;
4165 emit_asm(ir
, TGSI_OPCODE_RCP
, coord_dst
, projector
);
4167 /* In the case where we have to project the coordinates "by hand,"
4168 * the shadow comparator value must also be projected.
4170 st_src_reg tmp_src
= coord
;
4171 if (ir
->shadow_comparator
) {
4172 /* Slot the shadow value in as the second to last component of the
4175 ir
->shadow_comparator
->accept(this);
4177 tmp_src
= get_temp(glsl_type::vec4_type
);
4178 st_dst_reg tmp_dst
= st_dst_reg(tmp_src
);
4180 /* Projective division not allowed for array samplers. */
4181 assert(!sampler_type
->sampler_array
);
4183 tmp_dst
.writemask
= WRITEMASK_Z
;
4184 emit_asm(ir
, TGSI_OPCODE_MOV
, tmp_dst
, this->result
);
4186 tmp_dst
.writemask
= WRITEMASK_XY
;
4187 emit_asm(ir
, TGSI_OPCODE_MOV
, tmp_dst
, coord
);
4190 coord_dst
.writemask
= WRITEMASK_XYZ
;
4191 emit_asm(ir
, TGSI_OPCODE_MUL
, coord_dst
, tmp_src
, coord_w
);
4193 coord_dst
.writemask
= WRITEMASK_XYZW
;
4194 coord
.swizzle
= SWIZZLE_XYZW
;
4198 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the shadow
4199 * comparator was put in the correct place (and projected) by the code,
4200 * above, that handles by-hand projection.
4202 if (ir
->shadow_comparator
&& (!ir
->projector
|| opcode
== TGSI_OPCODE_TXP
)) {
4203 /* Slot the shadow value in as the second to last component of the
4206 ir
->shadow_comparator
->accept(this);
4208 if (is_cube_array
) {
4209 cube_sc
= get_temp(glsl_type::float_type
);
4210 cube_sc_dst
= st_dst_reg(cube_sc
);
4211 cube_sc_dst
.writemask
= WRITEMASK_X
;
4212 emit_asm(ir
, TGSI_OPCODE_MOV
, cube_sc_dst
, this->result
);
4213 cube_sc_dst
.writemask
= WRITEMASK_X
;
4216 if ((sampler_type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_2D
&&
4217 sampler_type
->sampler_array
) ||
4218 sampler_type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_CUBE
) {
4219 coord_dst
.writemask
= WRITEMASK_W
;
4221 coord_dst
.writemask
= WRITEMASK_Z
;
4223 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, this->result
);
4224 coord_dst
.writemask
= WRITEMASK_XYZW
;
4228 if (ir
->op
== ir_txf_ms
) {
4229 coord_dst
.writemask
= WRITEMASK_W
;
4230 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, sample_index
);
4231 coord_dst
.writemask
= WRITEMASK_XYZW
;
4232 } else if (opcode
== TGSI_OPCODE_TXL
|| opcode
== TGSI_OPCODE_TXB
||
4233 opcode
== TGSI_OPCODE_TXF
) {
4234 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
4235 coord_dst
.writemask
= WRITEMASK_W
;
4236 emit_asm(ir
, TGSI_OPCODE_MOV
, coord_dst
, lod_info
);
4237 coord_dst
.writemask
= WRITEMASK_XYZW
;
4240 st_src_reg
sampler(PROGRAM_SAMPLER
, 0, GLSL_TYPE_UINT
);
4243 get_deref_offsets(ir
->sampler
, &sampler_array_size
, &sampler_base
,
4244 &index
, &reladdr
, !var
->contains_bindless());
4246 sampler
.index
= index
;
4247 if (reladdr
.file
!= PROGRAM_UNDEFINED
) {
4248 sampler
.reladdr
= ralloc(mem_ctx
, st_src_reg
);
4249 *sampler
.reladdr
= reladdr
;
4250 emit_arl(ir
, sampler_reladdr
, reladdr
);
4253 st_src_reg bindless
;
4254 if (var
->contains_bindless()) {
4255 ir
->sampler
->accept(this);
4256 bindless
= this->result
;
4259 if (opcode
== TGSI_OPCODE_TXD
)
4260 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, dx
, dy
);
4261 else if (opcode
== TGSI_OPCODE_TXQ
) {
4262 if (ir
->op
== ir_query_levels
) {
4263 /* the level is stored in W */
4264 inst
= emit_asm(ir
, opcode
, st_dst_reg(levels_src
), lod_info
);
4265 result_dst
.writemask
= WRITEMASK_X
;
4266 levels_src
.swizzle
= SWIZZLE_WWWW
;
4267 emit_asm(ir
, TGSI_OPCODE_MOV
, result_dst
, levels_src
);
4269 inst
= emit_asm(ir
, opcode
, result_dst
, lod_info
);
4270 } else if (opcode
== TGSI_OPCODE_TXQS
) {
4271 inst
= emit_asm(ir
, opcode
, result_dst
);
4272 } else if (opcode
== TGSI_OPCODE_TXL2
|| opcode
== TGSI_OPCODE_TXB2
) {
4273 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, lod_info
);
4274 } else if (opcode
== TGSI_OPCODE_TEX2
) {
4275 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, cube_sc
);
4276 } else if (opcode
== TGSI_OPCODE_TG4
) {
4277 if (is_cube_array
&& ir
->shadow_comparator
) {
4278 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, cube_sc
);
4280 inst
= emit_asm(ir
, opcode
, result_dst
, coord
, component
);
4283 inst
= emit_asm(ir
, opcode
, result_dst
, coord
);
4285 if (ir
->shadow_comparator
)
4286 inst
->tex_shadow
= GL_TRUE
;
4288 if (var
->contains_bindless()) {
4289 inst
->resource
= bindless
;
4290 inst
->resource
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
,
4291 SWIZZLE_X
, SWIZZLE_Y
);
4293 inst
->resource
= sampler
;
4294 inst
->sampler_array_size
= sampler_array_size
;
4295 inst
->sampler_base
= sampler_base
;
4299 if (!inst
->tex_offsets
)
4300 inst
->tex_offsets
= rzalloc_array(inst
, st_src_reg
, MAX_GLSL_TEXTURE_OFFSET
);
4302 for (i
= 0; i
< MAX_GLSL_TEXTURE_OFFSET
&& offset
[i
].file
!= PROGRAM_UNDEFINED
; i
++)
4303 inst
->tex_offsets
[i
] = offset
[i
];
4304 inst
->tex_offset_num_offset
= i
;
4307 inst
->tex_target
= sampler_type
->sampler_index();
4308 inst
->tex_type
= ir
->type
->base_type
;
4310 this->result
= result_src
;
void
glsl_to_tgsi_visitor::visit(ir_return *ir)
{
   assert(!ir->get_value());

   emit_asm(ir, TGSI_OPCODE_RET);
}
void
glsl_to_tgsi_visitor::visit(ir_discard *ir)
{
   if (ir->condition) {
      ir->condition->accept(this);
      st_src_reg condition = this->result;

      /* Convert the bool condition to a float so we can negate. */
      if (native_integers) {
         st_src_reg temp = get_temp(ir->condition->type);
         emit_asm(ir, TGSI_OPCODE_AND, st_dst_reg(temp),
                  condition, st_src_reg_for_float(1.0));
         condition = temp;
      }

      condition.negate = ~condition.negate;
      emit_asm(ir, TGSI_OPCODE_KILL_IF, undef_dst, condition);
   } else {
      /* unconditional kil */
      emit_asm(ir, TGSI_OPCODE_KILL);
   }
}
void
glsl_to_tgsi_visitor::visit(ir_if *ir)
{
   unsigned if_opcode;
   glsl_to_tgsi_instruction *if_inst;

   ir->condition->accept(this);
   assert(this->result.file != PROGRAM_UNDEFINED);

   if_opcode = native_integers ? TGSI_OPCODE_UIF : TGSI_OPCODE_IF;

   if_inst = emit_asm(ir->condition, if_opcode, undef_dst, this->result);

   this->instructions.push_tail(if_inst);

   visit_exec_list(&ir->then_instructions, this);

   if (!ir->else_instructions.is_empty()) {
      emit_asm(ir->condition, TGSI_OPCODE_ELSE);
      visit_exec_list(&ir->else_instructions, this);
   }

   if_inst = emit_asm(ir->condition, TGSI_OPCODE_ENDIF);
}
void
glsl_to_tgsi_visitor::visit(ir_emit_vertex *ir)
{
   assert(this->prog->Target == GL_GEOMETRY_PROGRAM_NV);

   ir->stream->accept(this);
   emit_asm(ir, TGSI_OPCODE_EMIT, undef_dst, this->result);
}

void
glsl_to_tgsi_visitor::visit(ir_end_primitive *ir)
{
   assert(this->prog->Target == GL_GEOMETRY_PROGRAM_NV);

   ir->stream->accept(this);
   emit_asm(ir, TGSI_OPCODE_ENDPRIM, undef_dst, this->result);
}

void
glsl_to_tgsi_visitor::visit(ir_barrier *ir)
{
   assert(this->prog->Target == GL_TESS_CONTROL_PROGRAM_NV ||
          this->prog->Target == GL_COMPUTE_PROGRAM_NV);

   emit_asm(ir, TGSI_OPCODE_BARRIER);
}
glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
{
   STATIC_ASSERT(sizeof(samplers_used) * 8 >= PIPE_MAX_SAMPLERS);

   result.file = PROGRAM_UNDEFINED;
   num_input_arrays = 0;
   num_output_arrays = 0;
   num_address_regs = 0;
   indirect_addr_consts = false;
   wpos_transform_const = -1;
   native_integers = false;
   mem_ctx = ralloc_context(NULL);
   shader_program = NULL;
   use_shared_memory = false;
   has_tex_txf_lz = false;
}
*entry
)
4434 variable_storage
*storage
= (variable_storage
*)entry
->data
;
4439 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
4441 _mesa_hash_table_destroy(variables
, var_destroy
);
4443 ralloc_free(mem_ctx
);
4446 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor
*v
)
4453 * Count resources used by the given gpu program (number of texture
4457 count_resources(glsl_to_tgsi_visitor
*v
, gl_program
*prog
)
4459 v
->samplers_used
= 0;
4462 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &v
->instructions
) {
4463 if (inst
->info
->is_tex
) {
4464 for (int i
= 0; i
< inst
->sampler_array_size
; i
++) {
4465 unsigned idx
= inst
->sampler_base
+ i
;
4466 v
->samplers_used
|= 1u << idx
;
4468 debug_assert(idx
< (int)ARRAY_SIZE(v
->sampler_types
));
4469 v
->sampler_types
[idx
] = inst
->tex_type
;
4470 v
->sampler_targets
[idx
] =
4471 st_translate_texture_target(inst
->tex_target
, inst
->tex_shadow
);
4473 if (inst
->tex_shadow
) {
4474 prog
->ShadowSamplers
|= 1 << (inst
->resource
.index
+ i
);
4479 if (inst
->tex_target
== TEXTURE_EXTERNAL_INDEX
)
4480 prog
->ExternalSamplersUsed
|= 1 << inst
->resource
.index
;
4482 if (inst
->resource
.file
!= PROGRAM_UNDEFINED
&& (
4483 is_resource_instruction(inst
->op
) ||
4484 inst
->op
== TGSI_OPCODE_STORE
)) {
4485 if (inst
->resource
.file
== PROGRAM_MEMORY
) {
4486 v
->use_shared_memory
= true;
4487 } else if (inst
->resource
.file
== PROGRAM_IMAGE
) {
4488 for (int i
= 0; i
< inst
->sampler_array_size
; i
++) {
4489 unsigned idx
= inst
->sampler_base
+ i
;
4490 v
->images_used
|= 1 << idx
;
4491 v
->image_targets
[idx
] =
4492 st_translate_texture_target(inst
->tex_target
, false);
4493 v
->image_formats
[idx
] = inst
->image_format
;
4498 prog
->SamplersUsed
= v
->samplers_used
;
4500 if (v
->shader_program
!= NULL
)
4501 _mesa_update_shader_textures_used(v
->shader_program
, prog
);
/**
 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
 * are read from the given src in this instruction
 */
static int
get_src_arg_mask(st_dst_reg dst, st_src_reg src)
{
   int read_mask = 0, comp;

   /* Now, given the src swizzle and the written channels, find which
    * components are actually read
    */
   for (comp = 0; comp < 4; ++comp) {
      const unsigned coord = GET_SWZ(src.swizzle, comp);

      if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
         read_mask |= 1 << coord;
   }

   return read_mask;
}
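/* For example, with dst.writemask == WRITEMASK_XY and a src swizzle of .wzyx,
 * only the first two swizzle slots are considered, so the returned mask is
 * WRITEMASK_W | WRITEMASK_Z: channels W and Z of src are the ones actually
 * read.
 */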
/**
 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
 * instruction is the first instruction to write to register T0. There are
 * several lowering passes done in GLSL IR (e.g. branches and
 * relative addressing) that create a large number of conditional assignments
 * that ir_to_mesa converts to CMP instructions like the one mentioned above.
 *
 * Here is why this conversion is safe:
 * CMP T0, T1 T2 T0 can be expanded to:
 * if (T1 < 0.0)
 *    MOV T0, T2;
 * else
 *    MOV T0, T0;
 *
 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
 * as the original program. If (T1 < 0.0) evaluates to false, executing
 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
 * because any instruction that was going to read from T0 after this was going
 * to read a garbage value anyway.
 */
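/* Concretely, when the first write to TEMP[0] is
 *
 *    CMP TEMP[0], TEMP[1], TEMP[2], TEMP[0]
 *
 * and the writemask checks below are satisfied, the pass rewrites it as
 *
 *    MOV TEMP[0], TEMP[2]
 */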
4548 glsl_to_tgsi_visitor::simplify_cmp(void)
4550 int tempWritesSize
= 0;
4551 unsigned *tempWrites
= NULL
;
4552 unsigned outputWrites
[VARYING_SLOT_TESS_MAX
];
4554 memset(outputWrites
, 0, sizeof(outputWrites
));
4556 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4557 unsigned prevWriteMask
= 0;
4559 /* Give up if we encounter relative addressing or flow control. */
4560 if (inst
->dst
[0].reladdr
|| inst
->dst
[0].reladdr2
||
4561 inst
->dst
[1].reladdr
|| inst
->dst
[1].reladdr2
||
4562 inst
->info
->is_branch
||
4563 inst
->op
== TGSI_OPCODE_CONT
||
4564 inst
->op
== TGSI_OPCODE_END
||
4565 inst
->op
== TGSI_OPCODE_RET
) {
4569 if (inst
->dst
[0].file
== PROGRAM_OUTPUT
) {
4570 assert(inst
->dst
[0].index
< (signed)ARRAY_SIZE(outputWrites
));
4571 prevWriteMask
= outputWrites
[inst
->dst
[0].index
];
4572 outputWrites
[inst
->dst
[0].index
] |= inst
->dst
[0].writemask
;
4573 } else if (inst
->dst
[0].file
== PROGRAM_TEMPORARY
) {
4574 if (inst
->dst
[0].index
>= tempWritesSize
) {
4575 const int inc
= 4096;
4577 tempWrites
= (unsigned*)
4579 (tempWritesSize
+ inc
) * sizeof(unsigned));
4583 memset(tempWrites
+ tempWritesSize
, 0, inc
* sizeof(unsigned));
4584 tempWritesSize
+= inc
;
4587 prevWriteMask
= tempWrites
[inst
->dst
[0].index
];
4588 tempWrites
[inst
->dst
[0].index
] |= inst
->dst
[0].writemask
;
4592 /* For a CMP to be considered a conditional write, the destination
4593 * register and source register two must be the same. */
4594 if (inst
->op
== TGSI_OPCODE_CMP
4595 && !(inst
->dst
[0].writemask
& prevWriteMask
)
4596 && inst
->src
[2].file
== inst
->dst
[0].file
4597 && inst
->src
[2].index
== inst
->dst
[0].index
4598 && inst
->dst
[0].writemask
== get_src_arg_mask(inst
->dst
[0], inst
->src
[2])) {
4600 inst
->op
= TGSI_OPCODE_MOV
;
4601 inst
->info
= tgsi_get_opcode_info(inst
->op
);
4602 inst
->src
[0] = inst
->src
[1];
4610 rename_temp_handle_src(struct rename_reg_pair
*renames
,
4611 struct st_src_reg
*src
)
4613 if (src
&& src
->file
== PROGRAM_TEMPORARY
) {
4614 int old_idx
= src
->index
;
4615 if (renames
[old_idx
].valid
)
4616 src
->index
= renames
[old_idx
].new_reg
;
4620 /* Replaces all references to a temporary register index with another index. */
4622 glsl_to_tgsi_visitor::rename_temp_registers(struct rename_reg_pair
*renames
)
4624 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4626 for (j
= 0; j
< num_inst_src_regs(inst
); j
++) {
4627 rename_temp_handle_src(renames
, &inst
->src
[j
]);
4628 rename_temp_handle_src(renames
, inst
->src
[j
].reladdr
);
4629 rename_temp_handle_src(renames
, inst
->src
[j
].reladdr2
);
4632 for (j
= 0; j
< inst
->tex_offset_num_offset
; j
++) {
4633 rename_temp_handle_src(renames
, &inst
->tex_offsets
[j
]);
4634 rename_temp_handle_src(renames
, inst
->tex_offsets
[j
].reladdr
);
4635 rename_temp_handle_src(renames
, inst
->tex_offsets
[j
].reladdr2
);
4638 rename_temp_handle_src(renames
, &inst
->resource
);
4639 rename_temp_handle_src(renames
, inst
->resource
.reladdr
);
4640 rename_temp_handle_src(renames
, inst
->resource
.reladdr2
);
4642 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4643 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
) {
4644 int old_idx
= inst
->dst
[j
].index
;
4645 if (renames
[old_idx
].valid
)
4646 inst
->dst
[j
].index
= renames
[old_idx
].new_reg
;
4648 rename_temp_handle_src(renames
, inst
->dst
[j
].reladdr
);
4649 rename_temp_handle_src(renames
, inst
->dst
[j
].reladdr2
);
4655 glsl_to_tgsi_visitor::get_first_temp_write(int *first_writes
)
4657 int depth
= 0; /* loop depth */
4658 int loop_start
= -1; /* index of the first active BGNLOOP (if any) */
4661 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4662 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4663 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
) {
4664 if (first_writes
[inst
->dst
[j
].index
] == -1)
4665 first_writes
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4669 if (inst
->op
== TGSI_OPCODE_BGNLOOP
) {
4672 } else if (inst
->op
== TGSI_OPCODE_ENDLOOP
) {
4682 glsl_to_tgsi_visitor::get_first_temp_read(int *first_reads
)
4684 int depth
= 0; /* loop depth */
4685 int loop_start
= -1; /* index of the first active BGNLOOP (if any) */
4688 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4689 for (j
= 0; j
< num_inst_src_regs(inst
); j
++) {
4690 if (inst
->src
[j
].file
== PROGRAM_TEMPORARY
) {
4691 if (first_reads
[inst
->src
[j
].index
] == -1)
4692 first_reads
[inst
->src
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4695 for (j
= 0; j
< inst
->tex_offset_num_offset
; j
++) {
4696 if (inst
->tex_offsets
[j
].file
== PROGRAM_TEMPORARY
) {
4697 if (first_reads
[inst
->tex_offsets
[j
].index
] == -1)
4698 first_reads
[inst
->tex_offsets
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4701 if (inst
->op
== TGSI_OPCODE_BGNLOOP
) {
4704 } else if (inst
->op
== TGSI_OPCODE_ENDLOOP
) {
4714 glsl_to_tgsi_visitor::get_last_temp_read_first_temp_write(int *last_reads
, int *first_writes
)
4716 int depth
= 0; /* loop depth */
4717 int loop_start
= -1; /* index of the first active BGNLOOP (if any) */
4720 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4721 for (j
= 0; j
< num_inst_src_regs(inst
); j
++) {
4722 if (inst
->src
[j
].file
== PROGRAM_TEMPORARY
)
4723 last_reads
[inst
->src
[j
].index
] = (depth
== 0) ? i
: -2;
4725 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4726 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
) {
4727 if (first_writes
[inst
->dst
[j
].index
] == -1)
4728 first_writes
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: loop_start
;
4729 last_reads
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: -2;
4732 for (j
= 0; j
< inst
->tex_offset_num_offset
; j
++) {
4733 if (inst
->tex_offsets
[j
].file
== PROGRAM_TEMPORARY
)
4734 last_reads
[inst
->tex_offsets
[j
].index
] = (depth
== 0) ? i
: -2;
4736 if (inst
->op
== TGSI_OPCODE_BGNLOOP
) {
4739 } else if (inst
->op
== TGSI_OPCODE_ENDLOOP
) {
4742 for (k
= 0; k
< this->next_temp
; k
++) {
4743 if (last_reads
[k
] == -2) {
4755 glsl_to_tgsi_visitor::get_last_temp_write(int *last_writes
)
4757 int depth
= 0; /* loop depth */
4761 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4762 for (j
= 0; j
< num_inst_dst_regs(inst
); j
++) {
4763 if (inst
->dst
[j
].file
== PROGRAM_TEMPORARY
)
4764 last_writes
[inst
->dst
[j
].index
] = (depth
== 0) ? i
: -2;
4767 if (inst
->op
== TGSI_OPCODE_BGNLOOP
)
4769 else if (inst
->op
== TGSI_OPCODE_ENDLOOP
)
4771 for (k
= 0; k
< this->next_temp
; k
++) {
4772 if (last_writes
[k
] == -2) {
4783 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
4784 * channels for copy propagation and updates following instructions to
4785 * use the original versions.
4787 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
4788 * will occur. As an example, a TXP production before this pass:
4790 * 0: MOV TEMP[1], INPUT[4].xyyy;
4791 * 1: MOV TEMP[1].w, INPUT[4].wwww;
4792 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
4796 * 0: MOV TEMP[1], INPUT[4].xyyy;
4797 * 1: MOV TEMP[1].w, INPUT[4].wwww;
4798 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
4800 * which allows for dead code elimination on TEMP[1]'s writes.
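/* A note on the bookkeeping used below: acp holds one entry per temporary
 * register channel, indexed as acp[4 * temp_index + channel], each pointing
 * at the MOV that most recently copied into that channel; acp_level records
 * the control-flow nesting depth at which the entry was added, so that
 * entries created inside an if/else or loop can be invalidated when the
 * block ends.
 */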
4803 glsl_to_tgsi_visitor::copy_propagate(void)
4805 glsl_to_tgsi_instruction
**acp
= rzalloc_array(mem_ctx
,
4806 glsl_to_tgsi_instruction
*,
4807 this->next_temp
* 4);
4808 int *acp_level
= rzalloc_array(mem_ctx
, int, this->next_temp
* 4);
4811 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
4812 assert(inst
->dst
[0].file
!= PROGRAM_TEMPORARY
4813 || inst
->dst
[0].index
< this->next_temp
);
4815 /* First, do any copy propagation possible into the src regs. */
4816 for (int r
= 0; r
< 3; r
++) {
4817 glsl_to_tgsi_instruction
*first
= NULL
;
4819 int acp_base
= inst
->src
[r
].index
* 4;
4821 if (inst
->src
[r
].file
!= PROGRAM_TEMPORARY
||
4822 inst
->src
[r
].reladdr
||
4823 inst
->src
[r
].reladdr2
)
4826 /* See if we can find entries in the ACP consisting of MOVs
4827 * from the same src register for all the swizzled channels
4828 * of this src register reference.
4830 for (int i
= 0; i
< 4; i
++) {
4831 int src_chan
= GET_SWZ(inst
->src
[r
].swizzle
, i
);
4832 glsl_to_tgsi_instruction
*copy_chan
= acp
[acp_base
+ src_chan
];
4839 assert(acp_level
[acp_base
+ src_chan
] <= level
);
4844 if (first
->src
[0].file
!= copy_chan
->src
[0].file
||
4845 first
->src
[0].index
!= copy_chan
->src
[0].index
||
4846 first
->src
[0].double_reg2
!= copy_chan
->src
[0].double_reg2
||
4847 first
->src
[0].index2D
!= copy_chan
->src
[0].index2D
) {
4855 /* We've now validated that we can copy-propagate to
4856 * replace this src register reference. Do it.
4858 inst
->src
[r
].file
= first
->src
[0].file
;
4859 inst
->src
[r
].index
= first
->src
[0].index
;
4860 inst
->src
[r
].index2D
= first
->src
[0].index2D
;
4861 inst
->src
[r
].has_index2
= first
->src
[0].has_index2
;
4862 inst
->src
[r
].double_reg2
= first
->src
[0].double_reg2
;
4863 inst
->src
[r
].array_id
= first
->src
[0].array_id
;
4866 for (int i
= 0; i
< 4; i
++) {
4867 int src_chan
= GET_SWZ(inst
->src
[r
].swizzle
, i
);
4868 glsl_to_tgsi_instruction
*copy_inst
= acp
[acp_base
+ src_chan
];
4869 swizzle
|= (GET_SWZ(copy_inst
->src
[0].swizzle
, src_chan
) << (3 * i
));
4871 inst
->src
[r
].swizzle
= swizzle
;
4876 case TGSI_OPCODE_BGNLOOP
:
4877 case TGSI_OPCODE_ENDLOOP
:
4878 /* End of a basic block, clear the ACP entirely. */
4879 memset(acp
, 0, sizeof(*acp
) * this->next_temp
* 4);
4882 case TGSI_OPCODE_IF
:
4883 case TGSI_OPCODE_UIF
:
4887 case TGSI_OPCODE_ENDIF
:
4888 case TGSI_OPCODE_ELSE
:
4889 /* Clear all channels written inside the block from the ACP, but
4890 * leaving those that were not touched.
4892 for (int r
= 0; r
< this->next_temp
; r
++) {
4893 for (int c
= 0; c
< 4; c
++) {
4894 if (!acp
[4 * r
+ c
])
4897 if (acp_level
[4 * r
+ c
] >= level
)
4898 acp
[4 * r
+ c
] = NULL
;
4901 if (inst
->op
== TGSI_OPCODE_ENDIF
)
4906 /* Continuing the block, clear any written channels from
4909 for (int d
= 0; d
< 2; d
++) {
4910 if (inst
->dst
[d
].file
== PROGRAM_TEMPORARY
&& inst
->dst
[d
].reladdr
) {
4911 /* Any temporary might be written, so no copy propagation
4912 * across this instruction.
4914 memset(acp
, 0, sizeof(*acp
) * this->next_temp
* 4);
4915 } else if (inst
->dst
[d
].file
== PROGRAM_OUTPUT
&&
4916 inst
->dst
[d
].reladdr
) {
4917 /* Any output might be written, so no copy propagation
4918 * from outputs across this instruction.
4920 for (int r
= 0; r
< this->next_temp
; r
++) {
4921 for (int c
= 0; c
< 4; c
++) {
4922 if (!acp
[4 * r
+ c
])
4925 if (acp
[4 * r
+ c
]->src
[0].file
== PROGRAM_OUTPUT
)
4926 acp
[4 * r
+ c
] = NULL
;
4929 } else if (inst
->dst
[d
].file
== PROGRAM_TEMPORARY
||
4930 inst
->dst
[d
].file
== PROGRAM_OUTPUT
) {
4931 /* Clear where it's used as dst. */
4932 if (inst
->dst
[d
].file
== PROGRAM_TEMPORARY
) {
4933 for (int c
= 0; c
< 4; c
++) {
4934 if (inst
->dst
[d
].writemask
& (1 << c
))
4935 acp
[4 * inst
->dst
[d
].index
+ c
] = NULL
;
4939 /* Clear where it's used as src. */
4940 for (int r
= 0; r
< this->next_temp
; r
++) {
4941 for (int c
= 0; c
< 4; c
++) {
4942 if (!acp
[4 * r
+ c
])
4945 int src_chan
= GET_SWZ(acp
[4 * r
+ c
]->src
[0].swizzle
, c
);
4947 if (acp
[4 * r
+ c
]->src
[0].file
== inst
->dst
[d
].file
&&
4948 acp
[4 * r
+ c
]->src
[0].index
== inst
->dst
[d
].index
&&
4949 inst
->dst
[d
].writemask
& (1 << src_chan
)) {
4950 acp
[4 * r
+ c
] = NULL
;
4959 /* If this is a copy, add it to the ACP. */
4960 if (inst
->op
== TGSI_OPCODE_MOV
&&
4961 inst
->dst
[0].file
== PROGRAM_TEMPORARY
&&
4962 !(inst
->dst
[0].file
== inst
->src
[0].file
&&
4963 inst
->dst
[0].index
== inst
->src
[0].index
) &&
4964 !inst
->dst
[0].reladdr
&&
4965 !inst
->dst
[0].reladdr2
&&
4967 inst
->src
[0].file
!= PROGRAM_ARRAY
&&
4968 (inst
->src
[0].file
!= PROGRAM_OUTPUT
||
4969 this->shader
->Stage
!= MESA_SHADER_TESS_CTRL
) &&
4970 !inst
->src
[0].reladdr
&&
4971 !inst
->src
[0].reladdr2
&&
4972 !inst
->src
[0].negate
&&
4973 !inst
->src
[0].abs
) {
4974 for (int i
= 0; i
< 4; i
++) {
4975 if (inst
->dst
[0].writemask
& (1 << i
)) {
4976 acp
[4 * inst
->dst
[0].index
+ i
] = inst
;
4977 acp_level
[4 * inst
->dst
[0].index
+ i
] = level
;
4983 ralloc_free(acp_level
);
4988 dead_code_handle_reladdr(glsl_to_tgsi_instruction
**writes
, st_src_reg
*reladdr
)
4990 if (reladdr
&& reladdr
->file
== PROGRAM_TEMPORARY
) {
4991 /* Clear where it's used as src. */
4992 int swz
= GET_SWZ(reladdr
->swizzle
, 0);
4993 writes
[4 * reladdr
->index
+ swz
] = NULL
;
4998 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
5001 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
5002 * will occur. As an example, a TXP production after copy propagation but
5005 * 0: MOV TEMP[1], INPUT[4].xyyy;
5006 * 1: MOV TEMP[1].w, INPUT[4].wwww;
5007 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5009 * and after this pass:
5011 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
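/* A note on the bookkeeping used below: writes[4 * temp_index + channel]
 * remembers the last instruction to write each temporary channel that has
 * not been read since.  When a later write lands on the same channel first,
 * the earlier instruction gets that channel added to its dead_mask, and
 * whatever is still in the table at the end of the shader is marked dead as
 * well.
 */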
5014 glsl_to_tgsi_visitor::eliminate_dead_code(void)
5016 glsl_to_tgsi_instruction
**writes
= rzalloc_array(mem_ctx
,
5017 glsl_to_tgsi_instruction
*,
5018 this->next_temp
* 4);
5019 int *write_level
= rzalloc_array(mem_ctx
, int, this->next_temp
* 4);
5023 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
5024 assert(inst
->dst
[0].file
!= PROGRAM_TEMPORARY
5025 || inst
->dst
[0].index
< this->next_temp
);
5028 case TGSI_OPCODE_BGNLOOP
:
5029 case TGSI_OPCODE_ENDLOOP
:
5030 case TGSI_OPCODE_CONT
:
5031 case TGSI_OPCODE_BRK
:
5032 /* End of a basic block, clear the write array entirely.
5034 * This keeps us from killing dead code when the writes are
5035 * on either side of a loop, even when the register isn't touched
5036 * inside the loop. However, glsl_to_tgsi_visitor doesn't seem to emit
5037 * dead code of this type, so it shouldn't make a difference as long as
5038 * the dead code elimination pass in the GLSL compiler does its job.
5040 memset(writes
, 0, sizeof(*writes
) * this->next_temp
* 4);
5043 case TGSI_OPCODE_ENDIF
:
5044 case TGSI_OPCODE_ELSE
:
5045 /* Promote the recorded level of all channels written inside the
5046 * preceding if or else block to the level above the if/else block.
5048 for (int r
= 0; r
< this->next_temp
; r
++) {
5049 for (int c
= 0; c
< 4; c
++) {
5050 if (!writes
[4 * r
+ c
])
5053 if (write_level
[4 * r
+ c
] == level
)
5054 write_level
[4 * r
+ c
] = level
-1;
5057 if(inst
->op
== TGSI_OPCODE_ENDIF
)
5061 case TGSI_OPCODE_IF
:
5062 case TGSI_OPCODE_UIF
:
5064 /* fallthrough to default case to mark the condition as read */
5066 /* Continuing the block, clear any channels from the write array that
5067 * are read by this instruction.
5069 for (unsigned i
= 0; i
< ARRAY_SIZE(inst
->src
); i
++) {
5070 if (inst
->src
[i
].file
== PROGRAM_TEMPORARY
&& inst
->src
[i
].reladdr
){
5071 /* Any temporary might be read, so no dead code elimination
5072 * across this instruction.
5074 memset(writes
, 0, sizeof(*writes
) * this->next_temp
* 4);
5075 } else if (inst
->src
[i
].file
== PROGRAM_TEMPORARY
) {
5076 /* Clear where it's used as src. */
5077 int src_chans
= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 0);
5078 src_chans
|= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 1);
5079 src_chans
|= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 2);
5080 src_chans
|= 1 << GET_SWZ(inst
->src
[i
].swizzle
, 3);
5082 for (int c
= 0; c
< 4; c
++) {
5083 if (src_chans
& (1 << c
))
5084 writes
[4 * inst
->src
[i
].index
+ c
] = NULL
;
5087 dead_code_handle_reladdr(writes
, inst
->src
[i
].reladdr
);
5088 dead_code_handle_reladdr(writes
, inst
->src
[i
].reladdr2
);
5090 for (unsigned i
= 0; i
< inst
->tex_offset_num_offset
; i
++) {
5091 if (inst
->tex_offsets
[i
].file
== PROGRAM_TEMPORARY
&& inst
->tex_offsets
[i
].reladdr
){
5092 /* Any temporary might be read, so no dead code elimination
5093 * across this instruction.
5095 memset(writes
, 0, sizeof(*writes
) * this->next_temp
* 4);
5096 } else if (inst
->tex_offsets
[i
].file
== PROGRAM_TEMPORARY
) {
5097 /* Clear where it's used as src. */
5098 int src_chans
= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 0);
5099 src_chans
|= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 1);
5100 src_chans
|= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 2);
5101 src_chans
|= 1 << GET_SWZ(inst
->tex_offsets
[i
].swizzle
, 3);
5103 for (int c
= 0; c
< 4; c
++) {
5104 if (src_chans
& (1 << c
))
5105 writes
[4 * inst
->tex_offsets
[i
].index
+ c
] = NULL
;
5108 dead_code_handle_reladdr(writes
, inst
->tex_offsets
[i
].reladdr
);
5109 dead_code_handle_reladdr(writes
, inst
->tex_offsets
[i
].reladdr2
);
5112 if (inst
->resource
.file
== PROGRAM_TEMPORARY
) {
5115 src_chans
= 1 << GET_SWZ(inst
->resource
.swizzle
, 0);
5116 src_chans
|= 1 << GET_SWZ(inst
->resource
.swizzle
, 1);
5117 src_chans
|= 1 << GET_SWZ(inst
->resource
.swizzle
, 2);
5118 src_chans
|= 1 << GET_SWZ(inst
->resource
.swizzle
, 3);
5120 for (int c
= 0; c
< 4; c
++) {
5121 if (src_chans
& (1 << c
))
5122 writes
[4 * inst
->resource
.index
+ c
] = NULL
;
5125 dead_code_handle_reladdr(writes
, inst
->resource
.reladdr
);
5126 dead_code_handle_reladdr(writes
, inst
->resource
.reladdr2
);
5128 for (unsigned i
= 0; i
< ARRAY_SIZE(inst
->dst
); i
++) {
5129 dead_code_handle_reladdr(writes
, inst
->dst
[i
].reladdr
);
5130 dead_code_handle_reladdr(writes
, inst
->dst
[i
].reladdr2
);
5135 /* If this instruction writes to a temporary, add it to the write array.
5136 * If there is already an instruction in the write array for one or more
5137 * of the channels, flag that channel write as dead.
5139 for (unsigned i
= 0; i
< ARRAY_SIZE(inst
->dst
); i
++) {
5140 if (inst
->dst
[i
].file
== PROGRAM_TEMPORARY
&&
5141 !inst
->dst
[i
].reladdr
) {
5142 for (int c
= 0; c
< 4; c
++) {
5143 if (inst
->dst
[i
].writemask
& (1 << c
)) {
5144 if (writes
[4 * inst
->dst
[i
].index
+ c
]) {
5145 if (write_level
[4 * inst
->dst
[i
].index
+ c
] < level
)
5148 writes
[4 * inst
->dst
[i
].index
+ c
]->dead_mask
|= (1 << c
);
5150 writes
[4 * inst
->dst
[i
].index
+ c
] = inst
;
5151 write_level
[4 * inst
->dst
[i
].index
+ c
] = level
;
5158 /* Anything still in the write array at this point is dead code. */
5159 for (int r
= 0; r
< this->next_temp
; r
++) {
5160 for (int c
= 0; c
< 4; c
++) {
5161 glsl_to_tgsi_instruction
*inst
= writes
[4 * r
+ c
];
5163 inst
->dead_mask
|= (1 << c
);
5167 /* Now actually remove the instructions that are completely dead and update
5168 * the writemask of other instructions with dead channels.
5170 foreach_in_list_safe(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
5171 if (!inst
->dead_mask
|| !inst
->dst
[0].writemask
)
5173 /* No amount of dead masks should remove memory stores */
5174 if (inst
->info
->is_store
)
5177 if ((inst
->dst
[0].writemask
& ~inst
->dead_mask
) == 0) {
5182 if (glsl_base_type_is_64bit(inst
->dst
[0].type
)) {
5183 if (inst
->dead_mask
== WRITEMASK_XY
||
5184 inst
->dead_mask
== WRITEMASK_ZW
)
5185 inst
->dst
[0].writemask
&= ~(inst
->dead_mask
);
5187 inst
->dst
[0].writemask
&= ~(inst
->dead_mask
);
5191 ralloc_free(write_level
);
5192 ralloc_free(writes
);
5197 /* merge DFRACEXP instructions into one. */
5199 glsl_to_tgsi_visitor::merge_two_dsts(void)
5201 /* We never delete inst, but we may delete its successor. */
5202 foreach_in_list(glsl_to_tgsi_instruction
, inst
, &this->instructions
) {
5203 glsl_to_tgsi_instruction
*inst2
;
5205 if (num_inst_dst_regs(inst
) != 2)
5208 if (inst
->dst
[0].file
!= PROGRAM_UNDEFINED
&&
5209 inst
->dst
[1].file
!= PROGRAM_UNDEFINED
)
5212 inst2
= (glsl_to_tgsi_instruction
*) inst
->next
;
5215 if (inst
->src
[0].file
== inst2
->src
[0].file
&&
5216 inst
->src
[0].index
== inst2
->src
[0].index
&&
5217 inst
->src
[0].type
== inst2
->src
[0].type
&&
5218 inst
->src
[0].swizzle
== inst2
->src
[0].swizzle
)
5220 inst2
= (glsl_to_tgsi_instruction
*) inst2
->next
;
5226 if (inst
->dst
[0].file
== PROGRAM_UNDEFINED
) {
5228 inst
->dst
[0] = inst2
->dst
[0];
5229 } else if (inst
->dst
[1].file
== PROGRAM_UNDEFINED
) {
5230 inst
->dst
[1] = inst2
->dst
[1];
/* Merges temporary registers together where possible to reduce the number of
 * registers needed to run a program.
 *
 * Produces optimal code only after copy propagation and dead code elimination
 * have been run. */
void
glsl_to_tgsi_visitor::merge_registers(void)
{
   struct lifetime *lifetimes =
      rzalloc_array(mem_ctx, struct lifetime, this->next_temp);

   if (get_temp_registers_required_lifetimes(mem_ctx, &this->instructions,
                                             this->next_temp, lifetimes)) {
      struct rename_reg_pair *renames =
         rzalloc_array(mem_ctx, struct rename_reg_pair, this->next_temp);
      get_temp_registers_remapping(mem_ctx, this->next_temp, lifetimes, renames);
      rename_temp_registers(renames);
      ralloc_free(renames);
   }

   ralloc_free(lifetimes);
}
/* Reassign indices to temporary registers by reusing unused indices created
 * by optimization passes. */
void
glsl_to_tgsi_visitor::renumber_registers(void)
{
   int i = 0;
   int new_index = 0;
   int *first_writes = ralloc_array(mem_ctx, int, this->next_temp);
   struct rename_reg_pair *renames = rzalloc_array(mem_ctx, struct rename_reg_pair, this->next_temp);

   for (i = 0; i < this->next_temp; i++) {
      first_writes[i] = -1;
   }
   get_first_temp_write(first_writes);

   for (i = 0; i < this->next_temp; i++) {
      if (first_writes[i] < 0) continue;
      if (i != new_index) {
         renames[i].new_reg = new_index;
         renames[i].valid = true;
      }
      new_index++;
   }

   rename_temp_registers(renames);
   this->next_temp = new_index;
   ralloc_free(renames);
   ralloc_free(first_writes);
}
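/* For example, if after the earlier passes only TEMP[0], TEMP[3] and TEMP[7]
 * are still written, they are renumbered to TEMP[0], TEMP[1] and TEMP[2] and
 * next_temp drops to 3.
 */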
5295 /* ------------------------- TGSI conversion stuff -------------------------- */
5298 * Intermediate state used during shader translation.
5300 struct st_translate
{
5301 struct ureg_program
*ureg
;
5303 unsigned temps_size
;
5304 struct ureg_dst
*temps
;
5306 struct ureg_dst
*arrays
;
5307 unsigned num_temp_arrays
;
5308 struct ureg_src
*constants
;
5310 struct ureg_src
*immediates
;
5312 struct ureg_dst outputs
[PIPE_MAX_SHADER_OUTPUTS
];
5313 struct ureg_src inputs
[PIPE_MAX_SHADER_INPUTS
];
5314 struct ureg_dst address
[3];
5315 struct ureg_src samplers
[PIPE_MAX_SAMPLERS
];
5316 struct ureg_src buffers
[PIPE_MAX_SHADER_BUFFERS
];
5317 struct ureg_src images
[PIPE_MAX_SHADER_IMAGES
];
5318 struct ureg_src systemValues
[SYSTEM_VALUE_MAX
];
5319 struct ureg_src shared_memory
;
5320 unsigned *array_sizes
;
5321 struct inout_decl
*input_decls
;
5322 unsigned num_input_decls
;
5323 struct inout_decl
*output_decls
;
5324 unsigned num_output_decls
;
5326 const ubyte
*inputMapping
;
5327 const ubyte
*outputMapping
;
5329 unsigned procType
; /**< PIPE_SHADER_VERTEX/FRAGMENT */
5332 /** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
5334 _mesa_sysval_to_semantic(unsigned sysval
)
5338 case SYSTEM_VALUE_VERTEX_ID
:
5339 return TGSI_SEMANTIC_VERTEXID
;
5340 case SYSTEM_VALUE_INSTANCE_ID
:
5341 return TGSI_SEMANTIC_INSTANCEID
;
5342 case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE
:
5343 return TGSI_SEMANTIC_VERTEXID_NOBASE
;
5344 case SYSTEM_VALUE_BASE_VERTEX
:
5345 return TGSI_SEMANTIC_BASEVERTEX
;
5346 case SYSTEM_VALUE_BASE_INSTANCE
:
5347 return TGSI_SEMANTIC_BASEINSTANCE
;
5348 case SYSTEM_VALUE_DRAW_ID
:
5349 return TGSI_SEMANTIC_DRAWID
;
5351 /* Geometry shader */
5352 case SYSTEM_VALUE_INVOCATION_ID
:
5353 return TGSI_SEMANTIC_INVOCATIONID
;
5355 /* Fragment shader */
5356 case SYSTEM_VALUE_FRAG_COORD
:
5357 return TGSI_SEMANTIC_POSITION
;
5358 case SYSTEM_VALUE_FRONT_FACE
:
5359 return TGSI_SEMANTIC_FACE
;
5360 case SYSTEM_VALUE_SAMPLE_ID
:
5361 return TGSI_SEMANTIC_SAMPLEID
;
5362 case SYSTEM_VALUE_SAMPLE_POS
:
5363 return TGSI_SEMANTIC_SAMPLEPOS
;
5364 case SYSTEM_VALUE_SAMPLE_MASK_IN
:
5365 return TGSI_SEMANTIC_SAMPLEMASK
;
5366 case SYSTEM_VALUE_HELPER_INVOCATION
:
5367 return TGSI_SEMANTIC_HELPER_INVOCATION
;
5369 /* Tessellation shader */
5370 case SYSTEM_VALUE_TESS_COORD
:
5371 return TGSI_SEMANTIC_TESSCOORD
;
5372 case SYSTEM_VALUE_VERTICES_IN
:
5373 return TGSI_SEMANTIC_VERTICESIN
;
5374 case SYSTEM_VALUE_PRIMITIVE_ID
:
5375 return TGSI_SEMANTIC_PRIMID
;
5376 case SYSTEM_VALUE_TESS_LEVEL_OUTER
:
5377 return TGSI_SEMANTIC_TESSOUTER
;
5378 case SYSTEM_VALUE_TESS_LEVEL_INNER
:
5379 return TGSI_SEMANTIC_TESSINNER
;
5381 /* Compute shader */
5382 case SYSTEM_VALUE_LOCAL_INVOCATION_ID
:
5383 return TGSI_SEMANTIC_THREAD_ID
;
5384 case SYSTEM_VALUE_WORK_GROUP_ID
:
5385 return TGSI_SEMANTIC_BLOCK_ID
;
5386 case SYSTEM_VALUE_NUM_WORK_GROUPS
:
5387 return TGSI_SEMANTIC_GRID_SIZE
;
5388 case SYSTEM_VALUE_LOCAL_GROUP_SIZE
:
5389 return TGSI_SEMANTIC_BLOCK_SIZE
;
5391 /* ARB_shader_ballot */
5392 case SYSTEM_VALUE_SUBGROUP_SIZE
:
5393 return TGSI_SEMANTIC_SUBGROUP_SIZE
;
5394 case SYSTEM_VALUE_SUBGROUP_INVOCATION
:
5395 return TGSI_SEMANTIC_SUBGROUP_INVOCATION
;
5396 case SYSTEM_VALUE_SUBGROUP_EQ_MASK
:
5397 return TGSI_SEMANTIC_SUBGROUP_EQ_MASK
;
5398 case SYSTEM_VALUE_SUBGROUP_GE_MASK
:
5399 return TGSI_SEMANTIC_SUBGROUP_GE_MASK
;
5400 case SYSTEM_VALUE_SUBGROUP_GT_MASK
:
5401 return TGSI_SEMANTIC_SUBGROUP_GT_MASK
;
5402 case SYSTEM_VALUE_SUBGROUP_LE_MASK
:
5403 return TGSI_SEMANTIC_SUBGROUP_LE_MASK
;
5404 case SYSTEM_VALUE_SUBGROUP_LT_MASK
:
5405 return TGSI_SEMANTIC_SUBGROUP_LT_MASK
;
5408 case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX
:
5409 case SYSTEM_VALUE_GLOBAL_INVOCATION_ID
:
5410 case SYSTEM_VALUE_VERTEX_CNT
:
5412 assert(!"Unexpected SYSTEM_VALUE_ enum");
5413 return TGSI_SEMANTIC_COUNT
;
5418 * Map a glsl_to_tgsi constant/immediate to a TGSI immediate.
5420 static struct ureg_src
5421 emit_immediate(struct st_translate
*t
,
5422 gl_constant_value values
[4],
5425 struct ureg_program
*ureg
= t
->ureg
;
5430 return ureg_DECL_immediate(ureg
, &values
[0].f
, size
);
5432 return ureg_DECL_immediate_f64(ureg
, (double *)&values
[0].f
, size
);
5434 return ureg_DECL_immediate_int64(ureg
, (int64_t *)&values
[0].f
, size
);
5435 case GL_UNSIGNED_INT64_ARB
:
5436 return ureg_DECL_immediate_uint64(ureg
, (uint64_t *)&values
[0].f
, size
);
5438 return ureg_DECL_immediate_int(ureg
, &values
[0].i
, size
);
5439 case GL_UNSIGNED_INT
:
5441 return ureg_DECL_immediate_uint(ureg
, &values
[0].u
, size
);
5443 assert(!"should not get here - type must be float, int, uint, or bool");
5444 return ureg_src_undef();
5449 * Map a glsl_to_tgsi dst register to a TGSI ureg_dst register.
5451 static struct ureg_dst
5452 dst_register(struct st_translate
*t
, gl_register_file file
, unsigned index
,
5458 case PROGRAM_UNDEFINED
:
5459 return ureg_dst_undef();
5461 case PROGRAM_TEMPORARY
:
5462 /* Allocate space for temporaries on demand. */
5463 if (index
>= t
->temps_size
) {
5464 const int inc
= align(index
- t
->temps_size
+ 1, 4096);
5466 t
->temps
= (struct ureg_dst
*)
5468 (t
->temps_size
+ inc
) * sizeof(struct ureg_dst
));
5470 return ureg_dst_undef();
5472 memset(t
->temps
+ t
->temps_size
, 0, inc
* sizeof(struct ureg_dst
));
5473 t
->temps_size
+= inc
;
5476 if (ureg_dst_is_undef(t
->temps
[index
]))
5477 t
->temps
[index
] = ureg_DECL_local_temporary(t
->ureg
);
5479 return t
->temps
[index
];
5482 assert(array_id
&& array_id
<= t
->num_temp_arrays
);
5483 array
= array_id
- 1;
5485 if (ureg_dst_is_undef(t
->arrays
[array
]))
5486 t
->arrays
[array
] = ureg_DECL_array_temporary(
5487 t
->ureg
, t
->array_sizes
[array
], TRUE
);
5489 return ureg_dst_array_offset(t
->arrays
[array
], index
);
5491 case PROGRAM_OUTPUT
:
5493 if (t
->procType
== PIPE_SHADER_FRAGMENT
)
5494 assert(index
< 2 * FRAG_RESULT_MAX
);
5495 else if (t
->procType
== PIPE_SHADER_TESS_CTRL
||
5496 t
->procType
== PIPE_SHADER_TESS_EVAL
)
5497 assert(index
< VARYING_SLOT_TESS_MAX
);
5499 assert(index
< VARYING_SLOT_MAX
);
5501 assert(t
->outputMapping
[index
] < ARRAY_SIZE(t
->outputs
));
5502 assert(t
->outputs
[t
->outputMapping
[index
]].File
!= TGSI_FILE_NULL
);
5503 return t
->outputs
[t
->outputMapping
[index
]];
5506 struct inout_decl
*decl
= find_inout_array(t
->output_decls
, t
->num_output_decls
, array_id
);
5507 unsigned mesa_index
= decl
->mesa_index
;
5508 int slot
= t
->outputMapping
[mesa_index
];
5510 assert(slot
!= -1 && t
->outputs
[slot
].File
== TGSI_FILE_OUTPUT
);
5512 struct ureg_dst dst
= t
->outputs
[slot
];
5513 dst
.ArrayID
= array_id
;
5514 return ureg_dst_array_offset(dst
, index
- mesa_index
);
5517 case PROGRAM_ADDRESS
:
5518 return t
->address
[index
];
5521 assert(!"unknown dst register file");
5522 return ureg_dst_undef();
5527 * Create a TGSI ureg_dst register from an st_dst_reg.
5529 static struct ureg_dst
5530 translate_dst(struct st_translate
*t
,
5531 const st_dst_reg
*dst_reg
,
5534 struct ureg_dst dst
= dst_register(t
, dst_reg
->file
, dst_reg
->index
,
5537 if (dst
.File
== TGSI_FILE_NULL
)
5540 dst
= ureg_writemask(dst
, dst_reg
->writemask
);
5543 dst
= ureg_saturate(dst
);
5545 if (dst_reg
->reladdr
!= NULL
) {
5546 assert(dst_reg
->file
!= PROGRAM_TEMPORARY
);
5547 dst
= ureg_dst_indirect(dst
, ureg_src(t
->address
[0]));
5550 if (dst_reg
->has_index2
) {
5551 if (dst_reg
->reladdr2
)
5552 dst
= ureg_dst_dimension_indirect(dst
, ureg_src(t
->address
[1]),
5555 dst
= ureg_dst_dimension(dst
, dst_reg
->index2D
);
5562 * Create a TGSI ureg_src register from an st_src_reg.
5564 static struct ureg_src
5565 translate_src(struct st_translate
*t
, const st_src_reg
*src_reg
)
5567 struct ureg_src src
;
5568 int index
= src_reg
->index
;
5569 int double_reg2
= src_reg
->double_reg2
? 1 : 0;
5571 switch(src_reg
->file
) {
5572 case PROGRAM_UNDEFINED
:
5573 src
= ureg_imm4f(t
->ureg
, 0, 0, 0, 0);
5576 case PROGRAM_TEMPORARY
:
5578 src
= ureg_src(dst_register(t
, src_reg
->file
, src_reg
->index
, src_reg
->array_id
));
5581 case PROGRAM_OUTPUT
: {
5582 struct ureg_dst dst
= dst_register(t
, src_reg
->file
, src_reg
->index
, src_reg
->array_id
);
5583 assert(dst
.WriteMask
!= 0);
5584 unsigned shift
= ffs(dst
.WriteMask
) - 1;
5585 src
= ureg_swizzle(ureg_src(dst
),
5589 MIN2(shift
+ 3, 3));
5593 case PROGRAM_UNIFORM
:
5594 assert(src_reg
->index
>= 0);
5595 src
= src_reg
->index
< t
->num_constants
?
5596 t
->constants
[src_reg
->index
] : ureg_imm4f(t
->ureg
, 0, 0, 0, 0);
5598 case PROGRAM_STATE_VAR
:
5599 case PROGRAM_CONSTANT
: /* ie, immediate */
5600 if (src_reg
->has_index2
)
5601 src
= ureg_src_register(TGSI_FILE_CONSTANT
, src_reg
->index
);
5603 src
= src_reg
->index
>= 0 && src_reg
->index
< t
->num_constants
?
5604 t
->constants
[src_reg
->index
] : ureg_imm4f(t
->ureg
, 0, 0, 0, 0);
5607 case PROGRAM_IMMEDIATE
:
5608 assert(src_reg
->index
>= 0 && src_reg
->index
< t
->num_immediates
);
5609 src
= t
->immediates
[src_reg
->index
];
5613 /* GLSL inputs are 64-bit containers, so we have to
5614 * map back to the original index and add the offset after
5616 index
-= double_reg2
;
5617 if (!src_reg
->array_id
) {
5618 assert(t
->inputMapping
[index
] < ARRAY_SIZE(t
->inputs
));
5619 assert(t
->inputs
[t
->inputMapping
[index
]].File
!= TGSI_FILE_NULL
);
5620 src
= t
->inputs
[t
->inputMapping
[index
] + double_reg2
];
5623 struct inout_decl
*decl
= find_inout_array(t
->input_decls
, t
->num_input_decls
,
5625 unsigned mesa_index
= decl
->mesa_index
;
5626 int slot
= t
->inputMapping
[mesa_index
];
5628 assert(slot
!= -1 && t
->inputs
[slot
].File
== TGSI_FILE_INPUT
);
5630 src
= t
->inputs
[slot
];
5631 src
.ArrayID
= src_reg
->array_id
;
5632 src
= ureg_src_array_offset(src
, index
+ double_reg2
- mesa_index
);
5636 case PROGRAM_ADDRESS
:
5637 src
= ureg_src(t
->address
[src_reg
->index
]);
5640 case PROGRAM_SYSTEM_VALUE
:
5641 assert(src_reg
->index
< (int) ARRAY_SIZE(t
->systemValues
));
5642 src
= t
->systemValues
[src_reg
->index
];
5646 assert(!"unknown src register file");
5647 return ureg_src_undef();
5650 if (src_reg
->has_index2
) {
5651 /* 2D indexes occur with geometry shader inputs (attrib, vertex)
5652 * and UBO constant buffers (buffer, position).
5654 if (src_reg
->reladdr2
)
5655 src
= ureg_src_dimension_indirect(src
, ureg_src(t
->address
[1]),
5658 src
= ureg_src_dimension(src
, src_reg
->index2D
);
5661 src
= ureg_swizzle(src
,
5662 GET_SWZ(src_reg
->swizzle
, 0) & 0x3,
5663 GET_SWZ(src_reg
->swizzle
, 1) & 0x3,
5664 GET_SWZ(src_reg
->swizzle
, 2) & 0x3,
5665 GET_SWZ(src_reg
->swizzle
, 3) & 0x3);
5668 src
= ureg_abs(src
);
5670 if ((src_reg
->negate
& 0xf) == NEGATE_XYZW
)
5671 src
= ureg_negate(src
);
5673 if (src_reg
->reladdr
!= NULL
) {
5674 assert(src_reg
->file
!= PROGRAM_TEMPORARY
);
5675 src
= ureg_src_indirect(src
, ureg_src(t
->address
[0]));
5681 static struct tgsi_texture_offset
5682 translate_tex_offset(struct st_translate
*t
,
5683 const st_src_reg
*in_offset
)
5685 struct tgsi_texture_offset offset
;
5686 struct ureg_src src
= translate_src(t
, in_offset
);
5688 offset
.File
= src
.File
;
5689 offset
.Index
= src
.Index
;
5690 offset
.SwizzleX
= src
.SwizzleX
;
5691 offset
.SwizzleY
= src
.SwizzleY
;
5692 offset
.SwizzleZ
= src
.SwizzleZ
;
5695 assert(!src
.Indirect
);
5696 assert(!src
.DimIndirect
);
5697 assert(!src
.Dimension
);
5698 assert(!src
.Absolute
); /* those shouldn't be used with integers anyway */
5699 assert(!src
.Negate
);
5705 compile_tgsi_instruction(struct st_translate
*t
,
5706 const glsl_to_tgsi_instruction
*inst
)
5708 struct ureg_program
*ureg
= t
->ureg
;
5710 struct ureg_dst dst
[2];
5711 struct ureg_src src
[4];
5712 struct tgsi_texture_offset texoffsets
[MAX_GLSL_TEXTURE_OFFSET
];
5716 unsigned tex_target
= 0;
5718 num_dst
= num_inst_dst_regs(inst
);
5719 num_src
= num_inst_src_regs(inst
);
5721 for (i
= 0; i
< num_dst
; i
++)
5722 dst
[i
] = translate_dst(t
,
5726 for (i
= 0; i
< num_src
; i
++)
5727 src
[i
] = translate_src(t
, &inst
->src
[i
]);
5730 case TGSI_OPCODE_BGNLOOP
:
5731 case TGSI_OPCODE_ELSE
:
5732 case TGSI_OPCODE_ENDLOOP
:
5733 case TGSI_OPCODE_IF
:
5734 case TGSI_OPCODE_UIF
:
5735 assert(num_dst
== 0);
5736 ureg_insn(ureg
, inst
->op
, NULL
, 0, src
, num_src
, inst
->precise
);
5739 case TGSI_OPCODE_TEX
:
5740 case TGSI_OPCODE_TEX_LZ
:
5741 case TGSI_OPCODE_TXB
:
5742 case TGSI_OPCODE_TXD
:
5743 case TGSI_OPCODE_TXL
:
5744 case TGSI_OPCODE_TXP
:
5745 case TGSI_OPCODE_TXQ
:
5746 case TGSI_OPCODE_TXQS
:
5747 case TGSI_OPCODE_TXF
:
5748 case TGSI_OPCODE_TXF_LZ
:
5749 case TGSI_OPCODE_TEX2
:
5750 case TGSI_OPCODE_TXB2
:
5751 case TGSI_OPCODE_TXL2
:
5752 case TGSI_OPCODE_TG4
:
5753 case TGSI_OPCODE_LODQ
:
5754 if (inst
->resource
.file
== PROGRAM_SAMPLER
) {
5755 src
[num_src
] = t
->samplers
[inst
->resource
.index
];
5757 /* Bindless samplers. */
5758 src
[num_src
] = translate_src(t
, &inst
->resource
);
5760 assert(src
[num_src
].File
!= TGSI_FILE_NULL
);
5761 if (inst
->resource
.reladdr
)
5763 ureg_src_indirect(src
[num_src
], ureg_src(t
->address
[2]));
5765 for (i
= 0; i
< (int)inst
->tex_offset_num_offset
; i
++) {
5766 texoffsets
[i
] = translate_tex_offset(t
, &inst
->tex_offsets
[i
]);
5768 tex_target
= st_translate_texture_target(inst
->tex_target
, inst
->tex_shadow
);
5774 st_translate_texture_type(inst
->tex_type
),
5775 texoffsets
, inst
->tex_offset_num_offset
,
5779 case TGSI_OPCODE_RESQ
:
5780 case TGSI_OPCODE_LOAD
:
5781 case TGSI_OPCODE_ATOMUADD
:
5782 case TGSI_OPCODE_ATOMXCHG
:
5783 case TGSI_OPCODE_ATOMCAS
:
5784 case TGSI_OPCODE_ATOMAND
:
5785 case TGSI_OPCODE_ATOMOR
:
5786 case TGSI_OPCODE_ATOMXOR
:
5787 case TGSI_OPCODE_ATOMUMIN
:
5788 case TGSI_OPCODE_ATOMUMAX
:
5789 case TGSI_OPCODE_ATOMIMIN
:
5790 case TGSI_OPCODE_ATOMIMAX
:
5791 for (i
= num_src
- 1; i
>= 0; i
--)
5792 src
[i
+ 1] = src
[i
];
5794 if (inst
->resource
.file
== PROGRAM_MEMORY
) {
5795 src
[0] = t
->shared_memory
;
5796 } else if (inst
->resource
.file
== PROGRAM_BUFFER
) {
5797 src
[0] = t
->buffers
[inst
->resource
.index
];
5798 } else if (inst
->resource
.file
== PROGRAM_CONSTANT
) {
5799 assert(inst
->resource
.has_index2
);
5800 src
[0] = ureg_src_register(TGSI_FILE_CONSTBUF
, inst
->resource
.index
);
5802 assert(inst
->resource
.file
!= PROGRAM_UNDEFINED
);
5803 if (inst
->resource
.file
== PROGRAM_IMAGE
) {
5804 src
[0] = t
->images
[inst
->resource
.index
];
5806 /* Bindless images. */
5807 src
[0] = translate_src(t
, &inst
->resource
);
5809 tex_target
= st_translate_texture_target(inst
->tex_target
, inst
->tex_shadow
);
5811 if (inst
->resource
.reladdr
)
5812 src
[0] = ureg_src_indirect(src
[0], ureg_src(t
->address
[2]));
5813 assert(src
[0].File
!= TGSI_FILE_NULL
);
5814 ureg_memory_insn(ureg
, inst
->op
, dst
, num_dst
, src
, num_src
,
5815 inst
->buffer_access
,
5816 tex_target
, inst
->image_format
);
5819 case TGSI_OPCODE_STORE
:
5820 if (inst
->resource
.file
== PROGRAM_MEMORY
) {
5821 dst
[0] = ureg_dst(t
->shared_memory
);
5822 } else if (inst
->resource
.file
== PROGRAM_BUFFER
) {
5823 dst
[0] = ureg_dst(t
->buffers
[inst
->resource
.index
]);
5825 if (inst
->resource
.file
== PROGRAM_IMAGE
) {
5826 dst
[0] = ureg_dst(t
->images
[inst
->resource
.index
]);
5828 /* Bindless images. */
5829 dst
[0] = ureg_dst(translate_src(t
, &inst
->resource
));
5831 tex_target
= st_translate_texture_target(inst
->tex_target
, inst
->tex_shadow
);
5833 dst
[0] = ureg_writemask(dst
[0], inst
->dst
[0].writemask
);
5834 if (inst
->resource
.reladdr
)
5835 dst
[0] = ureg_dst_indirect(dst
[0], ureg_src(t
->address
[2]));
5836 assert(dst
[0].File
!= TGSI_FILE_NULL
);
5837 ureg_memory_insn(ureg
, inst
->op
, dst
, num_dst
, src
, num_src
,
5838 inst
->buffer_access
,
5839 tex_target
, inst
->image_format
);
5846 src
, num_src
, inst
->precise
);
5852 * Emit the TGSI instructions for inverting and adjusting WPOS.
5853 * This code is unavoidable because it also depends on whether
5854 * a FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
5857 emit_wpos_adjustment(struct gl_context
*ctx
,
5858 struct st_translate
*t
,
5859 int wpos_transform_const
,
5861 GLfloat adjX
, GLfloat adjY
[2])
5863 struct ureg_program
*ureg
= t
->ureg
;
5865 assert(wpos_transform_const
>= 0);
5867 /* Fragment program uses fragment position input.
5868 * Need to replace instances of INPUT[WPOS] with temp T
5869 * where T = INPUT[WPOS] is inverted by Y.
5871 struct ureg_src wpostrans
= ureg_DECL_constant(ureg
, wpos_transform_const
);
5872 struct ureg_dst wpos_temp
= ureg_DECL_temporary( ureg
);
5873 struct ureg_src
*wpos
=
5874 ctx
->Const
.GLSLFragCoordIsSysVal
?
5875 &t
->systemValues
[SYSTEM_VALUE_FRAG_COORD
] :
5876 &t
->inputs
[t
->inputMapping
[VARYING_SLOT_POS
]];
5877 struct ureg_src wpos_input
= *wpos
;
5879 /* First, apply the coordinate shift: */
5880 if (adjX
|| adjY
[0] || adjY
[1]) {
5881 if (adjY
[0] != adjY
[1]) {
5882 /* Adjust the y coordinate by adjY[1] or adjY[0] respectively
5883 * depending on whether inversion is actually going to be applied
5884 * or not, which is determined by testing against the inversion
5885 * state variable used below, which will be either +1 or -1.
5887 struct ureg_dst adj_temp
= ureg_DECL_local_temporary(ureg
);
5889 ureg_CMP(ureg
, adj_temp
,
5890 ureg_scalar(wpostrans
, invert
? 2 : 0),
5891 ureg_imm4f(ureg
, adjX
, adjY
[0], 0.0f
, 0.0f
),
5892 ureg_imm4f(ureg
, adjX
, adjY
[1], 0.0f
, 0.0f
));
5893 ureg_ADD(ureg
, wpos_temp
, wpos_input
, ureg_src(adj_temp
));
5895 ureg_ADD(ureg
, wpos_temp
, wpos_input
,
5896 ureg_imm4f(ureg
, adjX
, adjY
[0], 0.0f
, 0.0f
));
5898 wpos_input
= ureg_src(wpos_temp
);
5900 /* MOV wpos_temp, input[wpos]
5902 ureg_MOV( ureg
, wpos_temp
, wpos_input
);
5905 /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be
5906 * inversion/identity, or the other way around if we're drawing to an FBO.
5909 /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
5912 ureg_writemask(wpos_temp
, TGSI_WRITEMASK_Y
),
5914 ureg_scalar(wpostrans
, 0),
5915 ureg_scalar(wpostrans
, 1));
5917 /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
5920 ureg_writemask(wpos_temp
, TGSI_WRITEMASK_Y
),
5922 ureg_scalar(wpostrans
, 2),
5923 ureg_scalar(wpostrans
, 3));
5926 /* Use wpos_temp as position input from here on:
5928 *wpos
= ureg_src(wpos_temp
);
5933 * Emit fragment position/ooordinate code.
static void
emit_wpos(struct st_context *st,
          struct st_translate *t,
          const struct gl_program *program,
          struct ureg_program *ureg,
          int wpos_transform_const)
{
   struct pipe_screen *pscreen = st->pipe->screen;
   GLfloat adjX = 0.0f;
   GLfloat adjY[2] = { 0.0f, 0.0f };
   boolean invert = FALSE;

   /* Query the pixel center conventions supported by the pipe driver and set
    * adjX, adjY to help out if it cannot handle the requested one internally.
    *
    * The bias of the y-coordinate depends on whether y-inversion takes place
    * (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
    * drawing to an FBO (causes additional inversion), and whether the pipe
    * driver origin and the requested origin differ (the latter condition is
    * stored in the 'invert' variable).
    *
    * For height = 100 (i = integer, h = half-integer, l = lower, u = upper):
    *
    * center shift only:
    * i -> h: +0.5
    * h -> i: -0.5
    *
    * inversion only:
    * l,i -> u,i: ( 0.0 + 1.0) * -1 + 100 = 99
    * l,h -> u,h: ( 0.5 + 0.0) * -1 + 100 = 99.5
    * u,i -> l,i: (99.0 + 1.0) * -1 + 100 = 0
    * u,h -> l,h: (99.5 + 0.0) * -1 + 100 = 0.5
    *
    * inversion and center shift:
    * l,i -> u,h: ( 0.0 + 0.5) * -1 + 100 = 99.5
    * l,h -> u,i: ( 0.5 + 0.5) * -1 + 100 = 99
    * u,i -> l,h: (99.0 + 0.5) * -1 + 100 = 0.5
    * u,h -> l,i: (99.5 + 0.5) * -1 + 100 = 0
    */
   if (program->OriginUpperLeft) {
      /* Fragment shader wants origin in upper-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
         /* the driver supports upper-left origin */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
         /* the driver supports lower-left origin, need to invert Y */
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
                       TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
         invert = TRUE;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants origin in lower-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
         /* the driver supports lower-left origin */
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
                       TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
         /* the driver supports upper-left origin, need to invert Y */
         invert = TRUE;
      else
         assert(0);
   }

   if (program->PixelCenterInteger) {
      /* Fragment shader wants pixel center integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer */
         adjY[1] = 1.0f;
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
                       TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer, need to bias X,Y */
         adjX = -0.5f;
         adjY[0] = -0.5f;
         adjY[1] = 0.5f;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants pixel center half integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer, need to bias X,Y */
         adjX = adjY[0] = adjY[1] = 0.5f;
         ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
                       TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      }
      else
         assert(0);
   }

   /* we invert after adjustment so that we avoid the MOV to temporary,
    * and reuse the adjustment ADD instead */
   emit_wpos_adjustment(st->ctx, t, wpos_transform_const, invert, adjX, adjY);
}
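/*
 * Illustrative combination for the logic above: a shader declared with
 * layout(origin_upper_left, pixel_center_integer) running on a driver that
 * only reports PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT and
 * PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER ends up with
 * invert = TRUE and a half-pixel bias in adjX/adjY, so
 * emit_wpos_adjustment() emits the CMP/ADD/MAD sequence that both
 * re-centers and flips gl_FragCoord.y.
 */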
/**
 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
 * TGSI uses +1 for front, -1 for back.
 * This function converts the TGSI value to the GL value.  Simply clamping/
 * saturating the value to [0,1] does the job.
 */
static void
emit_face_var(struct gl_context *ctx, struct st_translate *t)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
   struct ureg_src face_input = t->inputs[t->inputMapping[VARYING_SLOT_FACE]];

   if (ctx->Const.NativeIntegers) {
      ureg_FSGE(ureg, face_temp, face_input, ureg_imm1f(ureg, 0));
   }
   else {
      /* MOV_SAT face_temp, input[face] */
      ureg_MOV(ureg, ureg_saturate(face_temp), face_input);
   }

   /* Use face_temp as face input from here on: */
   t->inputs[t->inputMapping[VARYING_SLOT_FACE]] = ureg_src(face_temp);
}
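/*
 * Illustrative behaviour of the conversion above: TGSI delivers +1.0 for a
 * front-facing fragment and -1.0 for a back-facing one.  With native
 * integers, FSGE against 0 turns that into the boolean true/false encoding;
 * otherwise MOV_SAT clamps +1.0 to 1.0 and -1.0 to 0.0, which is what the
 * GLSL gl_FrontFace test expects.
 */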
static void
emit_compute_block_size(const struct gl_program *prog,
                        struct ureg_program *ureg) {
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
                 prog->info.cs.local_size[0]);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
                 prog->info.cs.local_size[1]);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
                 prog->info.cs.local_size[2]);
}
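/*
 * For example (illustrative): a compute shader declared with
 * layout(local_size_x = 8, local_size_y = 8) in would emit
 * CS_FIXED_BLOCK_WIDTH = 8, CS_FIXED_BLOCK_HEIGHT = 8 and
 * CS_FIXED_BLOCK_DEPTH = 1, since unspecified dimensions default to 1.
 */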
struct sort_inout_decls {
   bool operator()(const struct inout_decl &a, const struct inout_decl &b) const {
      return mapping[a.mesa_index] < mapping[b.mesa_index];
   }

   const ubyte *mapping;
};

/* Sort the given array of decls by the corresponding slot (TGSI file index).
 *
 * This is for the benefit of older drivers which are broken when the
 * declarations aren't sorted in this way.
 */
static void
sort_inout_decls_by_slot(struct inout_decl *decls,
                         unsigned count,
                         const ubyte mapping[])
{
   sort_inout_decls sorter;
   sorter.mapping = mapping;
   std::sort(decls, decls + count, sorter);
}
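/*
 * Sketch of the effect (hypothetical numbers): with decls for mesa_index
 * {VARYING_SLOT_COL0, VARYING_SLOT_POS} and a mapping that assigns POS to
 * slot 0 and COL0 to slot 1, the comparator above reorders the array so the
 * POS declaration (slot 0) is emitted before COL0 (slot 1).
 */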
static unsigned
st_translate_interp(enum glsl_interp_mode glsl_qual, GLuint varying)
{
   switch (glsl_qual) {
   case INTERP_MODE_NONE:
      if (varying == VARYING_SLOT_COL0 || varying == VARYING_SLOT_COL1)
         return TGSI_INTERPOLATE_COLOR;
      return TGSI_INTERPOLATE_PERSPECTIVE;
   case INTERP_MODE_SMOOTH:
      return TGSI_INTERPOLATE_PERSPECTIVE;
   case INTERP_MODE_FLAT:
      return TGSI_INTERPOLATE_CONSTANT;
   case INTERP_MODE_NOPERSPECTIVE:
      return TGSI_INTERPOLATE_LINEAR;
   default:
      assert(0 && "unexpected interp mode in st_translate_interp()");
      return TGSI_INTERPOLATE_PERSPECTIVE;
   }
}
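/*
 * Example of the mapping above: an unqualified gl_Color input
 * (INTERP_MODE_NONE on VARYING_SLOT_COL0) is declared as
 * TGSI_INTERPOLATE_COLOR, which lets the driver pick flat or smooth shading
 * at draw time, while a 'flat' qualifier always yields
 * TGSI_INTERPOLATE_CONSTANT.
 */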
/**
 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
 * \param program  the program to translate
 * \param numInputs  number of input registers used
 * \param inputMapping  maps Mesa fragment program inputs to TGSI generic
 *                      input indexes
 * \param inputSemanticName  the TGSI_SEMANTIC flag for each input
 * \param inputSemanticIndex  the semantic index (ex: which texcoord) for
 *                            each input
 * \param interpMode  the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
 * \param numOutputs  number of output registers used
 * \param outputMapping  maps Mesa fragment program outputs to TGSI
 *                       generic outputs
 * \param outputSemanticName  the TGSI_SEMANTIC flag for each output
 * \param outputSemanticIndex  the semantic index (ex: which texcoord) for
 *                             each output
 *
 * \return  PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
extern "C" enum pipe_error
st_translate_program(
   struct gl_context *ctx,
   uint procType,
   struct ureg_program *ureg,
   glsl_to_tgsi_visitor *program,
   const struct gl_program *proginfo,
   GLuint numInputs,
   const ubyte inputMapping[],
   const ubyte inputSlotToAttr[],
   const ubyte inputSemanticName[],
   const ubyte inputSemanticIndex[],
   const ubyte interpMode[],
   GLuint numOutputs,
   const ubyte outputMapping[],
   const ubyte outputSemanticName[],
   const ubyte outputSemanticIndex[])
{
   struct st_translate *t;
   unsigned i;
   struct gl_program_constants *frag_const =
      &ctx->Const.Program[MESA_SHADER_FRAGMENT];
   enum pipe_error ret = PIPE_OK;

   assert(numInputs <= ARRAY_SIZE(t->inputs));
   assert(numOutputs <= ARRAY_SIZE(t->outputs));

   t = CALLOC_STRUCT(st_translate);
   if (!t) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out;
   }

   t->procType = procType;
   t->inputMapping = inputMapping;
   t->outputMapping = outputMapping;
   t->ureg = ureg;
   t->num_temp_arrays = program->next_array;
   if (t->num_temp_arrays)
      t->arrays = (struct ureg_dst*)
                  calloc(t->num_temp_arrays, sizeof(t->arrays[0]));

   /*
    * Declare input attributes.
    */
   switch (procType) {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_TESS_CTRL:
      sort_inout_decls_by_slot(program->inputs, program->num_inputs,
                               inputMapping);

      for (i = 0; i < program->num_inputs; ++i) {
         struct inout_decl *decl = &program->inputs[i];
         unsigned slot = inputMapping[decl->mesa_index];
         struct ureg_src src;
         ubyte tgsi_usage_mask = decl->usage_mask;

         if (glsl_base_type_is_64bit(decl->base_type)) {
            if (tgsi_usage_mask == 1)
               tgsi_usage_mask = TGSI_WRITEMASK_XY;
            else if (tgsi_usage_mask == 2)
               tgsi_usage_mask = TGSI_WRITEMASK_ZW;
            else
               tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
         }

         unsigned interp_mode = 0;
         unsigned interp_location = 0;
         if (procType == PIPE_SHADER_FRAGMENT) {
            interp_mode = interpMode[slot] != TGSI_INTERPOLATE_COUNT ?
               interpMode[slot] :
               st_translate_interp(decl->interp, inputSlotToAttr[slot]);

            interp_location = decl->interp_loc;
         }

         src = ureg_DECL_fs_input_cyl_centroid_layout(ureg,
                  inputSemanticName[slot], inputSemanticIndex[slot],
                  interp_mode, 0, interp_location, slot, tgsi_usage_mask,
                  decl->array_id, decl->size);

         for (unsigned j = 0; j < decl->size; ++j) {
            if (t->inputs[slot + j].File != TGSI_FILE_INPUT) {
               /* The ArrayID is set up in dst_register */
               t->inputs[slot + j] = src;
               t->inputs[slot + j].ArrayID = 0;
               t->inputs[slot + j].Index += j;
            }
         }
      }
      break;
   case PIPE_SHADER_VERTEX:
      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_vs_input(ureg, i);
      }
      break;
   case PIPE_SHADER_COMPUTE:
      break;
   default:
      assert(0);
   }

   /*
    * Declare output attributes.
    */
   switch (procType) {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_COMPUTE:
      break;
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_VERTEX:
      sort_inout_decls_by_slot(program->outputs, program->num_outputs,
                               outputMapping);

      for (i = 0; i < program->num_outputs; ++i) {
         struct inout_decl *decl = &program->outputs[i];
         unsigned slot = outputMapping[decl->mesa_index];
         struct ureg_dst dst;
         ubyte tgsi_usage_mask = decl->usage_mask;

         if (glsl_base_type_is_64bit(decl->base_type)) {
            if (tgsi_usage_mask == 1)
               tgsi_usage_mask = TGSI_WRITEMASK_XY;
            else if (tgsi_usage_mask == 2)
               tgsi_usage_mask = TGSI_WRITEMASK_ZW;
            else
               tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
         }

         dst = ureg_DECL_output_layout(ureg,
                  outputSemanticName[slot], outputSemanticIndex[slot],
                  decl->gs_out_streams,
                  slot, tgsi_usage_mask, decl->array_id, decl->size);

         for (unsigned j = 0; j < decl->size; ++j) {
            if (t->outputs[slot + j].File != TGSI_FILE_OUTPUT) {
               /* The ArrayID is set up in dst_register */
               t->outputs[slot + j] = dst;
               t->outputs[slot + j].ArrayID = 0;
               t->outputs[slot + j].Index += j;
            }
         }
      }
      break;
   default:
      assert(0);
   }

   if (procType == PIPE_SHADER_FRAGMENT) {
      if (program->shader->Program->info.fs.early_fragment_tests ||
          program->shader->Program->info.fs.post_depth_coverage) {
         ureg_property(ureg, TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL, 1);

         if (program->shader->Program->info.fs.post_depth_coverage)
            ureg_property(ureg, TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE, 1);
      }

      if (proginfo->info.inputs_read & VARYING_BIT_POS) {
         /* Must do this after setting up t->inputs. */
         emit_wpos(st_context(ctx), t, proginfo, ureg,
                   program->wpos_transform_const);
      }

      if (proginfo->info.inputs_read & VARYING_BIT_FACE)
         emit_face_var(ctx, t);

      for (i = 0; i < numOutputs; i++) {
         switch (outputSemanticName[i]) {
         case TGSI_SEMANTIC_POSITION:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_POSITION, /* Z/Depth */
                                             outputSemanticIndex[i]);
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Z);
            break;
         case TGSI_SEMANTIC_STENCIL:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_STENCIL, /* Stencil */
                                             outputSemanticIndex[i]);
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Y);
            break;
         case TGSI_SEMANTIC_COLOR:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_COLOR,
                                             outputSemanticIndex[i]);
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_SAMPLEMASK,
                                             outputSemanticIndex[i]);
            /* TODO: If we ever support more than 32 samples, this will have
             * to become an array.
             */
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
            break;
         default:
            assert(!"fragment shader outputs must be POSITION/STENCIL/COLOR");
            ret = PIPE_ERROR_BAD_INPUT;
            goto out;
         }
      }
   }
   else if (procType == PIPE_SHADER_VERTEX) {
      for (i = 0; i < numOutputs; i++) {
         if (outputSemanticName[i] == TGSI_SEMANTIC_FOG) {
            /* force register to contain a fog coordinate in the form (F, 0, 0, 1). */
            ureg_MOV(ureg,
                     ureg_writemask(t->outputs[i], TGSI_WRITEMASK_YZW),
                     ureg_imm4f(ureg, 0.0f, 0.0f, 0.0f, 1.0f));
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
         }
      }
   }

   if (procType == PIPE_SHADER_COMPUTE) {
      emit_compute_block_size(proginfo, ureg);
   }

   /* Declare address register.
    */
   if (program->num_address_regs > 0) {
      assert(program->num_address_regs <= 3);
      for (int i = 0; i < program->num_address_regs; i++)
         t->address[i] = ureg_DECL_address(ureg);
   }

   /* Declare misc input registers
    */
   {
      GLbitfield sysInputs = proginfo->info.system_values_read;

      for (i = 0; sysInputs; i++) {
         if (sysInputs & (1 << i)) {
            unsigned semName = _mesa_sysval_to_semantic(i);

            t->systemValues[i] = ureg_DECL_system_value(ureg, semName, 0);

            if (semName == TGSI_SEMANTIC_INSTANCEID ||
                semName == TGSI_SEMANTIC_VERTEXID) {
               /* From Gallium perspective, these system values are always
                * integer, and require native integer support.  However, if
                * native integer is supported on the vertex stage but not the
                * pixel stage (e.g, i915g + draw), Mesa will generate IR that
                * assumes these system values are floats. To resolve the
                * inconsistency, we insert a U2F.
                */
               struct st_context *st = st_context(ctx);
               struct pipe_screen *pscreen = st->pipe->screen;
               assert(procType == PIPE_SHADER_VERTEX);
               assert(pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX,
                                                PIPE_SHADER_CAP_INTEGERS));
               if (!ctx->Const.NativeIntegers) {
                  struct ureg_dst temp = ureg_DECL_local_temporary(t->ureg);
                  ureg_U2F(t->ureg, ureg_writemask(temp, TGSI_WRITEMASK_X),
                           t->systemValues[i]);
                  t->systemValues[i] = ureg_scalar(ureg_src(temp), 0);
               }
            }

            if (procType == PIPE_SHADER_FRAGMENT &&
                semName == TGSI_SEMANTIC_POSITION)
               emit_wpos(st_context(ctx), t, proginfo, ureg,
                         program->wpos_transform_const);

            sysInputs &= ~(1 << i);
         }
      }
   }

   t->array_sizes = program->array_sizes;
   t->input_decls = program->inputs;
   t->num_input_decls = program->num_inputs;
   t->output_decls = program->outputs;
   t->num_output_decls = program->num_outputs;

   /* Emit constants and uniforms.  TGSI uses a single index space for these,
    * so we put all the translated regs in t->constants.
    */
   if (proginfo->Parameters) {
      t->constants = (struct ureg_src *)
         calloc(proginfo->Parameters->NumParameters, sizeof(t->constants[0]));
      if (t->constants == NULL) {
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto out;
      }
      t->num_constants = proginfo->Parameters->NumParameters;

      for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
         switch (proginfo->Parameters->Parameters[i].Type) {
         case PROGRAM_STATE_VAR:
         case PROGRAM_UNIFORM:
            t->constants[i] = ureg_DECL_constant(ureg, i);
            break;

         /* Emit immediates for PROGRAM_CONSTANT only when there's no indirect
          * addressing of the const buffer.
          * FIXME: Be smarter and recognize param arrays:
          * indirect addressing is only valid within the referenced
          * array.
          */
         case PROGRAM_CONSTANT:
            if (program->indirect_addr_consts)
               t->constants[i] = ureg_DECL_constant(ureg, i);
            else
               t->constants[i] = emit_immediate(t,
                  proginfo->Parameters->ParameterValues[i],
                  proginfo->Parameters->Parameters[i].DataType, 4);
            break;
         default:
            break;
         }
      }
   }

   for (i = 0; i < proginfo->info.num_ubos; i++) {
      unsigned size = proginfo->sh.UniformBlocks[i]->UniformBufferSize;
      unsigned num_const_vecs = (size + 15) / 16;
      unsigned first, last;
      assert(num_const_vecs > 0);
      first = 0;
      last = num_const_vecs > 0 ? num_const_vecs - 1 : 0;
      ureg_DECL_constant2D(t->ureg, first, last, i + 1);
   }
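   /*
    * Sizing example for the loop above: a uniform block whose
    * UniformBufferSize is 68 bytes needs (68 + 15) / 16 = 5 constant vec4s,
    * so constants [0..4] are declared in 2D constant buffer i + 1 (buffer 0
    * is reserved for the ordinary uniforms/immediates declared earlier).
    */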
   /* Emit immediate values.
    */
   t->immediates = (struct ureg_src *)
      calloc(program->num_immediates, sizeof(struct ureg_src));
   if (t->immediates == NULL) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out;
   }
   t->num_immediates = program->num_immediates;

   i = 0;
   foreach_in_list(immediate_storage, imm, &program->immediates) {
      assert(i < program->num_immediates);
      t->immediates[i++] = emit_immediate(t, imm->values, imm->type, imm->size32);
   }
   assert(i == program->num_immediates);

   /* texture samplers */
   for (i = 0; i < frag_const->MaxTextureImageUnits; i++) {
      if (program->samplers_used & (1u << i)) {
         unsigned type = st_translate_texture_type(program->sampler_types[i]);

         t->samplers[i] = ureg_DECL_sampler(ureg, i);

         ureg_DECL_sampler_view(ureg, i, program->sampler_targets[i],
                                type, type, type, type);
      }
   }

   /* Declare atomic and shader storage buffers. */
   {
      struct gl_program *prog = program->prog;

      for (i = 0; i < prog->info.num_abos; i++) {
         unsigned index = prog->sh.AtomicBuffers[i]->Binding;
         assert(index < frag_const->MaxAtomicBuffers);
         t->buffers[index] = ureg_DECL_buffer(ureg, index, true);
      }

      assert(prog->info.num_ssbos <= frag_const->MaxShaderStorageBlocks);
      for (i = 0; i < prog->info.num_ssbos; i++) {
         unsigned index = frag_const->MaxAtomicBuffers + i;
         t->buffers[index] = ureg_DECL_buffer(ureg, index, false);
      }
   }

   if (program->use_shared_memory)
      t->shared_memory = ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);

   for (i = 0; i < program->shader->Program->info.num_images; i++) {
      if (program->images_used & (1 << i)) {
         t->images[i] = ureg_DECL_image(ureg, i,
                                        program->image_targets[i],
                                        program->image_formats[i],
                                        true, false);
      }
   }

   /* Emit each instruction in turn:
    */
   foreach_in_list(glsl_to_tgsi_instruction, inst, &program->instructions)
      compile_tgsi_instruction(t, inst);

   /* Set the next shader stage hint for VS and TES. */
   switch (procType) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_TESS_EVAL:
      if (program->shader_program->SeparateShader)
         break;

      for (i = program->shader->Stage + 1; i <= MESA_SHADER_FRAGMENT; i++) {
         if (program->shader_program->_LinkedShaders[i]) {
            ureg_set_next_shader_processor(
                  ureg, pipe_shader_type_from_mesa((gl_shader_stage)i));
            break;
         }
      }
      break;
   }

out:
   if (t) {
      free(t->arrays);
      free(t->constants);
      t->num_constants = 0;
      free(t->immediates);
      t->num_immediates = 0;
      FREE(t);
   }
   return ret;
}

/* ----------------------------- End TGSI code ------------------------------ */
/**
 * Convert a shader's GLSL IR into a Mesa gl_program, although without
 * generating Mesa IR.
 */
static struct gl_program *
get_mesa_program_tgsi(struct gl_context *ctx,
                      struct gl_shader_program *shader_program,
                      struct gl_linked_shader *shader)
{
   glsl_to_tgsi_visitor* v;
   struct gl_program *prog;
   struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];
   struct pipe_screen *pscreen = ctx->st->pipe->screen;
   enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(shader->Stage);
   unsigned skip_merge_registers;

   validate_ir_tree(shader->ir);

   prog = shader->Program;
   prog->Parameters = _mesa_new_parameter_list();
   v = new glsl_to_tgsi_visitor();
   v->ctx = ctx;
   v->prog = prog;
   v->shader_program = shader_program;
   v->shader = shader;
   v->options = options;
   v->glsl_version = ctx->Const.GLSLVersion;
   v->native_integers = ctx->Const.NativeIntegers;

   v->have_sqrt = pscreen->get_shader_param(pscreen, ptarget,
                                            PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED);
   v->have_fma = pscreen->get_shader_param(pscreen, ptarget,
                                           PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED);
   v->has_tex_txf_lz = pscreen->get_param(pscreen,
                                          PIPE_CAP_TGSI_TEX_TXF_LZ);

   v->variables = _mesa_hash_table_create(v->mem_ctx, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   skip_merge_registers =
      pscreen->get_shader_param(pscreen, ptarget,
                                PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS);

   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                               prog->Parameters);

   /* Remove reads from output registers. */
   if (!pscreen->get_param(pscreen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
      lower_output_reads(shader->Stage, shader->ir);

   /* Emit intermediate IR for main(). */
   visit_exec_list(shader->ir, v);

#if 0
   /* Print out some information (for debugging purposes) used by the
    * optimization passes. */
   {
      int i;
      int *first_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
      int *first_reads = ralloc_array(v->mem_ctx, int, v->next_temp);
      int *last_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
      int *last_reads = ralloc_array(v->mem_ctx, int, v->next_temp);

      for (i = 0; i < v->next_temp; i++) {
         first_writes[i] = -1;
         first_reads[i] = -1;
         last_writes[i] = -1;
         last_reads[i] = -1;
      }
      v->get_first_temp_read(first_reads);
      v->get_last_temp_read_first_temp_write(last_reads, first_writes);
      v->get_last_temp_write(last_writes);
      for (i = 0; i < v->next_temp; i++)
         printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, first_reads[i],
                first_writes[i], last_reads[i], last_writes[i]);
      ralloc_free(first_writes);
      ralloc_free(first_reads);
      ralloc_free(last_writes);
      ralloc_free(last_reads);
   }
#endif

   /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
   v->copy_propagate();

   while (v->eliminate_dead_code());

   v->merge_two_dsts();
   if (!skip_merge_registers)
      v->merge_registers();
   v->renumber_registers();
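   /*
    * Rough illustration of the pass pipeline above (hypothetical IR):
    *
    *    MOV TEMP[1], TEMP[0]
    *    ADD TEMP[2], TEMP[1], CONST[0]
    *
    * copy_propagate() rewrites the ADD to read TEMP[0] directly,
    * eliminate_dead_code() then removes the now-unused MOV, and
    * merge_registers()/renumber_registers() pack the surviving temporaries
    * into a dense register range before the final translation to TGSI.
    */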
   /* Write the END instruction. */
   v->emit_asm(NULL, TGSI_OPCODE_END);

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("GLSL IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(shader->Stage),
                shader_program->Name);
      _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
      _mesa_log("\n\n");
   }

   do_set_program_inouts(shader->ir, prog, shader->Stage);
   _mesa_copy_linked_program_data(shader_program, shader);
   shrink_array_declarations(v->inputs, v->num_inputs,
                             &prog->info.inputs_read,
                             prog->info.double_inputs_read,
                             &prog->info.patch_inputs_read);
   shrink_array_declarations(v->outputs, v->num_outputs,
                             &prog->info.outputs_written, 0ULL,
                             &prog->info.patch_outputs_written);
   count_resources(v, prog);

   /* The GLSL IR won't be needed anymore. */
   ralloc_free(shader->ir);
   shader->ir = NULL;

   /* This must be done before the uniform storage is associated. */
   if (shader->Stage == MESA_SHADER_FRAGMENT &&
       (prog->info.inputs_read & VARYING_BIT_POS ||
        prog->info.system_values_read & (1 << SYSTEM_VALUE_FRAG_COORD))) {
      static const gl_state_index wposTransformState[STATE_LENGTH] = {
         STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
      };

      v->wpos_transform_const = _mesa_add_state_reference(prog->Parameters,
                                                          wposTransformState);
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_reserve_parameter_storage(prog->Parameters, 8);

   /* This has to be done last.  Any operation that can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(ctx, shader_program, prog, true);
   if (!shader_program->data->LinkStatus) {
      free_glsl_to_tgsi_visitor(v);
      _mesa_reference_program(ctx, &shader->Program, NULL);
      return NULL;
   }

   struct st_vertex_program *stvp;
   struct st_fragment_program *stfp;
   struct st_common_program *stp;
   struct st_compute_program *stcp;

   switch (shader->Stage) {
   case MESA_SHADER_VERTEX:
      stvp = (struct st_vertex_program *)prog;
      stvp->glsl_to_tgsi = v;
      break;
   case MESA_SHADER_FRAGMENT:
      stfp = (struct st_fragment_program *)prog;
      stfp->glsl_to_tgsi = v;
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      stp = st_common_program(prog);
      stp->glsl_to_tgsi = v;
      break;
   case MESA_SHADER_COMPUTE:
      stcp = (struct st_compute_program *)prog;
      stcp->glsl_to_tgsi = v;
      break;
   default:
      assert(!"should not be reached");
      return NULL;
   }

   return prog;
}
/* See if there are unsupported control flow statements. */
class ir_control_flow_info_visitor : public ir_hierarchical_visitor {
private:
   const struct gl_shader_compiler_options *options;

public:
   ir_control_flow_info_visitor(const struct gl_shader_compiler_options *options)
      : options(options),
        unsupported(false)
   {
   }

   virtual ir_visitor_status visit_enter(ir_function *ir)
   {
      /* Other functions are skipped (same as glsl_to_tgsi). */
      if (strcmp(ir->name, "main") == 0)
         return visit_continue;

      return visit_continue_with_parent;
   }

   virtual ir_visitor_status visit_enter(ir_call *ir)
   {
      if (!ir->callee->is_intrinsic()) {
         unsupported = true; /* it's a function call */
         return visit_stop;
      }
      return visit_continue;
   }

   virtual ir_visitor_status visit_enter(ir_return *ir)
   {
      if (options->EmitNoMainReturn) {
         unsupported = true;
         return visit_stop;
      }
      return visit_continue;
   }

   bool unsupported;
};

static bool
has_unsupported_control_flow(exec_list *ir,
                             const struct gl_shader_compiler_options *options)
{
   ir_control_flow_info_visitor visitor(options);
   visit_list_elements(&visitor, ir);
   return visitor.unsupported;
}
/**
 * Called via ctx->Driver.LinkShader()
 * This actually involves converting GLSL IR into an intermediate TGSI-like IR
 * with code lowering and other optimizations.
 */
GLboolean
st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   /* Return early if we are loading the shader from on-disk cache */
   if (st_load_tgsi_from_disk_cache(ctx, prog)) {
      return GL_TRUE;
   }

   struct pipe_screen *pscreen = ctx->st->pipe->screen;
   assert(prog->data->LinkStatus);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      struct gl_linked_shader *shader = prog->_LinkedShaders[i];
      exec_list *ir = shader->ir;
      gl_shader_stage stage = shader->Stage;
      const struct gl_shader_compiler_options *options =
            &ctx->Const.ShaderCompilerOptions[stage];
      enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
      bool have_dround = pscreen->get_shader_param(pscreen, ptarget,
                                                   PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED);
      bool have_dfrexp = pscreen->get_shader_param(pscreen, ptarget,
                                                   PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED);
      bool have_ldexp = pscreen->get_shader_param(pscreen, ptarget,
                                                  PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED);
      unsigned if_threshold = pscreen->get_shader_param(pscreen, ptarget,
                                                        PIPE_SHADER_CAP_LOWER_IF_THRESHOLD);

      /* If there are forms of indirect addressing that the driver
       * cannot handle, perform the lowering pass.
       */
      if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput ||
          options->EmitNoIndirectTemp || options->EmitNoIndirectUniform) {
         lower_variable_index_to_cond_assign(stage, ir,
                                             options->EmitNoIndirectInput,
                                             options->EmitNoIndirectOutput,
                                             options->EmitNoIndirectTemp,
                                             options->EmitNoIndirectUniform);
      }

      if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
         lower_64bit_integer_instructions(ir, DIV64 | MOD64);

      if (ctx->Extensions.ARB_shading_language_packing) {
         unsigned lower_inst = LOWER_PACK_SNORM_2x16 |
                               LOWER_UNPACK_SNORM_2x16 |
                               LOWER_PACK_UNORM_2x16 |
                               LOWER_UNPACK_UNORM_2x16 |
                               LOWER_PACK_SNORM_4x8 |
                               LOWER_UNPACK_SNORM_4x8 |
                               LOWER_UNPACK_UNORM_4x8 |
                               LOWER_PACK_UNORM_4x8;

         if (ctx->Extensions.ARB_gpu_shader5)
            lower_inst |= LOWER_PACK_USE_BFI |
                          LOWER_PACK_USE_BFE;
         if (!ctx->st->has_half_float_packing)
            lower_inst |= LOWER_PACK_HALF_2x16 |
                          LOWER_UNPACK_HALF_2x16;

         lower_packing_builtins(ir, lower_inst);
      }

      if (!pscreen->get_param(pscreen, PIPE_CAP_TEXTURE_GATHER_OFFSETS))
         lower_offset_arrays(ir);
      do_mat_op_to_vec(ir);

      if (stage == MESA_SHADER_FRAGMENT)
         lower_blend_equation_advanced(shader);

      lower_instructions(ir,
                         MOD_TO_FLOOR |
                         FDIV_TO_MUL_RCP |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         (have_ldexp ? 0 : LDEXP_TO_ARITH) |
                         (have_dfrexp ? 0 : DFREXP_DLDEXP_TO_ARITH) |
                         CARRY_TO_ARITH |
                         BORROW_TO_ARITH |
                         (have_dround ? 0 : DOPS_TO_DFRAC) |
                         (options->EmitNoPow ? POW_TO_EXP2 : 0) |
                         (!ctx->Const.NativeIntegers ? INT_DIV_TO_MUL_RCP : 0) |
                         (options->EmitNoSat ? SAT_TO_CLAMP : 0) |
                         (ctx->Const.ForceGLSLAbsSqrt ? SQRT_TO_ABS_SQRT : 0) |
                         /* Assume that if ARB_gpu_shader5 is not supported
                          * then all of the extended integer functions need
                          * lowering.  It may be necessary to add some caps
                          * for individual instructions.
                          */
                         (!ctx->Extensions.ARB_gpu_shader5
                          ? BIT_COUNT_TO_MATH |
                            EXTRACT_TO_SHIFTS |
                            INSERT_TO_SHIFTS |
                            REVERSE_TO_SHIFTS |
                            FIND_LSB_TO_FLOAT_CAST |
                            FIND_MSB_TO_FLOAT_CAST |
                            IMUL_HIGH_TO_MUL
                          : 0));

      do_vec_index_to_cond_assign(ir);
      lower_vector_insert(ir, true);
      lower_quadop_vector(ir, false);

      if (options->MaxIfDepth == 0) {
         lower_discard(ir);
      }

      if (ctx->Const.GLSLOptimizeConservatively) {
         /* Do it once and repeat only if there's unsupported control flow. */
         do {
            do_common_optimization(ir, true, true, options,
                                   ctx->Const.NativeIntegers);
            lower_if_to_cond_assign((gl_shader_stage)i, ir,
                                    options->MaxIfDepth, if_threshold);
         } while (has_unsupported_control_flow(ir, options));
      } else {
         /* Repeat it until it stops making changes. */
         bool progress;
         do {
            progress = do_common_optimization(ir, true, true, options,
                                              ctx->Const.NativeIntegers);
            progress |= lower_if_to_cond_assign((gl_shader_stage)i, ir,
                                                options->MaxIfDepth, if_threshold);
         } while (progress);
      }

      validate_ir_tree(ir);
   }

   build_program_resource_list(ctx, prog);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = prog->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      enum pipe_shader_type ptarget =
            pipe_shader_type_from_mesa(shader->Stage);
      enum pipe_shader_ir preferred_ir = (enum pipe_shader_ir)
            pscreen->get_shader_param(pscreen, ptarget,
                                      PIPE_SHADER_CAP_PREFERRED_IR);

      struct gl_program *linked_prog = NULL;
      if (preferred_ir == PIPE_SHADER_IR_NIR) {
         /* TODO only for GLSL VS/FS/CS for now: */
         switch (shader->Stage) {
         case MESA_SHADER_VERTEX:
         case MESA_SHADER_FRAGMENT:
         case MESA_SHADER_COMPUTE:
            linked_prog = st_nir_get_mesa_program(ctx, prog, shader);
            break;
         default:
            linked_prog = get_mesa_program_tgsi(ctx, prog, shader);
         }
      } else {
         linked_prog = get_mesa_program_tgsi(ctx, prog, shader);
      }

      if (linked_prog) {
         st_set_prog_affected_state_flags(linked_prog);
         if (!ctx->Driver.ProgramStringNotify(ctx,
                                              _mesa_shader_stage_to_program(i),
                                              linked_prog)) {
            _mesa_reference_program(ctx, &shader->Program, NULL);
            return GL_FALSE;
         }
      }
   }

   return GL_TRUE;
}
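/*
 * The two helpers below copy Mesa's linked transform feedback info into
 * Gallium's pipe_stream_output_info.  As a hypothetical example, an output
 * captured from VARYING_SLOT_POS with ComponentOffset 0 and NumComponents 4
 * into buffer 0 becomes so->output[n] with register_index =
 * outputMapping[VARYING_SLOT_POS], start_component = 0, num_components = 4
 * and output_buffer = 0, with so->stride[] taken from the corresponding
 * transform feedback buffer.
 */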
extern "C" void
st_translate_stream_output_info(glsl_to_tgsi_visitor *glsl_to_tgsi,
                                const ubyte outputMapping[],
                                struct pipe_stream_output_info *so)
{
   if (!glsl_to_tgsi->shader_program->last_vert_prog)
      return;

   struct gl_transform_feedback_info *info =
      glsl_to_tgsi->shader_program->last_vert_prog->sh.LinkedTransformFeedback;
   st_translate_stream_output_info2(info, outputMapping, so);
}

extern "C" void
st_translate_stream_output_info2(struct gl_transform_feedback_info *info,
                                 const ubyte outputMapping[],
                                 struct pipe_stream_output_info *so)
{
   unsigned i;

   for (i = 0; i < info->NumOutputs; i++) {
      so->output[i].register_index =
         outputMapping[info->Outputs[i].OutputRegister];
      so->output[i].start_component = info->Outputs[i].ComponentOffset;
      so->output[i].num_components = info->Outputs[i].NumComponents;
      so->output[i].output_buffer = info->Outputs[i].OutputBuffer;
      so->output[i].dst_offset = info->Outputs[i].DstOffset;
      so->output[i].stream = info->Outputs[i].StreamId;
   }

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      so->stride[i] = info->Buffers[i].Stride;
   }
   so->num_outputs = info->NumOutputs;