glsl_to_tgsi: allow bound samplers and images to be used as l-values
src/mesa/state_tracker/st_glsl_to_tgsi.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 * Copyright © 2011 Bryan Cain
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27 /**
28 * \file glsl_to_tgsi.cpp
29 *
30 * Translate GLSL IR to TGSI.
31 */
32
33 #include "st_glsl_to_tgsi.h"
34
35 #include "compiler/glsl/glsl_parser_extras.h"
36 #include "compiler/glsl/ir_optimization.h"
37 #include "compiler/glsl/program.h"
38
39 #include "main/errors.h"
40 #include "main/shaderobj.h"
41 #include "main/uniforms.h"
42 #include "main/shaderapi.h"
43 #include "main/shaderimage.h"
44 #include "program/prog_instruction.h"
45
46 #include "pipe/p_context.h"
47 #include "pipe/p_screen.h"
48 #include "tgsi/tgsi_ureg.h"
49 #include "tgsi/tgsi_info.h"
50 #include "util/u_math.h"
51 #include "util/u_memory.h"
52 #include "st_glsl_types.h"
53 #include "st_program.h"
54 #include "st_mesa_to_tgsi.h"
55 #include "st_format.h"
56 #include "st_nir.h"
57 #include "st_shader_cache.h"
58 #include "st_glsl_to_tgsi_temprename.h"
59
60 #include "util/hash_table.h"
61 #include <algorithm>
62
63 #define PROGRAM_ANY_CONST ((1 << PROGRAM_STATE_VAR) | \
64 (1 << PROGRAM_CONSTANT) | \
65 (1 << PROGRAM_UNIFORM))
66
67 #define MAX_GLSL_TEXTURE_OFFSET 4
68
69 static unsigned is_precise(const ir_variable *ir)
70 {
71 if (!ir)
72 return 0;
73 return ir->data.precise || ir->data.invariant;
74 }
75
76 class variable_storage {
77 DECLARE_RZALLOC_CXX_OPERATORS(variable_storage)
78
79 public:
80 variable_storage(ir_variable *var, gl_register_file file, int index,
81 unsigned array_id = 0)
82 : file(file), index(index), component(0), var(var), array_id(array_id)
83 {
84 assert(file != PROGRAM_ARRAY || array_id != 0);
85 }
86
87 gl_register_file file;
88 int index;
89
90 /* Explicit component location. This is given in terms of the GLSL-style
91 * swizzles where each double is a single component, i.e. for 64-bit types
92 * it can only be 0 or 1.
93 */
94 int component;
95 ir_variable *var; /* variable that maps to this, if any */
96 unsigned array_id;
97 };
98
99 class immediate_storage : public exec_node {
100 public:
101 immediate_storage(gl_constant_value *values, int size32, GLenum type)
102 {
103 memcpy(this->values, values, size32 * sizeof(gl_constant_value));
104 this->size32 = size32;
105 this->type = type;
106 }
107
108 /* doubles are stored across 2 gl_constant_values */
109 gl_constant_value values[4];
110 int size32; /**< Number of 32-bit components (1-4) */
111 GLenum type; /**< GL_DOUBLE, GL_FLOAT, GL_INT, GL_BOOL, or GL_UNSIGNED_INT */
112 };
113
114 static const st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
115 static const st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
116
117 struct inout_decl {
118 unsigned mesa_index;
119 unsigned array_id; /* TGSI ArrayID; 1-based: 0 means not an array */
120 unsigned size;
121 unsigned interp_loc;
122 unsigned gs_out_streams;
123 enum glsl_interp_mode interp;
124 enum glsl_base_type base_type;
125 ubyte usage_mask; /* GLSL-style usage-mask, i.e. single bit per double */
126 bool invariant;
127 };
128
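/* Look up the inout_decl with the given (1-based) TGSI ArrayID, or return
 * NULL if no declaration uses that ArrayID.
 */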
129 static struct inout_decl *
130 find_inout_array(struct inout_decl *decls, unsigned count, unsigned array_id)
131 {
132 assert(array_id != 0);
133
134 for (unsigned i = 0; i < count; i++) {
135 struct inout_decl *decl = &decls[i];
136
137 if (array_id == decl->array_id) {
138 return decl;
139 }
140 }
141
142 return NULL;
143 }
144
145 static enum glsl_base_type
146 find_array_type(struct inout_decl *decls, unsigned count, unsigned array_id)
147 {
148 if (!array_id)
149 return GLSL_TYPE_ERROR;
150 struct inout_decl *decl = find_inout_array(decls, count, array_id);
151 if (decl)
152 return decl->base_type;
153 return GLSL_TYPE_ERROR;
154 }
155
156 struct hwatomic_decl {
157 unsigned location;
158 unsigned binding;
159 unsigned size;
160 unsigned array_id;
161 };
162
163 struct glsl_to_tgsi_visitor : public ir_visitor {
164 public:
165 glsl_to_tgsi_visitor();
166 ~glsl_to_tgsi_visitor();
167
168 struct gl_context *ctx;
169 struct gl_program *prog;
170 struct gl_shader_program *shader_program;
171 struct gl_linked_shader *shader;
172 struct gl_shader_compiler_options *options;
173
174 int next_temp;
175
176 unsigned *array_sizes;
177 unsigned max_num_arrays;
178 unsigned next_array;
179
180 struct inout_decl inputs[4 * PIPE_MAX_SHADER_INPUTS];
181 unsigned num_inputs;
182 unsigned num_input_arrays;
183 struct inout_decl outputs[4 * PIPE_MAX_SHADER_OUTPUTS];
184 unsigned num_outputs;
185 unsigned num_output_arrays;
186
187 struct hwatomic_decl atomic_info[PIPE_MAX_HW_ATOMIC_BUFFERS];
188 unsigned num_atomics;
189 unsigned num_atomic_arrays;
190 int num_address_regs;
191 uint32_t samplers_used;
192 glsl_base_type sampler_types[PIPE_MAX_SAMPLERS];
193 enum tgsi_texture_type sampler_targets[PIPE_MAX_SAMPLERS];
194 int images_used;
195 enum tgsi_texture_type image_targets[PIPE_MAX_SHADER_IMAGES];
196 enum pipe_format image_formats[PIPE_MAX_SHADER_IMAGES];
197 bool indirect_addr_consts;
198 int wpos_transform_const;
199
200 bool native_integers;
201 bool have_sqrt;
202 bool have_fma;
203 bool use_shared_memory;
204 bool has_tex_txf_lz;
205 bool precise;
206 bool need_uarl;
207
208 variable_storage *find_variable_storage(ir_variable *var);
209
210 int add_constant(gl_register_file file, gl_constant_value values[8],
211 int size, GLenum datatype, uint16_t *swizzle_out);
212
213 st_src_reg get_temp(const glsl_type *type);
214 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);
215
216 st_src_reg st_src_reg_for_double(double val);
217 st_src_reg st_src_reg_for_float(float val);
218 st_src_reg st_src_reg_for_int(int val);
219 st_src_reg st_src_reg_for_int64(int64_t val);
220 st_src_reg st_src_reg_for_type(enum glsl_base_type type, int val);
221
222 /**
223 * \name Visit methods
224 *
225 * As typical for the visitor pattern, there must be one \c visit method for
226 * each concrete subclass of \c ir_instruction. Virtual base classes within
227 * the hierarchy should not have \c visit methods.
228 */
229 /*@{*/
230 virtual void visit(ir_variable *);
231 virtual void visit(ir_loop *);
232 virtual void visit(ir_loop_jump *);
233 virtual void visit(ir_function_signature *);
234 virtual void visit(ir_function *);
235 virtual void visit(ir_expression *);
236 virtual void visit(ir_swizzle *);
237 virtual void visit(ir_dereference_variable *);
238 virtual void visit(ir_dereference_array *);
239 virtual void visit(ir_dereference_record *);
240 virtual void visit(ir_assignment *);
241 virtual void visit(ir_constant *);
242 virtual void visit(ir_call *);
243 virtual void visit(ir_return *);
244 virtual void visit(ir_discard *);
245 virtual void visit(ir_texture *);
246 virtual void visit(ir_if *);
247 virtual void visit(ir_emit_vertex *);
248 virtual void visit(ir_end_primitive *);
249 virtual void visit(ir_barrier *);
250 /*@}*/
251
252 void visit_expression(ir_expression *, st_src_reg *) ATTRIBUTE_NOINLINE;
253
254 void visit_atomic_counter_intrinsic(ir_call *);
255 void visit_ssbo_intrinsic(ir_call *);
256 void visit_membar_intrinsic(ir_call *);
257 void visit_shared_intrinsic(ir_call *);
258 void visit_image_intrinsic(ir_call *);
259 void visit_generic_intrinsic(ir_call *, enum tgsi_opcode op);
260
261 st_src_reg result;
262
263 /** List of variable_storage */
264 struct hash_table *variables;
265
266 /** List of immediate_storage */
267 exec_list immediates;
268 unsigned num_immediates;
269
270 /** List of glsl_to_tgsi_instruction */
271 exec_list instructions;
272
273 glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, enum tgsi_opcode op,
274 st_dst_reg dst = undef_dst,
275 st_src_reg src0 = undef_src,
276 st_src_reg src1 = undef_src,
277 st_src_reg src2 = undef_src,
278 st_src_reg src3 = undef_src);
279
280 glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, enum tgsi_opcode op,
281 st_dst_reg dst, st_dst_reg dst1,
282 st_src_reg src0 = undef_src,
283 st_src_reg src1 = undef_src,
284 st_src_reg src2 = undef_src,
285 st_src_reg src3 = undef_src);
286
287 enum tgsi_opcode get_opcode(enum tgsi_opcode op,
288 st_dst_reg dst,
289 st_src_reg src0, st_src_reg src1);
290
291 /**
292 * Emit the correct dot-product instruction for the type of arguments
293 */
294 glsl_to_tgsi_instruction *emit_dp(ir_instruction *ir,
295 st_dst_reg dst,
296 st_src_reg src0,
297 st_src_reg src1,
298 unsigned elements);
299
300 void emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
301 st_dst_reg dst, st_src_reg src0);
302
303 void emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
304 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
305
306 void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);
307
308 void get_deref_offsets(ir_dereference *ir,
309 unsigned *array_size,
310 unsigned *base,
311 uint16_t *index,
312 st_src_reg *reladdr,
313 bool opaque);
314 void calc_deref_offsets(ir_dereference *tail,
315 unsigned *array_elements,
316 uint16_t *index,
317 st_src_reg *indirect,
318 unsigned *location);
319 st_src_reg canonicalize_gather_offset(st_src_reg offset);
320 bool handle_bound_deref(ir_dereference *ir);
321
322 bool try_emit_mad(ir_expression *ir,
323 int mul_operand);
324 bool try_emit_mad_for_and_not(ir_expression *ir,
325 int mul_operand);
326
327 void emit_swz(ir_expression *ir);
328
329 bool process_move_condition(ir_rvalue *ir);
330
331 void simplify_cmp(void);
332
333 void rename_temp_registers(struct rename_reg_pair *renames);
334 void get_first_temp_read(int *first_reads);
335 void get_first_temp_write(int *first_writes);
336 void get_last_temp_read_first_temp_write(int *last_reads, int *first_writes);
337 void get_last_temp_write(int *last_writes);
338
339 void copy_propagate(void);
340 int eliminate_dead_code(void);
341
342 void merge_two_dsts(void);
343 void merge_registers(void);
344 void renumber_registers(void);
345
346 void emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
347 st_dst_reg *l, st_src_reg *r,
348 st_src_reg *cond, bool cond_swap);
349
350 void *mem_ctx;
351 };
352
353 static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
354 GLSL_TYPE_FLOAT, 0);
355 static st_dst_reg address_reg2 = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
356 GLSL_TYPE_FLOAT, 1);
357 static st_dst_reg sampler_reladdr = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
358 GLSL_TYPE_FLOAT, 2);
359
360 static void
361 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
362 PRINTFLIKE(2, 3);
363
364 static void
365 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
366 {
367 va_list args;
368 va_start(args, fmt);
369 ralloc_vasprintf_append(&prog->data->InfoLog, fmt, args);
370 va_end(args);
371
372 prog->data->LinkStatus = LINKING_FAILURE;
373 }
374
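/**
 * Return a swizzle covering 'size' components: the first 'size' channels in
 * order, with the last used channel replicated into the remaining slots
 * (e.g. size 2 yields XYYY).
 */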
375 int
376 swizzle_for_size(int size)
377 {
378 static const int size_swizzles[4] = {
379 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
380 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
381 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
382 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
383 };
384
385 assert((size >= 1) && (size <= 4));
386 return size_swizzles[size - 1];
387 }
388
389
390 glsl_to_tgsi_instruction *
391 glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, enum tgsi_opcode op,
392 st_dst_reg dst, st_dst_reg dst1,
393 st_src_reg src0, st_src_reg src1,
394 st_src_reg src2, st_src_reg src3)
395 {
396 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
397 int num_reladdr = 0, i, j;
398 bool dst_is_64bit[2];
399
400 op = get_opcode(op, dst, src0, src1);
401
402 /* If we have to do relative addressing, we want to load the ARL
403 * reg directly for one of the regs, and preload the other reladdr
404 * sources into temps.
405 */
406 num_reladdr += dst.reladdr != NULL || dst.reladdr2;
407 assert(!dst1.reladdr); /* should be lowered in earlier passes */
408 num_reladdr += src0.reladdr != NULL || src0.reladdr2 != NULL;
409 num_reladdr += src1.reladdr != NULL || src1.reladdr2 != NULL;
410 num_reladdr += src2.reladdr != NULL || src2.reladdr2 != NULL;
411 num_reladdr += src3.reladdr != NULL || src3.reladdr2 != NULL;
412
413 reladdr_to_temp(ir, &src3, &num_reladdr);
414 reladdr_to_temp(ir, &src2, &num_reladdr);
415 reladdr_to_temp(ir, &src1, &num_reladdr);
416 reladdr_to_temp(ir, &src0, &num_reladdr);
417
418 if (dst.reladdr || dst.reladdr2) {
419 if (dst.reladdr)
420 emit_arl(ir, address_reg, *dst.reladdr);
421 if (dst.reladdr2)
422 emit_arl(ir, address_reg2, *dst.reladdr2);
423 num_reladdr--;
424 }
425
426 assert(num_reladdr == 0);
427
428 /* inst->op has only 8 bits. */
429 STATIC_ASSERT(TGSI_OPCODE_LAST <= 255);
430
431 inst->op = op;
432 inst->precise = this->precise;
433 inst->info = tgsi_get_opcode_info(op);
434 inst->dst[0] = dst;
435 inst->dst[1] = dst1;
436 inst->src[0] = src0;
437 inst->src[1] = src1;
438 inst->src[2] = src2;
439 inst->src[3] = src3;
440 inst->is_64bit_expanded = false;
441 inst->ir = ir;
442 inst->dead_mask = 0;
443 inst->tex_offsets = NULL;
444 inst->tex_offset_num_offset = 0;
445 inst->saturate = 0;
446 inst->tex_shadow = 0;
447 /* default to float, for paths where this is not initialized
448 * (since 0==UINT which is likely wrong):
449 */
450 inst->tex_type = GLSL_TYPE_FLOAT;
451
452 /* Update indirect addressing status used by TGSI */
453 if (dst.reladdr || dst.reladdr2) {
454 switch (dst.file) {
455 case PROGRAM_STATE_VAR:
456 case PROGRAM_CONSTANT:
457 case PROGRAM_UNIFORM:
458 this->indirect_addr_consts = true;
459 break;
460 case PROGRAM_IMMEDIATE:
461 assert(!"immediates should not have indirect addressing");
462 break;
463 default:
464 break;
465 }
466 }
467 else {
468 for (i = 0; i < 4; i++) {
469 if (inst->src[i].reladdr) {
470 switch (inst->src[i].file) {
471 case PROGRAM_STATE_VAR:
472 case PROGRAM_CONSTANT:
473 case PROGRAM_UNIFORM:
474 this->indirect_addr_consts = true;
475 break;
476 case PROGRAM_IMMEDIATE:
477 assert(!"immediates should not have indirect addressing");
478 break;
479 default:
480 break;
481 }
482 }
483 }
484 }
485
486 /*
487 * This section contains the double processing.
488 * GLSL just represents doubles as single-channel values, whereas most
489 * HW and TGSI represent doubles as pairs of register channels.
490 *
491 * So we have to fix up the destination writemask/index and the source
492 * swizzle/indexes.  Dest writemasks need to translate from a single-channel
493 * write mask to a dual-channel writemask, and the index must also be
494 * adjusted if we are touching the Z,W fields in the pre-translated writemask.
495 *
496 * Src channels need similar index adjustments along with swizzle changes
497 * so that we pick the XY, ZW pairs from the correct index.
498 *
499 * GLSL [0].x -> TGSI [0].xy
500 * GLSL [0].y -> TGSI [0].zw
501 * GLSL [0].z -> TGSI [1].xy
502 * GLSL [0].w -> TGSI [1].zw
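 *
 * So a MOV whose GLSL writemask is .zw is split into two TGSI MOVs, one
 * writing [1].xy and one writing [1].zw, with the source swizzles adjusted
 * to pick the matching XY/ZW pairs.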
503 */
504 for (j = 0; j < 2; j++) {
505 dst_is_64bit[j] = glsl_base_type_is_64bit(inst->dst[j].type);
506 if (!dst_is_64bit[j] && inst->dst[j].file == PROGRAM_OUTPUT &&
507 inst->dst[j].type == GLSL_TYPE_ARRAY) {
508 enum glsl_base_type type = find_array_type(this->outputs,
509 this->num_outputs,
510 inst->dst[j].array_id);
511 if (glsl_base_type_is_64bit(type))
512 dst_is_64bit[j] = true;
513 }
514 }
515
516 if (dst_is_64bit[0] || dst_is_64bit[1] ||
517 glsl_base_type_is_64bit(inst->src[0].type)) {
518 glsl_to_tgsi_instruction *dinst = NULL;
519 int initial_src_swz[4], initial_src_idx[4];
520 int initial_dst_idx[2], initial_dst_writemask[2];
521 /* select the writemask for dst0 or dst1 */
522 unsigned writemask = inst->dst[1].file == PROGRAM_UNDEFINED
523 ? inst->dst[0].writemask : inst->dst[1].writemask;
524
525 /* copy out the writemask, index and swizzles for all src/dsts. */
526 for (j = 0; j < 2; j++) {
527 initial_dst_writemask[j] = inst->dst[j].writemask;
528 initial_dst_idx[j] = inst->dst[j].index;
529 }
530
531 for (j = 0; j < 4; j++) {
532 initial_src_swz[j] = inst->src[j].swizzle;
533 initial_src_idx[j] = inst->src[j].index;
534 }
535
536 /*
537 * scan all the components in the dst writemask
538 * generate an instruction for each of them if required.
539 */
540 st_src_reg addr;
541 while (writemask) {
542
543 int i = u_bit_scan(&writemask);
544
545 /* before emitting the instruction, see if we have to adjust
546 * load / store address */
547 if (i > 1 && (inst->op == TGSI_OPCODE_LOAD ||
548 inst->op == TGSI_OPCODE_STORE) &&
549 addr.file == PROGRAM_UNDEFINED) {
550 /* We have to advance the buffer address by 16 */
551 addr = get_temp(glsl_type::uint_type);
552 emit_asm(ir, TGSI_OPCODE_UADD, st_dst_reg(addr),
553 inst->src[0], st_src_reg_for_int(16));
554 }
555
556 /* the first time through, use the instruction we already created */
557 if (dinst == NULL) {
558 dinst = inst;
559 } else {
560 /* create a new instruction for each subsequent component */
561 dinst = new(mem_ctx) glsl_to_tgsi_instruction();
562 *dinst = *inst;
563 dinst->next = NULL;
564 dinst->prev = NULL;
565 }
566 this->instructions.push_tail(dinst);
567 dinst->is_64bit_expanded = true;
568
569 /* modify the destination if we are splitting */
570 for (j = 0; j < 2; j++) {
571 if (dst_is_64bit[j]) {
572 dinst->dst[j].writemask = (i & 1) ? WRITEMASK_ZW : WRITEMASK_XY;
573 dinst->dst[j].index = initial_dst_idx[j];
574 if (i > 1) {
575 if (dinst->op == TGSI_OPCODE_LOAD ||
576 dinst->op == TGSI_OPCODE_STORE)
577 dinst->src[0] = addr;
578 if (dinst->op != TGSI_OPCODE_STORE)
579 dinst->dst[j].index++;
580 }
581 } else {
582 /* if we aren't writing to a double, just get the bit of the
583 * initial writemask for this channel
584 */
585 dinst->dst[j].writemask = initial_dst_writemask[j] & (1 << i);
586 }
587 }
588
589 /* modify the src registers */
590 for (j = 0; j < 4; j++) {
591 int swz = GET_SWZ(initial_src_swz[j], i);
592
593 if (glsl_base_type_is_64bit(dinst->src[j].type)) {
594 dinst->src[j].index = initial_src_idx[j];
595 if (swz > 1) {
596 dinst->src[j].double_reg2 = true;
597 dinst->src[j].index++;
598 }
599
600 if (swz & 1)
601 dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W,
602 SWIZZLE_Z, SWIZZLE_W);
603 else
604 dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
605 SWIZZLE_X, SWIZZLE_Y);
606
607 } else {
608 /* some opcodes are special cases in what they use as sources
609 * - [FUI]2D/[UI]2I64 is a float/[u]int src0, (D)LDEXP is
610 * integer src1
611 */
612 if (op == TGSI_OPCODE_F2D || op == TGSI_OPCODE_U2D ||
613 op == TGSI_OPCODE_I2D ||
614 op == TGSI_OPCODE_I2I64 || op == TGSI_OPCODE_U2I64 ||
615 op == TGSI_OPCODE_DLDEXP || op == TGSI_OPCODE_LDEXP ||
616 (op == TGSI_OPCODE_UCMP && dst_is_64bit[0])) {
617 dinst->src[j].swizzle = MAKE_SWIZZLE4(swz, swz, swz, swz);
618 }
619 }
620 }
621 }
622 inst = dinst;
623 } else {
624 this->instructions.push_tail(inst);
625 }
626
627
628 return inst;
629 }
630
631 glsl_to_tgsi_instruction *
632 glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, enum tgsi_opcode op,
633 st_dst_reg dst,
634 st_src_reg src0, st_src_reg src1,
635 st_src_reg src2, st_src_reg src3)
636 {
637 return emit_asm(ir, op, dst, undef_dst, src0, src1, src2, src3);
638 }
639
640 /**
641 * Determines whether to use an integer, unsigned integer, or float opcode
642 * based on the operands and input opcode, then emits the result.
643 */
644 enum tgsi_opcode
645 glsl_to_tgsi_visitor::get_opcode(enum tgsi_opcode op,
646 st_dst_reg dst,
647 st_src_reg src0, st_src_reg src1)
648 {
649 enum glsl_base_type type = GLSL_TYPE_FLOAT;
650
651 if (op == TGSI_OPCODE_MOV)
652 return op;
653
654 assert(src0.type != GLSL_TYPE_ARRAY);
655 assert(src0.type != GLSL_TYPE_STRUCT);
656 assert(src1.type != GLSL_TYPE_ARRAY);
657 assert(src1.type != GLSL_TYPE_STRUCT);
658
659 if (is_resource_instruction(op))
660 type = src1.type;
661 else if (src0.type == GLSL_TYPE_INT64 || src1.type == GLSL_TYPE_INT64)
662 type = GLSL_TYPE_INT64;
663 else if (src0.type == GLSL_TYPE_UINT64 || src1.type == GLSL_TYPE_UINT64)
664 type = GLSL_TYPE_UINT64;
665 else if (src0.type == GLSL_TYPE_DOUBLE || src1.type == GLSL_TYPE_DOUBLE)
666 type = GLSL_TYPE_DOUBLE;
667 else if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
668 type = GLSL_TYPE_FLOAT;
669 else if (native_integers)
670 type = src0.type == GLSL_TYPE_BOOL ? GLSL_TYPE_INT : src0.type;
671
672 #define case7(c, f, i, u, d, i64, ui64) \
673 case TGSI_OPCODE_##c: \
674 if (type == GLSL_TYPE_UINT64) \
675 op = TGSI_OPCODE_##ui64; \
676 else if (type == GLSL_TYPE_INT64) \
677 op = TGSI_OPCODE_##i64; \
678 else if (type == GLSL_TYPE_DOUBLE) \
679 op = TGSI_OPCODE_##d; \
680 else if (type == GLSL_TYPE_INT) \
681 op = TGSI_OPCODE_##i; \
682 else if (type == GLSL_TYPE_UINT) \
683 op = TGSI_OPCODE_##u; \
684 else \
685 op = TGSI_OPCODE_##f; \
686 break;
687
688 #define casecomp(c, f, i, u, d, i64, ui64) \
689 case TGSI_OPCODE_##c: \
690 if (type == GLSL_TYPE_INT64) \
691 op = TGSI_OPCODE_##i64; \
692 else if (type == GLSL_TYPE_UINT64) \
693 op = TGSI_OPCODE_##ui64; \
694 else if (type == GLSL_TYPE_DOUBLE) \
695 op = TGSI_OPCODE_##d; \
696 else if (type == GLSL_TYPE_INT || type == GLSL_TYPE_SUBROUTINE) \
697 op = TGSI_OPCODE_##i; \
698 else if (type == GLSL_TYPE_UINT) \
699 op = TGSI_OPCODE_##u; \
700 else if (native_integers) \
701 op = TGSI_OPCODE_##f; \
702 else \
703 op = TGSI_OPCODE_##c; \
704 break;
705
706 switch (op) {
707 /* Some instructions are initially selected without considering the type.
708 * This fixes the type:
709 *
710 * INIT FLOAT SINT UINT DOUBLE SINT64 UINT64
711 */
712 case7(ADD, ADD, UADD, UADD, DADD, U64ADD, U64ADD);
713 case7(CEIL, CEIL, LAST, LAST, DCEIL, LAST, LAST);
714 case7(DIV, DIV, IDIV, UDIV, DDIV, I64DIV, U64DIV);
715 case7(FMA, FMA, UMAD, UMAD, DFMA, LAST, LAST);
716 case7(FLR, FLR, LAST, LAST, DFLR, LAST, LAST);
717 case7(FRC, FRC, LAST, LAST, DFRAC, LAST, LAST);
718 case7(MUL, MUL, UMUL, UMUL, DMUL, U64MUL, U64MUL);
719 case7(MAD, MAD, UMAD, UMAD, DMAD, LAST, LAST);
720 case7(MAX, MAX, IMAX, UMAX, DMAX, I64MAX, U64MAX);
721 case7(MIN, MIN, IMIN, UMIN, DMIN, I64MIN, U64MIN);
722 case7(RCP, RCP, LAST, LAST, DRCP, LAST, LAST);
723 case7(ROUND, ROUND,LAST, LAST, DROUND, LAST, LAST);
724 case7(RSQ, RSQ, LAST, LAST, DRSQ, LAST, LAST);
725 case7(SQRT, SQRT, LAST, LAST, DSQRT, LAST, LAST);
726 case7(SSG, SSG, ISSG, ISSG, DSSG, I64SSG, I64SSG);
727 case7(TRUNC, TRUNC,LAST, LAST, DTRUNC, LAST, LAST);
728
729 case7(MOD, LAST, MOD, UMOD, LAST, I64MOD, U64MOD);
730 case7(SHL, LAST, SHL, SHL, LAST, U64SHL, U64SHL);
731 case7(IBFE, LAST, IBFE, UBFE, LAST, LAST, LAST);
732 case7(IMSB, LAST, IMSB, UMSB, LAST, LAST, LAST);
733 case7(IMUL_HI, LAST, IMUL_HI, UMUL_HI, LAST, LAST, LAST);
734 case7(ISHR, LAST, ISHR, USHR, LAST, I64SHR, U64SHR);
735 case7(ATOMIMAX,LAST, ATOMIMAX,ATOMUMAX,LAST, LAST, LAST);
736 case7(ATOMIMIN,LAST, ATOMIMIN,ATOMUMIN,LAST, LAST, LAST);
737
738 casecomp(SEQ, FSEQ, USEQ, USEQ, DSEQ, U64SEQ, U64SEQ);
739 casecomp(SNE, FSNE, USNE, USNE, DSNE, U64SNE, U64SNE);
740 casecomp(SGE, FSGE, ISGE, USGE, DSGE, I64SGE, U64SGE);
741 casecomp(SLT, FSLT, ISLT, USLT, DSLT, I64SLT, U64SLT);
742
743 default:
744 break;
745 }
746
747 assert(op != TGSI_OPCODE_LAST);
748 return op;
749 }
750
751 glsl_to_tgsi_instruction *
752 glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
753 st_dst_reg dst, st_src_reg src0, st_src_reg src1,
754 unsigned elements)
755 {
756 static const enum tgsi_opcode dot_opcodes[] = {
757 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
758 };
759
760 return emit_asm(ir, dot_opcodes[elements - 2], dst, src0, src1);
761 }
762
763 /**
764 * Emits TGSI scalar opcodes to produce unique answers across channels.
765 *
766 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X
767 * channel determines the result across all channels. So to do a vec4
768 * of this operation, we want to emit a scalar per source channel used
769 * to produce dest channels.
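 *
 * For example, two dest channels that both read the same source channel are
 * folded into a single instruction with a two-bit writemask, while channels
 * reading different source channels each get their own scalar instruction.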
770 */
771 void
772 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
773 st_dst_reg dst,
774 st_src_reg orig_src0, st_src_reg orig_src1)
775 {
776 int i, j;
777 int done_mask = ~dst.writemask;
778
779 /* TGSI RCP is a scalar operation splatting results to all channels,
780 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
781 * dst channels.
782 */
783 for (i = 0; i < 4; i++) {
784 GLuint this_mask = (1 << i);
785 st_src_reg src0 = orig_src0;
786 st_src_reg src1 = orig_src1;
787
788 if (done_mask & this_mask)
789 continue;
790
791 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
792 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
793 for (j = i + 1; j < 4; j++) {
794 /* If there is another enabled component in the destination that is
795 * derived from the same inputs, generate its value on this pass as
796 * well.
797 */
798 if (!(done_mask & (1 << j)) &&
799 GET_SWZ(src0.swizzle, j) == src0_swiz &&
800 GET_SWZ(src1.swizzle, j) == src1_swiz) {
801 this_mask |= (1 << j);
802 }
803 }
804 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
805 src0_swiz, src0_swiz);
806 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
807 src1_swiz, src1_swiz);
808
809 dst.writemask = this_mask;
810 emit_asm(ir, op, dst, src0, src1);
811 done_mask |= this_mask;
812 }
813 }
814
815 void
816 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
817 st_dst_reg dst, st_src_reg src0)
818 {
819 st_src_reg undef = undef_src;
820
821 undef.swizzle = SWIZZLE_XXXX;
822
823 emit_scalar(ir, op, dst, src0, undef);
824 }
825
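/**
 * Load an address register for indirect addressing.  Integer sources that
 * are already legal TGSI address operands are left untouched when the
 * backend does not require UARL; otherwise ARL (float source) or UARL
 * (integer source) is emitted and num_address_regs is updated.
 */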
826 void
827 glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
828 st_dst_reg dst, st_src_reg src0)
829 {
830 enum tgsi_opcode op = TGSI_OPCODE_ARL;
831
832 if (src0.type == GLSL_TYPE_INT || src0.type == GLSL_TYPE_UINT) {
833 if (!this->need_uarl && src0.is_legal_tgsi_address_operand())
834 return;
835
836 op = TGSI_OPCODE_UARL;
837 }
838
839 assert(dst.file == PROGRAM_ADDRESS);
840 if (dst.index >= this->num_address_regs)
841 this->num_address_regs = dst.index + 1;
842
843 emit_asm(NULL, op, dst, src0);
844 }
845
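/**
 * Add a constant to either the program's parameter list (PROGRAM_CONSTANT)
 * or the immediates list (PROGRAM_IMMEDIATE), reusing an existing immediate
 * slot when an identical value is already present.  Returns the register
 * index of the constant.
 */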
846 int
847 glsl_to_tgsi_visitor::add_constant(gl_register_file file,
848 gl_constant_value values[8], int size,
849 GLenum datatype,
850 uint16_t *swizzle_out)
851 {
852 if (file == PROGRAM_CONSTANT) {
853 GLuint swizzle = swizzle_out ? *swizzle_out : 0;
854 int result = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
855 values, size, datatype,
856 &swizzle);
857 if (swizzle_out)
858 *swizzle_out = swizzle;
859 return result;
860 }
861
862 assert(file == PROGRAM_IMMEDIATE);
863
864 int index = 0;
865 immediate_storage *entry;
866 int size32 = size * ((datatype == GL_DOUBLE ||
867 datatype == GL_INT64_ARB ||
868 datatype == GL_UNSIGNED_INT64_ARB) ? 2 : 1);
869 int i;
870
871 /* Search immediate storage to see if we already have an identical
872 * immediate that we can use instead of adding a duplicate entry.
873 */
874 foreach_in_list(immediate_storage, entry, &this->immediates) {
875 immediate_storage *tmp = entry;
876
877 for (i = 0; i * 4 < size32; i++) {
878 int slot_size = MIN2(size32 - (i * 4), 4);
879 if (tmp->type != datatype || tmp->size32 != slot_size)
880 break;
881 if (memcmp(tmp->values, &values[i * 4],
882 slot_size * sizeof(gl_constant_value)))
883 break;
884
885 /* Everything matches, keep going until the full size is matched */
886 tmp = (immediate_storage *)tmp->next;
887 }
888
889 /* The full value matched */
890 if (i * 4 >= size32)
891 return index;
892
893 index++;
894 }
895
896 for (i = 0; i * 4 < size32; i++) {
897 int slot_size = MIN2(size32 - (i * 4), 4);
898 /* Add this immediate to the list. */
899 entry = new(mem_ctx) immediate_storage(&values[i * 4],
900 slot_size, datatype);
901 this->immediates.push_tail(entry);
902 this->num_immediates++;
903 }
904 return index;
905 }
906
907 st_src_reg
908 glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
909 {
910 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_FLOAT);
911 union gl_constant_value uval;
912
913 uval.f = val;
914 src.index = add_constant(src.file, &uval, 1, GL_FLOAT, &src.swizzle);
915
916 return src;
917 }
918
919 st_src_reg
920 glsl_to_tgsi_visitor::st_src_reg_for_double(double val)
921 {
922 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_DOUBLE);
923 union gl_constant_value uval[2];
924
925 memcpy(uval, &val, sizeof(uval));
926 src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
927 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);
928 return src;
929 }
930
931 st_src_reg
932 glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
933 {
934 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT);
935 union gl_constant_value uval;
936
937 assert(native_integers);
938
939 uval.i = val;
940 src.index = add_constant(src.file, &uval, 1, GL_INT, &src.swizzle);
941
942 return src;
943 }
944
945 st_src_reg
946 glsl_to_tgsi_visitor::st_src_reg_for_int64(int64_t val)
947 {
948 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT64);
949 union gl_constant_value uval[2];
950
951 memcpy(uval, &val, sizeof(uval));
952 src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
953 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);
954
955 return src;
956 }
957
958 st_src_reg
959 glsl_to_tgsi_visitor::st_src_reg_for_type(enum glsl_base_type type, int val)
960 {
961 if (native_integers)
962 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
963 st_src_reg_for_int(val);
964 else
965 return st_src_reg_for_float(val);
966 }
967
968 static int
969 attrib_type_size(const struct glsl_type *type, bool is_vs_input)
970 {
971 return type->count_attribute_slots(is_vs_input);
972 }
973
974 static int
975 type_size(const struct glsl_type *type)
976 {
977 return type->count_attribute_slots(false);
978 }
979
980 static void
981 add_buffer_to_load_and_stores(glsl_to_tgsi_instruction *inst, st_src_reg *buf,
982 exec_list *instructions, ir_constant *access)
983 {
984 /**
985 * emit_asm() might have actually split the op into pieces, e.g. for
986 * double stores. We have to go back and fix up all the generated ops.
987 */
988 enum tgsi_opcode op = inst->op;
989 do {
990 inst->resource = *buf;
991 if (access)
992 inst->buffer_access = access->value.u[0];
993
994 if (inst == instructions->get_head_raw())
995 break;
996 inst = (glsl_to_tgsi_instruction *)inst->get_prev();
997
998 if (inst->op == TGSI_OPCODE_UADD) {
999 if (inst == instructions->get_head_raw())
1000 break;
1001 inst = (glsl_to_tgsi_instruction *)inst->get_prev();
1002 }
1003 } while (inst->op == op && inst->resource.file == PROGRAM_UNDEFINED);
1004 }
1005
1006 /**
1007 * If the given GLSL type is an array or matrix or a structure containing
1008 * an array/matrix member, return true. Else return false.
1009 *
1010 * This is used to determine which kind of temp storage (PROGRAM_TEMPORARY
1011 * or PROGRAM_ARRAY) should be used for variables of this type. Anytime
1012 * we have an array that might be indexed with a variable, we need to use
1013 * the latter storage type.
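 *
 * For example, when the driver allows indirect temporaries, a local struct
 * containing a mat4 ends up in PROGRAM_ARRAY, while a plain vec4 temporary
 * stays in PROGRAM_TEMPORARY.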
1014 */
1015 static bool
1016 type_has_array_or_matrix(const glsl_type *type)
1017 {
1018 if (type->is_array() || type->is_matrix())
1019 return true;
1020
1021 if (type->is_record()) {
1022 for (unsigned i = 0; i < type->length; i++) {
1023 if (type_has_array_or_matrix(type->fields.structure[i].type)) {
1024 return true;
1025 }
1026 }
1027 }
1028
1029 return false;
1030 }
1031
1032
1033 /**
1034 * In the initial pass of codegen, we assign temporary numbers to
1035 * intermediate results. (not SSA -- variable assignments will reuse
1036 * storage).
1037 */
1038 st_src_reg
1039 glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
1040 {
1041 st_src_reg src;
1042
1043 src.type = native_integers ? type->base_type : GLSL_TYPE_FLOAT;
1044 src.reladdr = NULL;
1045 src.negate = 0;
1046 src.abs = 0;
1047
1048 if (!options->EmitNoIndirectTemp && type_has_array_or_matrix(type)) {
1049 if (next_array >= max_num_arrays) {
1050 max_num_arrays += 32;
1051 array_sizes = (unsigned*)
1052 realloc(array_sizes, sizeof(array_sizes[0]) * max_num_arrays);
1053 }
1054
1055 src.file = PROGRAM_ARRAY;
1056 src.index = 0;
1057 src.array_id = next_array + 1;
1058 array_sizes[next_array] = type_size(type);
1059 ++next_array;
1060
1061 } else {
1062 src.file = PROGRAM_TEMPORARY;
1063 src.index = next_temp;
1064 next_temp += type_size(type);
1065 }
1066
1067 if (type->is_array() || type->is_record()) {
1068 src.swizzle = SWIZZLE_NOOP;
1069 } else {
1070 src.swizzle = swizzle_for_size(type->vector_elements);
1071 }
1072
1073 return src;
1074 }
1075
1076 variable_storage *
1077 glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
1078 {
1079 struct hash_entry *entry;
1080
1081 entry = _mesa_hash_table_search(this->variables, var);
1082 if (!entry)
1083 return NULL;
1084
1085 return (variable_storage *)entry->data;
1086 }
1087
1088 void
1089 glsl_to_tgsi_visitor::visit(ir_variable *ir)
1090 {
1091 if (strcmp(ir->name, "gl_FragCoord") == 0) {
1092 this->prog->OriginUpperLeft = ir->data.origin_upper_left;
1093 this->prog->PixelCenterInteger = ir->data.pixel_center_integer;
1094 }
1095
1096 if (ir->data.mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
1097 unsigned int i;
1098 const ir_state_slot *const slots = ir->get_state_slots();
1099 assert(slots != NULL);
1100
1101 /* Check if this statevar's setup in the STATE file exactly
1102 * matches how we'll want to reference it as a
1103 * struct/array/whatever. If not, then we need to move it into
1104 * temporary storage and hope that it'll get copy-propagated
1105 * out.
1106 */
1107 for (i = 0; i < ir->get_num_state_slots(); i++) {
1108 if (slots[i].swizzle != SWIZZLE_XYZW) {
1109 break;
1110 }
1111 }
1112
1113 variable_storage *storage;
1114 st_dst_reg dst;
1115 if (i == ir->get_num_state_slots()) {
1116 /* We'll set the index later. */
1117 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);
1118
1119 _mesa_hash_table_insert(this->variables, ir, storage);
1120
1121 dst = undef_dst;
1122 } else {
1123 /* The variable_storage constructor allocates slots based on the size
1124 * of the type. However, this had better match the number of state
1125 * elements that we're going to copy into the new temporary.
1126 */
1127 assert((int) ir->get_num_state_slots() == type_size(ir->type));
1128
1129 dst = st_dst_reg(get_temp(ir->type));
1130
1131 storage = new(mem_ctx) variable_storage(ir, dst.file, dst.index,
1132 dst.array_id);
1133
1134 _mesa_hash_table_insert(this->variables, ir, storage);
1135 }
1136
1137
1138 for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
1139 int index = _mesa_add_state_reference(this->prog->Parameters,
1140 slots[i].tokens);
1141
1142 if (storage->file == PROGRAM_STATE_VAR) {
1143 if (storage->index == -1) {
1144 storage->index = index;
1145 } else {
1146 assert(index == storage->index + (int)i);
1147 }
1148 } else {
1149 /* We use GLSL_TYPE_FLOAT here regardless of the actual type of
1150 * the data being moved since MOV does not care about the type of
1151 * data it is moving, and we don't want to declare registers with
1152 * array or struct types.
1153 */
1154 st_src_reg src(PROGRAM_STATE_VAR, index, GLSL_TYPE_FLOAT);
1155 src.swizzle = slots[i].swizzle;
1156 emit_asm(ir, TGSI_OPCODE_MOV, dst, src);
1157 /* even a float takes up a whole vec4 reg in a struct/array. */
1158 dst.index++;
1159 }
1160 }
1161
1162 if (storage->file == PROGRAM_TEMPORARY &&
1163 dst.index != storage->index + (int) ir->get_num_state_slots()) {
1164 fail_link(this->shader_program,
1165 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
1166 ir->name, dst.index - storage->index,
1167 type_size(ir->type));
1168 }
1169 }
1170 }
1171
1172 void
1173 glsl_to_tgsi_visitor::visit(ir_loop *ir)
1174 {
1175 emit_asm(NULL, TGSI_OPCODE_BGNLOOP);
1176
1177 visit_exec_list(&ir->body_instructions, this);
1178
1179 emit_asm(NULL, TGSI_OPCODE_ENDLOOP);
1180 }
1181
1182 void
1183 glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
1184 {
1185 switch (ir->mode) {
1186 case ir_loop_jump::jump_break:
1187 emit_asm(NULL, TGSI_OPCODE_BRK);
1188 break;
1189 case ir_loop_jump::jump_continue:
1190 emit_asm(NULL, TGSI_OPCODE_CONT);
1191 break;
1192 }
1193 }
1194
1195
1196 void
1197 glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
1198 {
1199 assert(0);
1200 (void)ir;
1201 }
1202
1203 void
1204 glsl_to_tgsi_visitor::visit(ir_function *ir)
1205 {
1206 /* Ignore function bodies other than main() -- we shouldn't see calls to
1207 * them since they should all be inlined before we get to glsl_to_tgsi.
1208 */
1209 if (strcmp(ir->name, "main") == 0) {
1210 const ir_function_signature *sig;
1211 exec_list empty;
1212
1213 sig = ir->matching_signature(NULL, &empty, false);
1214
1215 assert(sig);
1216
1217 foreach_in_list(ir_instruction, ir, &sig->body) {
1218 ir->accept(this);
1219 }
1220 }
1221 }
1222
1223 bool
1224 glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
1225 {
1226 int nonmul_operand = 1 - mul_operand;
1227 st_src_reg a, b, c;
1228 st_dst_reg result_dst;
1229
1230 ir_expression *expr = ir->operands[mul_operand]->as_expression();
1231 if (!expr || expr->operation != ir_binop_mul)
1232 return false;
1233
1234 expr->operands[0]->accept(this);
1235 a = this->result;
1236 expr->operands[1]->accept(this);
1237 b = this->result;
1238 ir->operands[nonmul_operand]->accept(this);
1239 c = this->result;
1240
1241 this->result = get_temp(ir->type);
1242 result_dst = st_dst_reg(this->result);
1243 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1244 emit_asm(ir, TGSI_OPCODE_MAD, result_dst, a, b, c);
1245
1246 return true;
1247 }
1248
1249 /**
1250 * Emit MAD(a, -b, a) instead of AND(a, NOT(b))
1251 *
1252 * The logic values are 1.0 for true and 0.0 for false. Logical-and is
1253 * implemented using multiplication, and logical-or is implemented using
1254 * addition. Logical-not can be implemented as (true - x), or (1.0 - x).
1255 * As a result, the logical expression (a & !b) can be rewritten as:
1256 *
1257 * - a * !b
1258 * - a * (1 - b)
1259 * - (a * 1) - (a * b)
1260 * - a + -(a * b)
1261 * - a + (a * -b)
1262 *
1263 * This final expression can be implemented as a single MAD(a, -b, a)
1264 * instruction.
1265 */
1266 bool
1267 glsl_to_tgsi_visitor::try_emit_mad_for_and_not(ir_expression *ir,
1268 int try_operand)
1269 {
1270 const int other_operand = 1 - try_operand;
1271 st_src_reg a, b;
1272
1273 ir_expression *expr = ir->operands[try_operand]->as_expression();
1274 if (!expr || expr->operation != ir_unop_logic_not)
1275 return false;
1276
1277 ir->operands[other_operand]->accept(this);
1278 a = this->result;
1279 expr->operands[0]->accept(this);
1280 b = this->result;
1281
1282 b.negate = ~b.negate;
1283
1284 this->result = get_temp(ir->type);
1285 emit_asm(ir, TGSI_OPCODE_MAD, st_dst_reg(this->result), a, b, a);
1286
1287 return true;
1288 }
1289
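/**
 * Resolve relative addressing on a source register: emit the ARL/UARL loads
 * for its reladdr operands and, unless this is the last operand still using
 * relative addressing, copy the value into a temporary so the address
 * registers can be reloaded for the next operand.
 */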
1290 void
1291 glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
1292 st_src_reg *reg, int *num_reladdr)
1293 {
1294 if (!reg->reladdr && !reg->reladdr2)
1295 return;
1296
1297 if (reg->reladdr)
1298 emit_arl(ir, address_reg, *reg->reladdr);
1299 if (reg->reladdr2)
1300 emit_arl(ir, address_reg2, *reg->reladdr2);
1301
1302 if (*num_reladdr != 1) {
1303 st_src_reg temp = get_temp(glsl_type::get_instance(reg->type, 4, 1));
1304
1305 emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
1306 *reg = temp;
1307 }
1308
1309 (*num_reladdr)--;
1310 }
1311
1312 void
1313 glsl_to_tgsi_visitor::visit(ir_expression *ir)
1314 {
1315 st_src_reg op[ARRAY_SIZE(ir->operands)];
1316
1317 /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
1318 */
1319 if (!this->precise && ir->operation == ir_binop_add) {
1320 if (try_emit_mad(ir, 1))
1321 return;
1322 if (try_emit_mad(ir, 0))
1323 return;
1324 }
1325
1326 /* Quick peephole: Emit MAD(a, -b, a) instead of AND(a, NOT(b))
1327 */
1328 if (!native_integers && ir->operation == ir_binop_logic_and) {
1329 if (try_emit_mad_for_and_not(ir, 1))
1330 return;
1331 if (try_emit_mad_for_and_not(ir, 0))
1332 return;
1333 }
1334
1335 if (ir->operation == ir_quadop_vector)
1336 assert(!"ir_quadop_vector should have been lowered");
1337
1338 for (unsigned int operand = 0; operand < ir->num_operands; operand++) {
1339 this->result.file = PROGRAM_UNDEFINED;
1340 ir->operands[operand]->accept(this);
1341 if (this->result.file == PROGRAM_UNDEFINED) {
1342 printf("Failed to get tree for expression operand:\n");
1343 ir->operands[operand]->print();
1344 printf("\n");
1345 exit(1);
1346 }
1347 op[operand] = this->result;
1348
1349 /* Matrix expression operands should have been broken down to vector
1350 * operations already.
1351 */
1352 assert(!ir->operands[operand]->type->is_matrix());
1353 }
1354
1355 visit_expression(ir, op);
1356 }
1357
1358 /* The non-recursive part of the expression visitor lives in a separate
1359 * function and should be prevented from being inlined, to avoid a stack
1360 * explosion when deeply nested expressions are visited.
1361 */
1362 void
1363 glsl_to_tgsi_visitor::visit_expression(ir_expression* ir, st_src_reg *op)
1364 {
1365 st_src_reg result_src;
1366 st_dst_reg result_dst;
1367
1368 int vector_elements = ir->operands[0]->type->vector_elements;
1369 if (ir->operands[1] &&
1370 ir->operation != ir_binop_interpolate_at_offset &&
1371 ir->operation != ir_binop_interpolate_at_sample) {
1372 st_src_reg *swz_op = NULL;
1373 if (vector_elements > ir->operands[1]->type->vector_elements) {
1374 assert(ir->operands[1]->type->vector_elements == 1);
1375 swz_op = &op[1];
1376 } else if (vector_elements < ir->operands[1]->type->vector_elements) {
1377 assert(ir->operands[0]->type->vector_elements == 1);
1378 swz_op = &op[0];
1379 }
1380 if (swz_op) {
1381 uint16_t swizzle_x = GET_SWZ(swz_op->swizzle, 0);
1382 swz_op->swizzle = MAKE_SWIZZLE4(swizzle_x, swizzle_x,
1383 swizzle_x, swizzle_x);
1384 }
1385 vector_elements = MAX2(vector_elements,
1386 ir->operands[1]->type->vector_elements);
1387 }
1388 if (ir->operands[2] &&
1389 ir->operands[2]->type->vector_elements != vector_elements) {
1390 /* This can happen with ir_triop_lrp, i.e. glsl mix */
1391 assert(ir->operands[2]->type->vector_elements == 1);
1392 uint16_t swizzle_x = GET_SWZ(op[2].swizzle, 0);
1393 op[2].swizzle = MAKE_SWIZZLE4(swizzle_x, swizzle_x,
1394 swizzle_x, swizzle_x);
1395 }
1396
1397 this->result.file = PROGRAM_UNDEFINED;
1398
1399 /* Storage for our result. Ideally for an assignment we'd be using
1400 * the actual storage for the result here, instead.
1401 */
1402 result_src = get_temp(ir->type);
1403 /* convenience for the emit functions below. */
1404 result_dst = st_dst_reg(result_src);
1405 /* Limit writes to the channels that will be used by result_src later.
1406 * This does limit this temp's use as a temporary for multi-instruction
1407 * sequences.
1408 */
1409 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1410
1411 switch (ir->operation) {
1412 case ir_unop_logic_not:
1413 if (result_dst.type != GLSL_TYPE_FLOAT)
1414 emit_asm(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
1415 else {
1416 /* Previously 'SEQ dst, src, 0.0' was used for this. However, many
1417 * older GPUs implement SEQ using multiple instructions (i915 uses two
1418 * SGE instructions and a MUL instruction). Since our logic values are
1419 * 0.0 and 1.0, 1-x also implements !x.
1420 */
1421 op[0].negate = ~op[0].negate;
1422 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0],
1423 st_src_reg_for_float(1.0));
1424 }
1425 break;
1426 case ir_unop_neg:
1427 if (result_dst.type == GLSL_TYPE_INT64 ||
1428 result_dst.type == GLSL_TYPE_UINT64)
1429 emit_asm(ir, TGSI_OPCODE_I64NEG, result_dst, op[0]);
1430 else if (result_dst.type == GLSL_TYPE_INT ||
1431 result_dst.type == GLSL_TYPE_UINT)
1432 emit_asm(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
1433 else if (result_dst.type == GLSL_TYPE_DOUBLE)
1434 emit_asm(ir, TGSI_OPCODE_DNEG, result_dst, op[0]);
1435 else {
1436 op[0].negate = ~op[0].negate;
1437 result_src = op[0];
1438 }
1439 break;
1440 case ir_unop_subroutine_to_int:
1441 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
1442 break;
1443 case ir_unop_abs:
1444 if (result_dst.type == GLSL_TYPE_FLOAT)
1445 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0].get_abs());
1446 else if (result_dst.type == GLSL_TYPE_DOUBLE)
1447 emit_asm(ir, TGSI_OPCODE_DABS, result_dst, op[0]);
1448 else if (result_dst.type == GLSL_TYPE_INT64 ||
1449 result_dst.type == GLSL_TYPE_UINT64)
1450 emit_asm(ir, TGSI_OPCODE_I64ABS, result_dst, op[0]);
1451 else
1452 emit_asm(ir, TGSI_OPCODE_IABS, result_dst, op[0]);
1453 break;
1454 case ir_unop_sign:
1455 emit_asm(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
1456 break;
1457 case ir_unop_rcp:
1458 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
1459 break;
1460
1461 case ir_unop_exp2:
1462 emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
1463 break;
1464 case ir_unop_exp:
1465 assert(!"not reached: should be handled by exp_to_exp2");
1466 break;
1467 case ir_unop_log:
1468 assert(!"not reached: should be handled by log_to_log2");
1469 break;
1470 case ir_unop_log2:
1471 emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
1472 break;
1473 case ir_unop_sin:
1474 emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1475 break;
1476 case ir_unop_cos:
1477 emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1478 break;
1479 case ir_unop_saturate: {
1480 glsl_to_tgsi_instruction *inst;
1481 inst = emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
1482 inst->saturate = true;
1483 break;
1484 }
1485
1486 case ir_unop_dFdx:
1487 case ir_unop_dFdx_coarse:
1488 emit_asm(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
1489 break;
1490 case ir_unop_dFdx_fine:
1491 emit_asm(ir, TGSI_OPCODE_DDX_FINE, result_dst, op[0]);
1492 break;
1493 case ir_unop_dFdy:
1494 case ir_unop_dFdy_coarse:
1495 case ir_unop_dFdy_fine:
1496 {
1497 /* The X component contains 1 or -1 depending on whether the framebuffer
1498 * is an FBO or the window system buffer, respectively.
1499 * It is then multiplied with the source operand of DDY.
1500 */
1501 static const gl_state_index16 transform_y_state[STATE_LENGTH]
1502 = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM };
1503
1504 unsigned transform_y_index =
1505 _mesa_add_state_reference(this->prog->Parameters,
1506 transform_y_state);
1507
1508 st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
1509 transform_y_index,
1510 glsl_type::vec4_type);
1511 transform_y.swizzle = SWIZZLE_XXXX;
1512
1513 st_src_reg temp = get_temp(glsl_type::vec4_type);
1514
1515 emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(temp), transform_y, op[0]);
1516 emit_asm(ir, ir->operation == ir_unop_dFdy_fine ?
1517 TGSI_OPCODE_DDY_FINE : TGSI_OPCODE_DDY, result_dst, temp);
1518 break;
1519 }
1520
1521 case ir_unop_frexp_sig:
1522 emit_asm(ir, TGSI_OPCODE_DFRACEXP, result_dst, undef_dst, op[0]);
1523 break;
1524
1525 case ir_unop_frexp_exp:
1526 emit_asm(ir, TGSI_OPCODE_DFRACEXP, undef_dst, result_dst, op[0]);
1527 break;
1528
1529 case ir_unop_noise: {
1530 /* At some point, a motivated person could add a better
1531 * implementation of noise. Currently not even the nvidia
1532 * binary drivers do anything more than this. In any case, the
1533 * place to do this is in the GL state tracker, not the poor
1534 * driver.
1535 */
1536 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
1537 break;
1538 }
1539
1540 case ir_binop_add:
1541 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1542 break;
1543 case ir_binop_sub:
1544 op[1].negate = ~op[1].negate;
1545 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1546 break;
1547
1548 case ir_binop_mul:
1549 emit_asm(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1550 break;
1551 case ir_binop_div:
1552 emit_asm(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
1553 break;
1554 case ir_binop_mod:
1555 if (result_dst.type == GLSL_TYPE_FLOAT)
1556 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1557 else
1558 emit_asm(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
1559 break;
1560
1561 case ir_binop_less:
1562 emit_asm(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
1563 break;
1564 case ir_binop_gequal:
1565 emit_asm(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
1566 break;
1567 case ir_binop_equal:
1568 emit_asm(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1569 break;
1570 case ir_binop_nequal:
1571 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1572 break;
1573 case ir_binop_all_equal:
1574 /* "==" operator producing a scalar boolean. */
1575 if (ir->operands[0]->type->is_vector() ||
1576 ir->operands[1]->type->is_vector()) {
1577 st_src_reg temp = get_temp(native_integers ?
1578 glsl_type::uvec4_type :
1579 glsl_type::vec4_type);
1580
1581 if (native_integers) {
1582 st_dst_reg temp_dst = st_dst_reg(temp);
1583 st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);
1584
1585 if (ir->operands[0]->type->is_boolean() &&
1586 ir->operands[1]->as_constant() &&
1587 ir->operands[1]->as_constant()->is_one()) {
1588 emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), op[0]);
1589 } else {
1590 emit_asm(ir, TGSI_OPCODE_SEQ, st_dst_reg(temp), op[0], op[1]);
1591 }
1592
1593 /* Emit 1-3 AND operations to combine the SEQ results. */
1594 switch (ir->operands[0]->type->vector_elements) {
1595 case 2:
1596 break;
1597 case 3:
1598 temp_dst.writemask = WRITEMASK_Y;
1599 temp1.swizzle = SWIZZLE_YYYY;
1600 temp2.swizzle = SWIZZLE_ZZZZ;
1601 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
1602 break;
1603 case 4:
1604 temp_dst.writemask = WRITEMASK_X;
1605 temp1.swizzle = SWIZZLE_XXXX;
1606 temp2.swizzle = SWIZZLE_YYYY;
1607 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
1608 temp_dst.writemask = WRITEMASK_Y;
1609 temp1.swizzle = SWIZZLE_ZZZZ;
1610 temp2.swizzle = SWIZZLE_WWWW;
1611 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
1612 }
1613
1614 temp1.swizzle = SWIZZLE_XXXX;
1615 temp2.swizzle = SWIZZLE_YYYY;
1616 emit_asm(ir, TGSI_OPCODE_AND, result_dst, temp1, temp2);
1617 } else {
1618 emit_asm(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1619
1620 /* After the dot-product, the value will be an integer on the
1621 * range [0,4]. Zero becomes 1.0, and positive values become zero.
1622 */
1623 emit_dp(ir, result_dst, temp, temp, vector_elements);
1624
1625 /* Negating the result of the dot-product gives values on the range
1626 * [-4, 0]. Zero becomes 1.0, and negative values become zero.
1627 * This is achieved using SGE.
1628 */
1629 st_src_reg sge_src = result_src;
1630 sge_src.negate = ~sge_src.negate;
1631 emit_asm(ir, TGSI_OPCODE_SGE, result_dst, sge_src,
1632 st_src_reg_for_float(0.0));
1633 }
1634 } else {
1635 emit_asm(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1636 }
1637 break;
1638 case ir_binop_any_nequal:
1639 /* "!=" operator producing a scalar boolean. */
1640 if (ir->operands[0]->type->is_vector() ||
1641 ir->operands[1]->type->is_vector()) {
1642 st_src_reg temp = get_temp(native_integers ?
1643 glsl_type::uvec4_type :
1644 glsl_type::vec4_type);
1645 if (ir->operands[0]->type->is_boolean() &&
1646 ir->operands[1]->as_constant() &&
1647 ir->operands[1]->as_constant()->is_zero()) {
1648 emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), op[0]);
1649 } else {
1650 emit_asm(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1651 }
1652
1653 if (native_integers) {
1654 st_dst_reg temp_dst = st_dst_reg(temp);
1655 st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);
1656
1657 /* Emit 1-3 OR operations to combine the SNE results. */
1658 switch (ir->operands[0]->type->vector_elements) {
1659 case 2:
1660 break;
1661 case 3:
1662 temp_dst.writemask = WRITEMASK_Y;
1663 temp1.swizzle = SWIZZLE_YYYY;
1664 temp2.swizzle = SWIZZLE_ZZZZ;
1665 emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
1666 break;
1667 case 4:
1668 temp_dst.writemask = WRITEMASK_X;
1669 temp1.swizzle = SWIZZLE_XXXX;
1670 temp2.swizzle = SWIZZLE_YYYY;
1671 emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
1672 temp_dst.writemask = WRITEMASK_Y;
1673 temp1.swizzle = SWIZZLE_ZZZZ;
1674 temp2.swizzle = SWIZZLE_WWWW;
1675 emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
1676 }
1677
1678 temp1.swizzle = SWIZZLE_XXXX;
1679 temp2.swizzle = SWIZZLE_YYYY;
1680 emit_asm(ir, TGSI_OPCODE_OR, result_dst, temp1, temp2);
1681 } else {
1682 /* After the dot-product, the value will be an integer on the
1683 * range [0,4]. Zero stays zero, and positive values become 1.0.
1684 */
1685 glsl_to_tgsi_instruction *const dp =
1686 emit_dp(ir, result_dst, temp, temp, vector_elements);
1687 if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
1688 /* The clamping to [0,1] can be done for free in the fragment
1689 * shader with a saturate.
1690 */
1691 dp->saturate = true;
1692 } else {
1693 /* Negating the result of the dot-product gives values on the
1694 * range [-4, 0]. Zero stays zero, and negative values become
1695 * 1.0.  This is achieved using SLT.
1696 */
1697 st_src_reg slt_src = result_src;
1698 slt_src.negate = ~slt_src.negate;
1699 emit_asm(ir, TGSI_OPCODE_SLT, result_dst, slt_src,
1700 st_src_reg_for_float(0.0));
1701 }
1702 }
1703 } else {
1704 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1705 }
1706 break;
1707
1708 case ir_binop_logic_xor:
1709 if (native_integers)
1710 emit_asm(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
1711 else
1712 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1713 break;
1714
1715 case ir_binop_logic_or: {
1716 if (native_integers) {
1717 /* If integers are used as booleans, we can use an actual "or"
1718 * instruction.
1719 */
1720 assert(native_integers);
1721 emit_asm(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
1722 } else {
1723 /* After the addition, the value will be an integer on the
1724 * range [0,2]. Zero stays zero, and positive values become 1.0.
1725 */
1726 glsl_to_tgsi_instruction *add =
1727 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1728 if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
1729 /* The clamping to [0,1] can be done for free in the fragment
1730 * shader with a saturate if floats are being used as boolean
1731 * values.
1732 */
1733 add->saturate = true;
1734 } else {
1735 /* Negating the result of the addition gives values on the range
1736 * [-2, 0].  Zero stays zero, and negative values become 1.0.
1737 * This is achieved using SLT.
1738 */
1739 st_src_reg slt_src = result_src;
1740 slt_src.negate = ~slt_src.negate;
1741 emit_asm(ir, TGSI_OPCODE_SLT, result_dst, slt_src,
1742 st_src_reg_for_float(0.0));
1743 }
1744 }
1745 break;
1746 }
1747
1748 case ir_binop_logic_and:
1749 /* If native integers are disabled, the bool args are stored as float 0.0
1750 * or 1.0, so "mul" gives us "and". If they're enabled, just use the
1751 * actual AND opcode.
1752 */
1753 if (native_integers)
1754 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
1755 else
1756 emit_asm(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1757 break;
1758
1759 case ir_binop_dot:
1760 assert(ir->operands[0]->type->is_vector());
1761 assert(ir->operands[0]->type == ir->operands[1]->type);
1762 emit_dp(ir, result_dst, op[0], op[1],
1763 ir->operands[0]->type->vector_elements);
1764 break;
1765
1766 case ir_unop_sqrt:
1767 if (have_sqrt) {
1768 emit_scalar(ir, TGSI_OPCODE_SQRT, result_dst, op[0]);
1769 } else {
1770 /* This is the only instruction sequence that makes the game "Risen"
1771 * render correctly. ABS is not required for the game, but since GLSL
1772 * declares negative values as "undefined", allowing us to do whatever
1773 * we want, I choose to use ABS to match DX9 and pre-GLSL RSQ
1774 * behavior.
1775 */
1776 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0].get_abs());
1777 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, result_src);
1778 }
1779 break;
1780 case ir_unop_rsq:
1781 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1782 break;
1783 case ir_unop_i2f:
1784 if (native_integers) {
1785 emit_asm(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
1786 break;
1787 }
1788 /* fallthrough to next case otherwise */
1789 case ir_unop_b2f:
1790 if (native_integers) {
1791 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0],
1792 st_src_reg_for_float(1.0));
1793 break;
1794 }
1795 /* fallthrough to next case otherwise */
1796 case ir_unop_i2u:
1797 case ir_unop_u2i:
1798 case ir_unop_i642u64:
1799 case ir_unop_u642i64:
1800 /* Converting between signed and unsigned integers is a no-op. */
1801 result_src = op[0];
1802 result_src.type = result_dst.type;
1803 break;
1804 case ir_unop_b2i:
1805 if (native_integers) {
1806 /* Booleans are stored as integers using ~0 for true and 0 for false.
1807 * GLSL requires that int(bool) return 1 for true and 0 for false.
1808 * This conversion is done with AND, but it could be done with NEG.
1809 */
1810 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0],
1811 st_src_reg_for_int(1));
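         /* Worked example: true is stored as ~0 (all bits set), so
          * AND(~0, 1) = 1 and AND(0, 1) = 0, matching int(bool).
          */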
1812 } else {
1813 /* Booleans and integers are both stored as floats when native
1814 * integers are disabled.
1815 */
1816 result_src = op[0];
1817 }
1818 break;
1819 case ir_unop_f2i:
1820 if (native_integers)
1821 emit_asm(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
1822 else
1823 emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1824 break;
1825 case ir_unop_f2u:
1826 if (native_integers)
1827 emit_asm(ir, TGSI_OPCODE_F2U, result_dst, op[0]);
1828 else
1829 emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1830 break;
1831 case ir_unop_bitcast_f2i:
1832 case ir_unop_bitcast_f2u:
1833 /* Make sure we don't propagate the negate modifier to integer opcodes. */
1834 if (op[0].negate || op[0].abs)
1835 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
1836 else
1837 result_src = op[0];
1838 result_src.type = ir->operation == ir_unop_bitcast_f2i ? GLSL_TYPE_INT :
1839 GLSL_TYPE_UINT;
1840 break;
1841 case ir_unop_bitcast_i2f:
1842 case ir_unop_bitcast_u2f:
1843 result_src = op[0];
1844 result_src.type = GLSL_TYPE_FLOAT;
1845 break;
1846 case ir_unop_f2b:
1847 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
1848 st_src_reg_for_float(0.0));
1849 break;
1850 case ir_unop_d2b:
1851 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
1852 st_src_reg_for_double(0.0));
1853 break;
1854 case ir_unop_i2b:
1855 if (native_integers)
1856 emit_asm(ir, TGSI_OPCODE_USNE, result_dst, op[0],
1857 st_src_reg_for_int(0));
1858 else
1859 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
1860 st_src_reg_for_float(0.0));
1861 break;
1862 case ir_unop_bitcast_u642d:
1863 case ir_unop_bitcast_i642d:
1864 result_src = op[0];
1865 result_src.type = GLSL_TYPE_DOUBLE;
1866 break;
1867 case ir_unop_bitcast_d2i64:
1868 result_src = op[0];
1869 result_src.type = GLSL_TYPE_INT64;
1870 break;
1871 case ir_unop_bitcast_d2u64:
1872 result_src = op[0];
1873 result_src.type = GLSL_TYPE_UINT64;
1874 break;
1875 case ir_unop_trunc:
1876 emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1877 break;
1878 case ir_unop_ceil:
1879 emit_asm(ir, TGSI_OPCODE_CEIL, result_dst, op[0]);
1880 break;
1881 case ir_unop_floor:
1882 emit_asm(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1883 break;
1884 case ir_unop_round_even:
1885 emit_asm(ir, TGSI_OPCODE_ROUND, result_dst, op[0]);
1886 break;
1887 case ir_unop_fract:
1888 emit_asm(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
1889 break;
1890
1891 case ir_binop_min:
1892 emit_asm(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
1893 break;
1894 case ir_binop_max:
1895 emit_asm(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
1896 break;
1897 case ir_binop_pow:
1898 emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
1899 break;
1900
1901 case ir_unop_bit_not:
1902 if (native_integers) {
1903 emit_asm(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
1904 break;
1905 }
1906 case ir_unop_u2f:
1907 if (native_integers) {
1908 emit_asm(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
1909 break;
1910 }
1911 case ir_binop_lshift:
1912 case ir_binop_rshift:
1913 if (native_integers) {
1914 enum tgsi_opcode opcode = ir->operation == ir_binop_lshift
1915 ? TGSI_OPCODE_SHL : TGSI_OPCODE_ISHR;
1916 st_src_reg count;
1917
1918 if (glsl_base_type_is_64bit(op[0].type)) {
1919 /* GLSL shift operations have 32-bit shift counts, but TGSI uses
1920 * 64 bits.
1921 */
1922 count = get_temp(glsl_type::u64vec(ir->operands[1]
1923 ->type->components()));
1924 emit_asm(ir, TGSI_OPCODE_U2I64, st_dst_reg(count), op[1]);
1925 } else {
1926 count = op[1];
1927 }
1928
1929 emit_asm(ir, opcode, result_dst, op[0], count);
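         /* Sketch of the emitted sequence for a 64-bit left shift (register
          * names illustrative only):
          *
          *    U2I64 TEMP[n], count32     ; widen the 32-bit GLSL shift count
          *    SHL   dst, src64, TEMP[n]  ; ISHR instead for a right shift
          */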
1930 break;
1931 }
1932 case ir_binop_bit_and:
1933 if (native_integers) {
1934 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
1935 break;
1936 }
1937 case ir_binop_bit_xor:
1938 if (native_integers) {
1939 emit_asm(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
1940 break;
1941 }
1942 case ir_binop_bit_or:
1943 if (native_integers) {
1944 emit_asm(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
1945 break;
1946 }
1947
1948 assert(!"GLSL 1.30 features unsupported");
1949 break;
1950
1951 case ir_binop_ubo_load: {
1952 if (ctx->Const.UseSTD430AsDefaultPacking) {
1953 ir_rvalue *block = ir->operands[0];
1954 ir_rvalue *offset = ir->operands[1];
1955 ir_constant *const_block = block->as_constant();
1956
1957 st_src_reg cbuf(PROGRAM_CONSTANT,
1958 (const_block ? const_block->value.u[0] + 1 : 1),
1959 ir->type->base_type);
1960
1961 cbuf.has_index2 = true;
1962
1963 if (!const_block) {
1964 block->accept(this);
1965 cbuf.reladdr = ralloc(mem_ctx, st_src_reg);
1966 *cbuf.reladdr = this->result;
1967 emit_arl(ir, sampler_reladdr, this->result);
1968 }
1969
1970 /* Calculate the surface offset */
1971 offset->accept(this);
1972 st_src_reg off = this->result;
1973
1974 glsl_to_tgsi_instruction *inst =
1975 emit_asm(ir, TGSI_OPCODE_LOAD, result_dst, off);
1976
1977 if (result_dst.type == GLSL_TYPE_BOOL)
1978 emit_asm(ir, TGSI_OPCODE_USNE, result_dst, st_src_reg(result_dst),
1979 st_src_reg_for_int(0));
1980
1981 add_buffer_to_load_and_stores(inst, &cbuf, &this->instructions,
1982 NULL);
1983 } else {
1984 ir_constant *const_uniform_block = ir->operands[0]->as_constant();
1985 ir_constant *const_offset_ir = ir->operands[1]->as_constant();
1986 unsigned const_offset = const_offset_ir ?
1987 const_offset_ir->value.u[0] : 0;
1988 unsigned const_block = const_uniform_block ?
1989 const_uniform_block->value.u[0] + 1 : 1;
1990 st_src_reg index_reg = get_temp(glsl_type::uint_type);
1991 st_src_reg cbuf;
1992
1993 cbuf.type = ir->type->base_type;
1994 cbuf.file = PROGRAM_CONSTANT;
1995 cbuf.index = 0;
1996 cbuf.reladdr = NULL;
1997 cbuf.negate = 0;
1998 cbuf.abs = 0;
1999 cbuf.index2D = const_block;
2000
2001 assert(ir->type->is_vector() || ir->type->is_scalar());
2002
2003 if (const_offset_ir) {
2004 /* Constant index into constant buffer */
2005 cbuf.reladdr = NULL;
2006 cbuf.index = const_offset / 16;
2007 } else {
2008 ir_expression *offset_expr = ir->operands[1]->as_expression();
2009 st_src_reg offset = op[1];
2010
2011 /* The OpenGL spec is written in such a way that accesses with
2012 * non-constant offset are almost always vec4-aligned. The only
2013 * exception to this are members of structs in arrays of structs:
2014 * each struct in an array of structs is at least vec4-aligned,
2015 * but single-element and [ui]vec2 members of the struct may be at
2016 * an offset that is not a multiple of 16 bytes.
2017 *
2018 * Here, we extract that offset, relying on previous passes to
2019 * always generate offset expressions of the form
2020 * (+ expr constant_offset).
2021 *
2022 * Note that the std430 layout, which allows more cases of
2023 * alignment less than vec4 in arrays, is not supported for
2024 * uniform blocks, so we do not have to deal with it here.
2025 */
2026 if (offset_expr && offset_expr->operation == ir_binop_add) {
2027 const_offset_ir = offset_expr->operands[1]->as_constant();
2028 if (const_offset_ir) {
2029 const_offset = const_offset_ir->value.u[0];
2030 cbuf.index = const_offset / 16;
2031 offset_expr->operands[0]->accept(this);
2032 offset = this->result;
2033 }
2034 }
2035
2036 /* Relative/variable index into constant buffer */
2037 emit_asm(ir, TGSI_OPCODE_USHR, st_dst_reg(index_reg), offset,
2038 st_src_reg_for_int(4));
2039 cbuf.reladdr = ralloc(mem_ctx, st_src_reg);
2040 memcpy(cbuf.reladdr, &index_reg, sizeof(index_reg));
2041 }
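         /* Hypothetical std140 case hitting this path:
          *
          *    struct S { vec3 v; float f; };
          *    uniform Block { S s[4]; } b;
          *    ... b.s[i].f ...          // offset expression (+ (* i 16) 12)
          *
          * The constant 12 is folded into cbuf.index and the swizzle chosen
          * below, while the variable part feeds the USHR/reladdr path above.
          */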
2042
2043 if (const_uniform_block) {
2044 /* Constant constant buffer */
2045 cbuf.reladdr2 = NULL;
2046 } else {
2047 /* Relative/variable constant buffer */
2048 cbuf.reladdr2 = ralloc(mem_ctx, st_src_reg);
2049 memcpy(cbuf.reladdr2, &op[0], sizeof(st_src_reg));
2050 }
2051 cbuf.has_index2 = true;
2052
2053 cbuf.swizzle = swizzle_for_size(ir->type->vector_elements);
2054 if (glsl_base_type_is_64bit(cbuf.type))
2055 cbuf.swizzle += MAKE_SWIZZLE4(const_offset % 16 / 8,
2056 const_offset % 16 / 8,
2057 const_offset % 16 / 8,
2058 const_offset % 16 / 8);
2059 else
2060 cbuf.swizzle += MAKE_SWIZZLE4(const_offset % 16 / 4,
2061 const_offset % 16 / 4,
2062 const_offset % 16 / 4,
2063 const_offset % 16 / 4);
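         /* Example (32-bit scalar load): const_offset == 12 adds
          * 12 % 16 / 4 == 3 to every channel, turning .xxxx into .wwww so
          * the value is read from the fourth dword of its vec4 slot; the
          * 64-bit path does the same in 8-byte components.
          */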
2064
2065 if (ir->type->is_boolean()) {
2066 emit_asm(ir, TGSI_OPCODE_USNE, result_dst, cbuf,
2067 st_src_reg_for_int(0));
2068 } else {
2069 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, cbuf);
2070 }
2071 }
2072 break;
2073 }
2074 case ir_triop_lrp:
2075       /* note: we have to reorder the three args here: TGSI LRP computes src0*src1 + (1-src0)*src2, so mix(x, y, a) maps to LRP a, y, x */
2076 emit_asm(ir, TGSI_OPCODE_LRP, result_dst, op[2], op[1], op[0]);
2077 break;
2078 case ir_triop_csel:
2079 if (this->ctx->Const.NativeIntegers)
2080 emit_asm(ir, TGSI_OPCODE_UCMP, result_dst, op[0], op[1], op[2]);
2081 else {
2082 op[0].negate = ~op[0].negate;
2083 emit_asm(ir, TGSI_OPCODE_CMP, result_dst, op[0], op[1], op[2]);
2084 }
2085 break;
2086 case ir_triop_bitfield_extract:
2087 emit_asm(ir, TGSI_OPCODE_IBFE, result_dst, op[0], op[1], op[2]);
2088 break;
2089 case ir_quadop_bitfield_insert:
2090 emit_asm(ir, TGSI_OPCODE_BFI, result_dst, op[0], op[1], op[2], op[3]);
2091 break;
2092 case ir_unop_bitfield_reverse:
2093 emit_asm(ir, TGSI_OPCODE_BREV, result_dst, op[0]);
2094 break;
2095 case ir_unop_bit_count:
2096 emit_asm(ir, TGSI_OPCODE_POPC, result_dst, op[0]);
2097 break;
2098 case ir_unop_find_msb:
2099 emit_asm(ir, TGSI_OPCODE_IMSB, result_dst, op[0]);
2100 break;
2101 case ir_unop_find_lsb:
2102 emit_asm(ir, TGSI_OPCODE_LSB, result_dst, op[0]);
2103 break;
2104 case ir_binop_imul_high:
2105 emit_asm(ir, TGSI_OPCODE_IMUL_HI, result_dst, op[0], op[1]);
2106 break;
2107 case ir_triop_fma:
2108       /* In theory, MAD is incorrect here: fma() requires a single fused rounding, which MAD does not guarantee. */
2109 if (have_fma)
2110 emit_asm(ir, TGSI_OPCODE_FMA, result_dst, op[0], op[1], op[2]);
2111 else
2112 emit_asm(ir, TGSI_OPCODE_MAD, result_dst, op[0], op[1], op[2]);
2113 break;
2114 case ir_unop_interpolate_at_centroid:
2115 emit_asm(ir, TGSI_OPCODE_INTERP_CENTROID, result_dst, op[0]);
2116 break;
2117 case ir_binop_interpolate_at_offset: {
2118 /* The y coordinate needs to be flipped for the default fb */
2119 static const gl_state_index16 transform_y_state[STATE_LENGTH]
2120 = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM };
2121
2122 unsigned transform_y_index =
2123 _mesa_add_state_reference(this->prog->Parameters,
2124 transform_y_state);
2125
2126 st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
2127 transform_y_index,
2128 glsl_type::vec4_type);
2129 transform_y.swizzle = SWIZZLE_XXXX;
2130
2131 st_src_reg temp = get_temp(glsl_type::vec2_type);
2132 st_dst_reg temp_dst = st_dst_reg(temp);
2133
2134 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[1]);
2135 temp_dst.writemask = WRITEMASK_Y;
2136 emit_asm(ir, TGSI_OPCODE_MUL, temp_dst, transform_y, op[1]);
2137 emit_asm(ir, TGSI_OPCODE_INTERP_OFFSET, result_dst, op[0], temp);
2138 break;
2139 }
2140 case ir_binop_interpolate_at_sample:
2141 emit_asm(ir, TGSI_OPCODE_INTERP_SAMPLE, result_dst, op[0], op[1]);
2142 break;
2143
2144 case ir_unop_d2f:
2145 emit_asm(ir, TGSI_OPCODE_D2F, result_dst, op[0]);
2146 break;
2147 case ir_unop_f2d:
2148 emit_asm(ir, TGSI_OPCODE_F2D, result_dst, op[0]);
2149 break;
2150 case ir_unop_d2i:
2151 emit_asm(ir, TGSI_OPCODE_D2I, result_dst, op[0]);
2152 break;
2153 case ir_unop_i2d:
2154 emit_asm(ir, TGSI_OPCODE_I2D, result_dst, op[0]);
2155 break;
2156 case ir_unop_d2u:
2157 emit_asm(ir, TGSI_OPCODE_D2U, result_dst, op[0]);
2158 break;
2159 case ir_unop_u2d:
2160 emit_asm(ir, TGSI_OPCODE_U2D, result_dst, op[0]);
2161 break;
2162 case ir_unop_unpack_double_2x32:
2163 case ir_unop_pack_double_2x32:
2164 case ir_unop_unpack_int_2x32:
2165 case ir_unop_pack_int_2x32:
2166 case ir_unop_unpack_uint_2x32:
2167 case ir_unop_pack_uint_2x32:
2168 case ir_unop_unpack_sampler_2x32:
2169 case ir_unop_pack_sampler_2x32:
2170 case ir_unop_unpack_image_2x32:
2171 case ir_unop_pack_image_2x32:
2172 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
2173 break;
2174
2175 case ir_binop_ldexp:
2176 if (ir->operands[0]->type->is_double()) {
2177 emit_asm(ir, TGSI_OPCODE_DLDEXP, result_dst, op[0], op[1]);
2178 } else if (ir->operands[0]->type->is_float()) {
2179 emit_asm(ir, TGSI_OPCODE_LDEXP, result_dst, op[0], op[1]);
2180 } else {
2181 assert(!"Invalid ldexp for non-double opcode in glsl_to_tgsi_visitor::visit()");
2182 }
2183 break;
2184
2185 case ir_unop_pack_half_2x16:
2186 emit_asm(ir, TGSI_OPCODE_PK2H, result_dst, op[0]);
2187 break;
2188 case ir_unop_unpack_half_2x16:
2189 emit_asm(ir, TGSI_OPCODE_UP2H, result_dst, op[0]);
2190 break;
2191
2192 case ir_unop_get_buffer_size: {
2193 ir_constant *const_offset = ir->operands[0]->as_constant();
2194 int buf_base = ctx->st->has_hw_atomics
2195 ? 0 : ctx->Const.Program[shader->Stage].MaxAtomicBuffers;
2196 st_src_reg buffer(
2197 PROGRAM_BUFFER,
2198 buf_base + (const_offset ? const_offset->value.u[0] : 0),
2199 GLSL_TYPE_UINT);
2200 if (!const_offset) {
2201 buffer.reladdr = ralloc(mem_ctx, st_src_reg);
2202 *buffer.reladdr = op[0];
2203 emit_arl(ir, sampler_reladdr, op[0]);
2204 }
2205 emit_asm(ir, TGSI_OPCODE_RESQ, result_dst)->resource = buffer;
2206 break;
2207 }
2208
2209 case ir_unop_u2i64:
2210 case ir_unop_u2u64:
2211 case ir_unop_b2i64: {
2212 st_src_reg temp = get_temp(glsl_type::uvec4_type);
2213 st_dst_reg temp_dst = st_dst_reg(temp);
2214 unsigned orig_swz = op[0].swizzle;
2215 /*
2216 * To convert unsigned to 64-bit:
2217 * zero Y channel, copy X channel.
2218 */
2219 temp_dst.writemask = WRITEMASK_Y;
2220 if (vector_elements > 1)
2221 temp_dst.writemask |= WRITEMASK_W;
2222 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, st_src_reg_for_int(0));
2223 temp_dst.writemask = WRITEMASK_X;
2224 if (vector_elements > 1)
2225 temp_dst.writemask |= WRITEMASK_Z;
2226 op[0].swizzle = MAKE_SWIZZLE4(GET_SWZ(orig_swz, 0), GET_SWZ(orig_swz, 0),
2227 GET_SWZ(orig_swz, 1), GET_SWZ(orig_swz, 1));
2228 if (ir->operation == ir_unop_u2i64 || ir->operation == ir_unop_u2u64)
2229 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
2230 else
2231 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, op[0], st_src_reg_for_int(1));
2232 result_src = temp;
2233 result_src.type = GLSL_TYPE_UINT64;
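      /* Resulting layout for the first two components (illustrative): the
       * low dwords land in .x/.z and the zeroed high dwords in .y/.w, e.g.
       * a u64vec2 source of (5, 7) becomes TEMP = { 5, 0, 7, 0 }; b2i64
       * additionally masks the source with 1.
       */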
2234 if (vector_elements > 2) {
2235 /* Subtle: We rely on the fact that get_temp here returns the next
2236 * TGSI temporary register directly after the temp register used for
2237 * the first two components, so that the result gets picked up
2238 * automatically.
2239 */
2240 st_src_reg temp = get_temp(glsl_type::uvec4_type);
2241 st_dst_reg temp_dst = st_dst_reg(temp);
2242 temp_dst.writemask = WRITEMASK_Y;
2243 if (vector_elements > 3)
2244 temp_dst.writemask |= WRITEMASK_W;
2245 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, st_src_reg_for_int(0));
2246
2247 temp_dst.writemask = WRITEMASK_X;
2248 if (vector_elements > 3)
2249 temp_dst.writemask |= WRITEMASK_Z;
2250 op[0].swizzle = MAKE_SWIZZLE4(GET_SWZ(orig_swz, 2),
2251 GET_SWZ(orig_swz, 2),
2252 GET_SWZ(orig_swz, 3),
2253 GET_SWZ(orig_swz, 3));
2254 if (ir->operation == ir_unop_u2i64 || ir->operation == ir_unop_u2u64)
2255 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
2256 else
2257 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, op[0],
2258 st_src_reg_for_int(1));
2259 }
2260 break;
2261 }
2262 case ir_unop_i642i:
2263 case ir_unop_u642i:
2264 case ir_unop_u642u:
2265 case ir_unop_i642u: {
2266 st_src_reg temp = get_temp(glsl_type::uvec4_type);
2267 st_dst_reg temp_dst = st_dst_reg(temp);
2268 unsigned orig_swz = op[0].swizzle;
2269 unsigned orig_idx = op[0].index;
2270 int el;
2271 temp_dst.writemask = WRITEMASK_X;
2272
2273 for (el = 0; el < vector_elements; el++) {
2274 unsigned swz = GET_SWZ(orig_swz, el);
2275 if (swz & 1)
2276 op[0].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_Z,
2277 SWIZZLE_Z, SWIZZLE_Z);
2278 else
2279 op[0].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
2280 SWIZZLE_X, SWIZZLE_X);
2281 if (swz > 2)
2282 op[0].index = orig_idx + 1;
2283 op[0].type = GLSL_TYPE_UINT;
2284 temp_dst.writemask = WRITEMASK_X << el;
2285 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
2286 }
2287 result_src = temp;
2288 if (ir->operation == ir_unop_u642u || ir->operation == ir_unop_i642u)
2289 result_src.type = GLSL_TYPE_UINT;
2290 else
2291 result_src.type = GLSL_TYPE_INT;
2292 break;
2293 }
2294 case ir_unop_i642b:
2295 emit_asm(ir, TGSI_OPCODE_U64SNE, result_dst, op[0],
2296 st_src_reg_for_int64(0));
2297 break;
2298 case ir_unop_i642f:
2299 emit_asm(ir, TGSI_OPCODE_I642F, result_dst, op[0]);
2300 break;
2301 case ir_unop_u642f:
2302 emit_asm(ir, TGSI_OPCODE_U642F, result_dst, op[0]);
2303 break;
2304 case ir_unop_i642d:
2305 emit_asm(ir, TGSI_OPCODE_I642D, result_dst, op[0]);
2306 break;
2307 case ir_unop_u642d:
2308 emit_asm(ir, TGSI_OPCODE_U642D, result_dst, op[0]);
2309 break;
2310 case ir_unop_i2i64:
2311 emit_asm(ir, TGSI_OPCODE_I2I64, result_dst, op[0]);
2312 break;
2313 case ir_unop_f2i64:
2314 emit_asm(ir, TGSI_OPCODE_F2I64, result_dst, op[0]);
2315 break;
2316 case ir_unop_d2i64:
2317 emit_asm(ir, TGSI_OPCODE_D2I64, result_dst, op[0]);
2318 break;
2319 case ir_unop_i2u64:
2320 emit_asm(ir, TGSI_OPCODE_I2I64, result_dst, op[0]);
2321 break;
2322 case ir_unop_f2u64:
2323 emit_asm(ir, TGSI_OPCODE_F2U64, result_dst, op[0]);
2324 break;
2325 case ir_unop_d2u64:
2326 emit_asm(ir, TGSI_OPCODE_D2U64, result_dst, op[0]);
2327 break;
2328    /* These pack/unpack cases might need handling eventually; for now they
2329     * fall through to the assert below. */
2329 case ir_unop_pack_snorm_2x16:
2330 case ir_unop_pack_unorm_2x16:
2331 case ir_unop_pack_snorm_4x8:
2332 case ir_unop_pack_unorm_4x8:
2333
2334 case ir_unop_unpack_snorm_2x16:
2335 case ir_unop_unpack_unorm_2x16:
2336 case ir_unop_unpack_snorm_4x8:
2337 case ir_unop_unpack_unorm_4x8:
2338
2339 case ir_quadop_vector:
2340 case ir_binop_vector_extract:
2341 case ir_triop_vector_insert:
2342 case ir_binop_carry:
2343 case ir_binop_borrow:
2344 case ir_unop_ssbo_unsized_array_length:
2345 /* This operation is not supported, or should have already been handled.
2346 */
2347 assert(!"Invalid ir opcode in glsl_to_tgsi_visitor::visit()");
2348 break;
2349 }
2350
2351 this->result = result_src;
2352 }
2353
2354
2355 void
2356 glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
2357 {
2358 st_src_reg src;
2359 int i;
2360 int swizzle[4];
2361
2362 /* Note that this is only swizzles in expressions, not those on the left
2363 * hand side of an assignment, which do write masking. See ir_assignment
2364 * for that.
2365 */
2366
2367 ir->val->accept(this);
2368 src = this->result;
2369 assert(src.file != PROGRAM_UNDEFINED);
2370 assert(ir->type->vector_elements > 0);
2371
2372 for (i = 0; i < 4; i++) {
2373 if (i < ir->type->vector_elements) {
2374 switch (i) {
2375 case 0:
2376 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
2377 break;
2378 case 1:
2379 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
2380 break;
2381 case 2:
2382 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
2383 break;
2384 case 3:
2385 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
2386 break;
2387 }
2388 } else {
2389 /* If the type is smaller than a vec4, replicate the last
2390 * channel out.
2391 */
2392 swizzle[i] = swizzle[ir->type->vector_elements - 1];
2393 }
2394 }
2395
2396 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
2397
2398 this->result = src;
2399 }
2400
2401 /* Test if the variable is an array. Note that geometry and
2402  * tessellation shader inputs and outputs are always arrays (except
2403 * for patch inputs), so only the array element type is considered.
2404 */
2405 static bool
2406 is_inout_array(unsigned stage, ir_variable *var, bool *remove_array)
2407 {
2408 const glsl_type *type = var->type;
2409
2410 *remove_array = false;
2411
2412 if ((stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
2413 (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out))
2414 return false;
2415
2416 if (((stage == MESA_SHADER_GEOMETRY && var->data.mode == ir_var_shader_in) ||
2417 (stage == MESA_SHADER_TESS_EVAL && var->data.mode == ir_var_shader_in) ||
2418 stage == MESA_SHADER_TESS_CTRL) &&
2419 !var->data.patch) {
2420 if (!var->type->is_array())
2421 return false; /* a system value probably */
2422
2423 type = var->type->fields.array;
2424 *remove_array = true;
2425 }
2426
2427 return type->is_array() || type->is_matrix();
2428 }
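/* Example of the distinction made above for a geometry shader:
 *
 *    in vec4 color[];   // per-vertex dimension only -> not an inout array
 *    in mat4 mvp[];     // element type is a matrix  -> treated as an array
 *
 * Only the type behind the per-vertex dimension decides the result.
 */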
2429
2430 static unsigned
2431 st_translate_interp_loc(ir_variable *var)
2432 {
2433 if (var->data.centroid)
2434 return TGSI_INTERPOLATE_LOC_CENTROID;
2435 else if (var->data.sample)
2436 return TGSI_INTERPOLATE_LOC_SAMPLE;
2437 else
2438 return TGSI_INTERPOLATE_LOC_CENTER;
2439 }
2440
2441 void
2442 glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
2443 {
2444 variable_storage *entry;
2445 ir_variable *var = ir->var;
2446 bool remove_array;
2447
2448 if (handle_bound_deref(ir->as_dereference()))
2449 return;
2450
2451 entry = find_variable_storage(ir->var);
2452
2453 if (!entry) {
2454 switch (var->data.mode) {
2455 case ir_var_uniform:
2456 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
2457 var->data.param_index);
2458 _mesa_hash_table_insert(this->variables, var, entry);
2459 break;
2460 case ir_var_shader_in: {
2461 /* The linker assigns locations for varyings and attributes,
2462           * including deprecated builtins (like gl_Color), user-assigned
2463           * generic attributes (glBindAttribLocation), and
2464 * user-defined varyings.
2465 */
2466 assert(var->data.location != -1);
2467
2468 const glsl_type *type_without_array = var->type->without_array();
2469 struct inout_decl *decl = &inputs[num_inputs];
2470 unsigned component = var->data.location_frac;
2471 unsigned num_components;
2472 num_inputs++;
2473
2474 if (type_without_array->is_64bit())
2475 component = component / 2;
2476 if (type_without_array->vector_elements)
2477 num_components = type_without_array->vector_elements;
2478 else
2479 num_components = 4;
2480
2481 decl->mesa_index = var->data.location;
2482 decl->interp = (glsl_interp_mode) var->data.interpolation;
2483 decl->interp_loc = st_translate_interp_loc(var);
2484 decl->base_type = type_without_array->base_type;
2485 decl->usage_mask = u_bit_consecutive(component, num_components);
2486
2487 if (is_inout_array(shader->Stage, var, &remove_array)) {
2488 decl->array_id = num_input_arrays + 1;
2489 num_input_arrays++;
2490 } else {
2491 decl->array_id = 0;
2492 }
2493
2494 if (remove_array)
2495 decl->size = type_size(var->type->fields.array);
2496 else
2497 decl->size = type_size(var->type);
2498
2499 entry = new(mem_ctx) variable_storage(var,
2500 PROGRAM_INPUT,
2501 decl->mesa_index,
2502 decl->array_id);
2503 entry->component = component;
2504
2505 _mesa_hash_table_insert(this->variables, var, entry);
2506
2507 break;
2508 }
2509 case ir_var_shader_out: {
2510 assert(var->data.location != -1);
2511
2512 const glsl_type *type_without_array = var->type->without_array();
2513 struct inout_decl *decl = &outputs[num_outputs];
2514 unsigned component = var->data.location_frac;
2515 unsigned num_components;
2516 num_outputs++;
2517
2518 decl->invariant = var->data.invariant;
2519
2520 if (type_without_array->is_64bit())
2521 component = component / 2;
2522 if (type_without_array->vector_elements)
2523 num_components = type_without_array->vector_elements;
2524 else
2525 num_components = 4;
2526
2527 decl->mesa_index = var->data.location + FRAG_RESULT_MAX * var->data.index;
2528 decl->base_type = type_without_array->base_type;
2529 decl->usage_mask = u_bit_consecutive(component, num_components);
2530 if (var->data.stream & (1u << 31)) {
2531 decl->gs_out_streams = var->data.stream & ~(1u << 31);
2532 } else {
2533 assert(var->data.stream < 4);
2534 decl->gs_out_streams = 0;
2535 for (unsigned i = 0; i < num_components; ++i)
2536 decl->gs_out_streams |= var->data.stream << (2 * (component + i));
2537 }
2538
2539 if (is_inout_array(shader->Stage, var, &remove_array)) {
2540 decl->array_id = num_output_arrays + 1;
2541 num_output_arrays++;
2542 } else {
2543 decl->array_id = 0;
2544 }
2545
2546 if (remove_array)
2547 decl->size = type_size(var->type->fields.array);
2548 else
2549 decl->size = type_size(var->type);
2550
2551 if (var->data.fb_fetch_output) {
2552 st_dst_reg dst = st_dst_reg(get_temp(var->type));
2553 st_src_reg src = st_src_reg(PROGRAM_OUTPUT, decl->mesa_index,
2554 var->type, component, decl->array_id);
2555 emit_asm(NULL, TGSI_OPCODE_FBFETCH, dst, src);
2556 entry = new(mem_ctx) variable_storage(var, dst.file, dst.index,
2557 dst.array_id);
2558 } else {
2559 entry = new(mem_ctx) variable_storage(var,
2560 PROGRAM_OUTPUT,
2561 decl->mesa_index,
2562 decl->array_id);
2563 }
2564 entry->component = component;
2565
2566 _mesa_hash_table_insert(this->variables, var, entry);
2567
2568 break;
2569 }
2570 case ir_var_system_value:
2571 entry = new(mem_ctx) variable_storage(var,
2572 PROGRAM_SYSTEM_VALUE,
2573 var->data.location);
2574 break;
2575 case ir_var_auto:
2576 case ir_var_temporary:
2577 st_src_reg src = get_temp(var->type);
2578
2579 entry = new(mem_ctx) variable_storage(var, src.file, src.index,
2580 src.array_id);
2581 _mesa_hash_table_insert(this->variables, var, entry);
2582
2583 break;
2584 }
2585
2586 if (!entry) {
2587 printf("Failed to make storage for %s\n", var->name);
2588 exit(1);
2589 }
2590 }
2591
2592 this->result = st_src_reg(entry->file, entry->index, var->type,
2593 entry->component, entry->array_id);
2594 if (this->shader->Stage == MESA_SHADER_VERTEX &&
2595 var->data.mode == ir_var_shader_in &&
2596 var->type->without_array()->is_double())
2597 this->result.is_double_vertex_input = true;
2598 if (!native_integers)
2599 this->result.type = GLSL_TYPE_FLOAT;
2600 }
2601
2602 static void
2603 shrink_array_declarations(struct inout_decl *decls, unsigned count,
2604 GLbitfield64* usage_mask,
2605 GLbitfield64 double_usage_mask,
2606 GLbitfield* patch_usage_mask)
2607 {
2608 unsigned i;
2609 int j;
2610
2611 /* Fix array declarations by removing unused array elements at both ends
2612     * of the arrays. For example, mat4[3] where only element [1] is used.
2613 */
2614 for (i = 0; i < count; i++) {
2615 struct inout_decl *decl = &decls[i];
2616 if (!decl->array_id)
2617 continue;
2618
2619 /* Shrink the beginning. */
2620 for (j = 0; j < (int)decl->size; j++) {
2621 if (decl->mesa_index >= VARYING_SLOT_PATCH0) {
2622 if (*patch_usage_mask &
2623 BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j))
2624 break;
2625 }
2626 else {
2627 if (*usage_mask & BITFIELD64_BIT(decl->mesa_index+j))
2628 break;
2629 if (double_usage_mask & BITFIELD64_BIT(decl->mesa_index+j-1))
2630 break;
2631 }
2632
2633 decl->mesa_index++;
2634 decl->size--;
2635 j--;
2636 }
2637
2638 /* Shrink the end. */
2639 for (j = decl->size-1; j >= 0; j--) {
2640 if (decl->mesa_index >= VARYING_SLOT_PATCH0) {
2641 if (*patch_usage_mask &
2642 BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j))
2643 break;
2644 }
2645 else {
2646 if (*usage_mask & BITFIELD64_BIT(decl->mesa_index+j))
2647 break;
2648 if (double_usage_mask & BITFIELD64_BIT(decl->mesa_index+j-1))
2649 break;
2650 }
2651
2652 decl->size--;
2653 }
2654
2655 /* When not all entries of an array are accessed, we mark them as used
2656 * here anyway, to ensure that the input/output mapping logic doesn't get
2657 * confused.
2658 *
2659 * TODO This happens when an array isn't used via indirect access, which
2660 * some game ports do (at least eON-based). There is an optimization
2661 * opportunity here by replacing the array declaration with non-array
2662 * declarations of those slots that are actually used.
2663 */
2664 for (j = 1; j < (int)decl->size; ++j) {
2665 if (decl->mesa_index >= VARYING_SLOT_PATCH0)
2666 *patch_usage_mask |= BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j);
2667 else
2668 *usage_mask |= BITFIELD64_BIT(decl->mesa_index + j);
2669 }
2670 }
2671 }
2672
2673 void
2674 glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
2675 {
2676 ir_constant *index;
2677 st_src_reg src;
2678 bool is_2D = false;
2679 ir_variable *var = ir->variable_referenced();
2680
2681 if (handle_bound_deref(ir->as_dereference()))
2682 return;
2683
2684 /* We only need the logic provided by st_glsl_storage_type_size()
2685 * for arrays of structs. Indirect sampler and image indexing is handled
2686 * elsewhere.
2687 */
2688 int element_size = ir->type->without_array()->is_record() ?
2689 st_glsl_storage_type_size(ir->type, var->data.bindless) :
2690 type_size(ir->type);
2691
2692 index = ir->array_index->constant_expression_value(ralloc_parent(ir));
2693
2694 ir->array->accept(this);
2695 src = this->result;
2696
2697 if (!src.has_index2) {
2698 switch (this->prog->Target) {
2699 case GL_TESS_CONTROL_PROGRAM_NV:
2700 is_2D = (src.file == PROGRAM_INPUT || src.file == PROGRAM_OUTPUT) &&
2701 !ir->variable_referenced()->data.patch;
2702 break;
2703 case GL_TESS_EVALUATION_PROGRAM_NV:
2704 is_2D = src.file == PROGRAM_INPUT &&
2705 !ir->variable_referenced()->data.patch;
2706 break;
2707 case GL_GEOMETRY_PROGRAM_NV:
2708 is_2D = src.file == PROGRAM_INPUT;
2709 break;
2710 }
2711 }
2712
2713 if (is_2D)
2714 element_size = 1;
2715
2716 if (index) {
2717
2718 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
2719 src.file == PROGRAM_INPUT)
2720 element_size = attrib_type_size(ir->type, true);
2721 if (is_2D) {
2722 src.index2D = index->value.i[0];
2723 src.has_index2 = true;
2724 } else
2725 src.index += index->value.i[0] * element_size;
2726 } else {
2727 /* Variable index array dereference. It eats the "vec4" of the
2728 * base of the array and an index that offsets the TGSI register
2729 * index.
2730 */
2731 ir->array_index->accept(this);
2732
2733 st_src_reg index_reg;
2734
2735 if (element_size == 1) {
2736 index_reg = this->result;
2737 } else {
2738 index_reg = get_temp(native_integers ?
2739 glsl_type::int_type : glsl_type::float_type);
2740
2741 emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
2742 this->result, st_src_reg_for_type(index_reg.type, element_size));
2743 }
2744
2745 /* If there was already a relative address register involved, add the
2746 * new and the old together to get the new offset.
2747 */
2748 if (!is_2D && src.reladdr != NULL) {
2749 st_src_reg accum_reg = get_temp(native_integers ?
2750 glsl_type::int_type : glsl_type::float_type);
2751
2752 emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(accum_reg),
2753 index_reg, *src.reladdr);
2754
2755 index_reg = accum_reg;
2756 }
2757
2758 if (is_2D) {
2759 src.reladdr2 = ralloc(mem_ctx, st_src_reg);
2760 memcpy(src.reladdr2, &index_reg, sizeof(index_reg));
2761 src.index2D = 0;
2762 src.has_index2 = true;
2763 } else {
2764 src.reladdr = ralloc(mem_ctx, st_src_reg);
2765 memcpy(src.reladdr, &index_reg, sizeof(index_reg));
2766 }
2767 }
2768
2769 /* Change the register type to the element type of the array. */
2770 src.type = ir->type->base_type;
2771
2772 this->result = src;
2773 }
2774
2775 void
2776 glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
2777 {
2778 unsigned int i;
2779 const glsl_type *struct_type = ir->record->type;
2780 ir_variable *var = ir->record->variable_referenced();
2781 int offset = 0;
2782
2783 if (handle_bound_deref(ir->as_dereference()))
2784 return;
2785
2786 ir->record->accept(this);
2787
2788 assert(ir->field_idx >= 0);
2789 assert(var);
2790 for (i = 0; i < struct_type->length; i++) {
2791 if (i == (unsigned) ir->field_idx)
2792 break;
2793 const glsl_type *member_type = struct_type->fields.structure[i].type;
2794 offset += st_glsl_storage_type_size(member_type, var->data.bindless);
2795 }
2796
2797 /* If the type is smaller than a vec4, replicate the last channel out. */
2798 if (ir->type->is_scalar() || ir->type->is_vector())
2799 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
2800 else
2801 this->result.swizzle = SWIZZLE_NOOP;
2802
2803 this->result.index += offset;
2804 this->result.type = ir->type->base_type;
2805 }
2806
2807 /**
2808 * We want to be careful in assignment setup to hit the actual storage
2809 * instead of potentially using a temporary like we might with the
2810 * ir_dereference handler.
2811 */
2812 static st_dst_reg
2813 get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v, int *component)
2814 {
2815 /* The LHS must be a dereference. If the LHS is a variable indexed array
2816     * access of a vector, it must be separated into a series of conditional moves
2817 * before reaching this point (see ir_vec_index_to_cond_assign).
2818 */
2819 assert(ir->as_dereference());
2820 ir_dereference_array *deref_array = ir->as_dereference_array();
2821 if (deref_array) {
2822 assert(!deref_array->array->type->is_vector());
2823 }
2824
2825 /* Use the rvalue deref handler for the most part. We write swizzles using
2826 * the writemask, but we do extract the base component for enhanced layouts
2827 * from the source swizzle.
2828 */
2829 ir->accept(v);
2830 *component = GET_SWZ(v->result.swizzle, 0);
2831 return st_dst_reg(v->result);
2832 }
2833
2834 /**
2835 * Process the condition of a conditional assignment
2836 *
2837 * Examines the condition of a conditional assignment to generate the optimal
2838 * first operand of a \c CMP instruction. If the condition is a relational
2839 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
2840 * used as the source for the \c CMP instruction. Otherwise the comparison
2841 * is processed to a boolean result, and the boolean result is used as the
2842 * operand to the CMP instruction.
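 *
 * For example, a condition of (a < 0.0) lets 'a' feed the CMP condition
 * directly; (a >= 0.0) also uses 'a' but swaps the other two CMP operands
 * (switch_order), while an arbitrary boolean expression is evaluated to
 * 0.0/1.0 first and its negation is used as the condition.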
2843 */
2844 bool
2845 glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
2846 {
2847 ir_rvalue *src_ir = ir;
2848 bool negate = true;
2849 bool switch_order = false;
2850
2851 ir_expression *const expr = ir->as_expression();
2852
2853 if (native_integers) {
2854 if ((expr != NULL) && (expr->num_operands == 2)) {
2855 enum glsl_base_type type = expr->operands[0]->type->base_type;
2856 if (type == GLSL_TYPE_INT || type == GLSL_TYPE_UINT ||
2857 type == GLSL_TYPE_BOOL) {
2858 if (expr->operation == ir_binop_equal) {
2859 if (expr->operands[0]->is_zero()) {
2860 src_ir = expr->operands[1];
2861 switch_order = true;
2862 }
2863 else if (expr->operands[1]->is_zero()) {
2864 src_ir = expr->operands[0];
2865 switch_order = true;
2866 }
2867 }
2868 else if (expr->operation == ir_binop_nequal) {
2869 if (expr->operands[0]->is_zero()) {
2870 src_ir = expr->operands[1];
2871 }
2872 else if (expr->operands[1]->is_zero()) {
2873 src_ir = expr->operands[0];
2874 }
2875 }
2876 }
2877 }
2878
2879 src_ir->accept(this);
2880 return switch_order;
2881 }
2882
2883 if ((expr != NULL) && (expr->num_operands == 2)) {
2884 bool zero_on_left = false;
2885
2886 if (expr->operands[0]->is_zero()) {
2887 src_ir = expr->operands[1];
2888 zero_on_left = true;
2889 } else if (expr->operands[1]->is_zero()) {
2890 src_ir = expr->operands[0];
2891 zero_on_left = false;
2892 }
2893
2894 /* a is - 0 + - 0 +
2895 * (a < 0) T F F ( a < 0) T F F
2896 * (0 < a) F F T (-a < 0) F F T
2897 * (a >= 0) F T T ( a < 0) T F F (swap order of other operands)
2898 * (0 >= a) T T F (-a < 0) F F T (swap order of other operands)
2899 *
2900 * Note that exchanging the order of 0 and 'a' in the comparison simply
2901 * means that the value of 'a' should be negated.
2902 */
2903 if (src_ir != ir) {
2904 switch (expr->operation) {
2905 case ir_binop_less:
2906 switch_order = false;
2907 negate = zero_on_left;
2908 break;
2909
2910 case ir_binop_gequal:
2911 switch_order = true;
2912 negate = zero_on_left;
2913 break;
2914
2915 default:
2916             /* This isn't the right kind of comparison after all, so make sure
2917 * the whole condition is visited.
2918 */
2919 src_ir = ir;
2920 break;
2921 }
2922 }
2923 }
2924
2925 src_ir->accept(this);
2926
2927 /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
2928 * condition we produced is 0.0 or 1.0. By flipping the sign, we can
2929 * choose which value TGSI_OPCODE_CMP produces without an extra instruction
2930 * computing the condition.
2931 */
2932 if (negate)
2933 this->result.negate = ~this->result.negate;
2934
2935 return switch_order;
2936 }
2937
2938 void
2939 glsl_to_tgsi_visitor::emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
2940 st_dst_reg *l, st_src_reg *r,
2941 st_src_reg *cond, bool cond_swap)
2942 {
2943 if (type->is_record()) {
2944 for (unsigned int i = 0; i < type->length; i++) {
2945 emit_block_mov(ir, type->fields.structure[i].type, l, r,
2946 cond, cond_swap);
2947 }
2948 return;
2949 }
2950
2951 if (type->is_array()) {
2952 for (unsigned int i = 0; i < type->length; i++) {
2953 emit_block_mov(ir, type->fields.array, l, r, cond, cond_swap);
2954 }
2955 return;
2956 }
2957
2958 if (type->is_matrix()) {
2959 const struct glsl_type *vec_type;
2960
2961 vec_type = glsl_type::get_instance(type->is_double()
2962 ? GLSL_TYPE_DOUBLE : GLSL_TYPE_FLOAT,
2963 type->vector_elements, 1);
2964
2965 for (int i = 0; i < type->matrix_columns; i++) {
2966 emit_block_mov(ir, vec_type, l, r, cond, cond_swap);
2967 }
2968 return;
2969 }
2970
2971 assert(type->is_scalar() || type->is_vector());
2972
2973 l->type = type->base_type;
2974 r->type = type->base_type;
2975 if (cond) {
2976 st_src_reg l_src = st_src_reg(*l);
2977
2978 if (l_src.file == PROGRAM_OUTPUT &&
2979 this->prog->Target == GL_FRAGMENT_PROGRAM_ARB &&
2980 (l_src.index == FRAG_RESULT_DEPTH ||
2981 l_src.index == FRAG_RESULT_STENCIL)) {
2982 /* This is a special case because the source swizzles will be shifted
2983 * later to account for the difference between GLSL (where they're
2984 * plain floats) and TGSI (where they're Z and Y components). */
2985 l_src.swizzle = SWIZZLE_XXXX;
2986 }
2987
2988 if (native_integers) {
2989 emit_asm(ir, TGSI_OPCODE_UCMP, *l, *cond,
2990 cond_swap ? l_src : *r,
2991 cond_swap ? *r : l_src);
2992 } else {
2993 emit_asm(ir, TGSI_OPCODE_CMP, *l, *cond,
2994 cond_swap ? l_src : *r,
2995 cond_swap ? *r : l_src);
2996 }
2997 } else {
2998 emit_asm(ir, TGSI_OPCODE_MOV, *l, *r);
2999 }
3000 l->index++;
3001 r->index++;
3002 if (type->is_dual_slot()) {
3003 l->index++;
3004 if (r->is_double_vertex_input == false)
3005 r->index++;
3006 }
3007 }
3008
3009 void
3010 glsl_to_tgsi_visitor::visit(ir_assignment *ir)
3011 {
3012 int dst_component;
3013 st_dst_reg l;
3014 st_src_reg r;
3015
3016     /* all generated instructions need to be flagged as precise */
3017 this->precise = is_precise(ir->lhs->variable_referenced());
3018 ir->rhs->accept(this);
3019 r = this->result;
3020
3021 l = get_assignment_lhs(ir->lhs, this, &dst_component);
3022
3023 {
3024 int swizzles[4];
3025 int first_enabled_chan = 0;
3026 int rhs_chan = 0;
3027 ir_variable *variable = ir->lhs->variable_referenced();
3028
3029 if (shader->Stage == MESA_SHADER_FRAGMENT &&
3030 variable->data.mode == ir_var_shader_out &&
3031 (variable->data.location == FRAG_RESULT_DEPTH ||
3032 variable->data.location == FRAG_RESULT_STENCIL)) {
3033 assert(ir->lhs->type->is_scalar());
3034 assert(ir->write_mask == WRITEMASK_X);
3035
3036 if (variable->data.location == FRAG_RESULT_DEPTH)
3037 l.writemask = WRITEMASK_Z;
3038 else {
3039 assert(variable->data.location == FRAG_RESULT_STENCIL);
3040 l.writemask = WRITEMASK_Y;
3041 }
3042 } else if (ir->write_mask == 0) {
3043 assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
3044
3045 unsigned num_elements =
3046 ir->lhs->type->without_array()->vector_elements;
3047
3048 if (num_elements) {
3049 l.writemask = u_bit_consecutive(0, num_elements);
3050 } else {
3051 /* The type is a struct or an array of (array of) structs. */
3052 l.writemask = WRITEMASK_XYZW;
3053 }
3054 } else {
3055 l.writemask = ir->write_mask;
3056 }
3057
3058 for (int i = 0; i < 4; i++) {
3059 if (l.writemask & (1 << i)) {
3060 first_enabled_chan = GET_SWZ(r.swizzle, i);
3061 break;
3062 }
3063 }
3064
3065 l.writemask = l.writemask << dst_component;
3066
3067 /* Swizzle a small RHS vector into the channels being written.
3068 *
3069 * glsl ir treats write_mask as dictating how many channels are
3070 * present on the RHS while TGSI treats write_mask as just
3071 * showing which channels of the vec4 RHS get written.
3072 */
3073 for (int i = 0; i < 4; i++) {
3074 if (l.writemask & (1 << i))
3075 swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
3076 else
3077 swizzles[i] = first_enabled_chan;
3078 }
3079 r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
3080 swizzles[2], swizzles[3]);
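      /* Example: a vec2 RHS with swizzle .xyyy written through mask .yz
       * becomes .yxyy -- destination channels Y and Z read RHS components
       * X and Y in order, and the unwritten channels replicate a don't-care
       * value.
       */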
3081 }
3082
3083 assert(l.file != PROGRAM_UNDEFINED);
3084 assert(r.file != PROGRAM_UNDEFINED);
3085
3086 if (ir->condition) {
3087 const bool switch_order = this->process_move_condition(ir->condition);
3088 st_src_reg condition = this->result;
3089
3090 emit_block_mov(ir, ir->lhs->type, &l, &r, &condition, switch_order);
3091 } else if (ir->rhs->as_expression() &&
3092 this->instructions.get_tail() &&
3093 ir->rhs == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->ir &&
3094 !((glsl_to_tgsi_instruction *)this->instructions.get_tail())->is_64bit_expanded &&
3095 type_size(ir->lhs->type) == 1 &&
3096 l.writemask == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->dst[0].writemask) {
3097 /* To avoid emitting an extra MOV when assigning an expression to a
3098 * variable, emit the last instruction of the expression again, but
3099 * replace the destination register with the target of the assignment.
3100 * Dead code elimination will remove the original instruction.
3101 */
3102 glsl_to_tgsi_instruction *inst, *new_inst;
3103 inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
3104 new_inst = emit_asm(ir, inst->op, l, inst->src[0], inst->src[1], inst->src[2], inst->src[3]);
3105 new_inst->saturate = inst->saturate;
3106 new_inst->resource = inst->resource;
3107 inst->dead_mask = inst->dst[0].writemask;
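      /* Illustrative before/after (register names hypothetical):
       *
       *    MUL TEMP[2].x, TEMP[0].x, TEMP[1].x
       *    MOV OUT[0].x,  TEMP[2].x
       *
       * becomes MUL OUT[0].x, TEMP[0].x, TEMP[1].x; the original MUL is
       * marked dead and removed later.
       */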
3108 } else {
3109 emit_block_mov(ir, ir->rhs->type, &l, &r, NULL, false);
3110 }
3111 this->precise = 0;
3112 }
3113
3114
3115 void
3116 glsl_to_tgsi_visitor::visit(ir_constant *ir)
3117 {
3118 st_src_reg src;
3119 GLdouble stack_vals[4] = { 0 };
3120 gl_constant_value *values = (gl_constant_value *) stack_vals;
3121 GLenum gl_type = GL_NONE;
3122 unsigned int i;
3123 static int in_array = 0;
3124 gl_register_file file = in_array ? PROGRAM_CONSTANT : PROGRAM_IMMEDIATE;
3125
3126 /* Unfortunately, 4 floats is all we can get into
3127 * _mesa_add_typed_unnamed_constant. So, make a temp to store an
3128 * aggregate constant and move each constant value into it. If we
3129 * get lucky, copy propagation will eliminate the extra moves.
3130 */
3131 if (ir->type->is_record()) {
3132 st_src_reg temp_base = get_temp(ir->type);
3133 st_dst_reg temp = st_dst_reg(temp_base);
3134
3135 for (i = 0; i < ir->type->length; i++) {
3136 ir_constant *const field_value = ir->get_record_field(i);
3137 int size = type_size(field_value->type);
3138
3139 assert(size > 0);
3140
3141 field_value->accept(this);
3142 src = this->result;
3143
3144 for (unsigned j = 0; j < (unsigned int)size; j++) {
3145 emit_asm(ir, TGSI_OPCODE_MOV, temp, src);
3146
3147 src.index++;
3148 temp.index++;
3149 }
3150 }
3151 this->result = temp_base;
3152 return;
3153 }
3154
3155 if (ir->type->is_array()) {
3156 st_src_reg temp_base = get_temp(ir->type);
3157 st_dst_reg temp = st_dst_reg(temp_base);
3158 int size = type_size(ir->type->fields.array);
3159
3160 assert(size > 0);
3161 in_array++;
3162
3163 for (i = 0; i < ir->type->length; i++) {
3164 ir->const_elements[i]->accept(this);
3165 src = this->result;
3166 for (int j = 0; j < size; j++) {
3167 emit_asm(ir, TGSI_OPCODE_MOV, temp, src);
3168
3169 src.index++;
3170 temp.index++;
3171 }
3172 }
3173 this->result = temp_base;
3174 in_array--;
3175 return;
3176 }
3177
3178 if (ir->type->is_matrix()) {
3179 st_src_reg mat = get_temp(ir->type);
3180 st_dst_reg mat_column = st_dst_reg(mat);
3181
3182 for (i = 0; i < ir->type->matrix_columns; i++) {
3183 switch (ir->type->base_type) {
3184 case GLSL_TYPE_FLOAT:
3185 values = (gl_constant_value *)
3186 &ir->value.f[i * ir->type->vector_elements];
3187
3188 src = st_src_reg(file, -1, ir->type->base_type);
3189 src.index = add_constant(file,
3190 values,
3191 ir->type->vector_elements,
3192 GL_FLOAT,
3193 &src.swizzle);
3194 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3195 break;
3196 case GLSL_TYPE_DOUBLE:
3197 values = (gl_constant_value *)
3198 &ir->value.d[i * ir->type->vector_elements];
3199 src = st_src_reg(file, -1, ir->type->base_type);
3200 src.index = add_constant(file,
3201 values,
3202 ir->type->vector_elements,
3203 GL_DOUBLE,
3204 &src.swizzle);
3205 if (ir->type->vector_elements >= 2) {
3206 mat_column.writemask = WRITEMASK_XY;
3207 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
3208 SWIZZLE_X, SWIZZLE_Y);
3209 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3210 } else {
3211 mat_column.writemask = WRITEMASK_X;
3212 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
3213 SWIZZLE_X, SWIZZLE_X);
3214 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3215 }
3216 src.index++;
3217 if (ir->type->vector_elements > 2) {
3218 if (ir->type->vector_elements == 4) {
3219 mat_column.writemask = WRITEMASK_ZW;
3220 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
3221 SWIZZLE_X, SWIZZLE_Y);
3222 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3223 } else {
3224 mat_column.writemask = WRITEMASK_Z;
3225 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_Y, SWIZZLE_Y,
3226 SWIZZLE_Y, SWIZZLE_Y);
3227 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3228 mat_column.writemask = WRITEMASK_XYZW;
3229 src.swizzle = SWIZZLE_XYZW;
3230 }
3231 mat_column.index++;
3232 }
3233 break;
3234 default:
3235 unreachable("Illegal matrix constant type.\n");
3236 break;
3237 }
3238 mat_column.index++;
3239 }
3240 this->result = mat;
3241 return;
3242 }
3243
3244 switch (ir->type->base_type) {
3245 case GLSL_TYPE_FLOAT:
3246 gl_type = GL_FLOAT;
3247 for (i = 0; i < ir->type->vector_elements; i++) {
3248 values[i].f = ir->value.f[i];
3249 }
3250 break;
3251 case GLSL_TYPE_DOUBLE:
3252 gl_type = GL_DOUBLE;
3253 for (i = 0; i < ir->type->vector_elements; i++) {
3254 memcpy(&values[i * 2], &ir->value.d[i], sizeof(double));
3255 }
3256 break;
3257 case GLSL_TYPE_INT64:
3258 gl_type = GL_INT64_ARB;
3259 for (i = 0; i < ir->type->vector_elements; i++) {
3260 memcpy(&values[i * 2], &ir->value.d[i], sizeof(int64_t));
3261 }
3262 break;
3263 case GLSL_TYPE_UINT64:
3264 gl_type = GL_UNSIGNED_INT64_ARB;
3265 for (i = 0; i < ir->type->vector_elements; i++) {
3266 memcpy(&values[i * 2], &ir->value.d[i], sizeof(uint64_t));
3267 }
3268 break;
3269 case GLSL_TYPE_UINT:
3270 gl_type = native_integers ? GL_UNSIGNED_INT : GL_FLOAT;
3271 for (i = 0; i < ir->type->vector_elements; i++) {
3272 if (native_integers)
3273 values[i].u = ir->value.u[i];
3274 else
3275 values[i].f = ir->value.u[i];
3276 }
3277 break;
3278 case GLSL_TYPE_INT:
3279 gl_type = native_integers ? GL_INT : GL_FLOAT;
3280 for (i = 0; i < ir->type->vector_elements; i++) {
3281 if (native_integers)
3282 values[i].i = ir->value.i[i];
3283 else
3284 values[i].f = ir->value.i[i];
3285 }
3286 break;
3287 case GLSL_TYPE_BOOL:
3288 gl_type = native_integers ? GL_BOOL : GL_FLOAT;
3289 for (i = 0; i < ir->type->vector_elements; i++) {
3290 values[i].u = ir->value.b[i] ? ctx->Const.UniformBooleanTrue : 0;
3291 }
3292 break;
3293 default:
3294 assert(!"Non-float/uint/int/bool constant");
3295 }
3296
3297 this->result = st_src_reg(file, -1, ir->type);
3298 this->result.index = add_constant(file,
3299 values,
3300 ir->type->vector_elements,
3301 gl_type,
3302 &this->result.swizzle);
3303 }
3304
3305 void
3306 glsl_to_tgsi_visitor::visit_atomic_counter_intrinsic(ir_call *ir)
3307 {
3308 exec_node *param = ir->actual_parameters.get_head();
3309 ir_dereference *deref = static_cast<ir_dereference *>(param);
3310 ir_variable *location = deref->variable_referenced();
3311 bool has_hw_atomics = st_context(ctx)->has_hw_atomics;
3312 /* Calculate the surface offset */
3313 st_src_reg offset;
3314 unsigned array_size = 0, base = 0;
3315 uint16_t index = 0;
3316 st_src_reg resource;
3317
3318 get_deref_offsets(deref, &array_size, &base, &index, &offset, false);
3319
3320 if (has_hw_atomics) {
3321 variable_storage *entry = find_variable_storage(location);
3322 st_src_reg buffer(PROGRAM_HW_ATOMIC, 0, GLSL_TYPE_ATOMIC_UINT,
3323 location->data.binding);
3324
3325 if (!entry) {
3326 entry = new(mem_ctx) variable_storage(location, PROGRAM_HW_ATOMIC,
3327 num_atomics);
3328 _mesa_hash_table_insert(this->variables, location, entry);
3329
3330 atomic_info[num_atomics].location = location->data.location;
3331 atomic_info[num_atomics].binding = location->data.binding;
3332 atomic_info[num_atomics].size = location->type->arrays_of_arrays_size();
3333 if (atomic_info[num_atomics].size == 0)
3334 atomic_info[num_atomics].size = 1;
3335 atomic_info[num_atomics].array_id = 0;
3336 num_atomics++;
3337 }
3338
3339 if (offset.file != PROGRAM_UNDEFINED) {
3340 if (atomic_info[entry->index].array_id == 0) {
3341 num_atomic_arrays++;
3342 atomic_info[entry->index].array_id = num_atomic_arrays;
3343 }
3344 buffer.array_id = atomic_info[entry->index].array_id;
3345 }
3346
3347 buffer.index = index;
3348 buffer.index += location->data.offset / ATOMIC_COUNTER_SIZE;
3349 buffer.has_index2 = true;
3350
3351 if (offset.file != PROGRAM_UNDEFINED) {
3352 buffer.reladdr = ralloc(mem_ctx, st_src_reg);
3353 *buffer.reladdr = offset;
3354 emit_arl(ir, sampler_reladdr, offset);
3355 }
3356 offset = st_src_reg_for_int(0);
3357
3358 resource = buffer;
3359 } else {
3360 st_src_reg buffer(PROGRAM_BUFFER, location->data.binding,
3361 GLSL_TYPE_ATOMIC_UINT);
3362
3363 if (offset.file != PROGRAM_UNDEFINED) {
3364 emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(offset),
3365 offset, st_src_reg_for_int(ATOMIC_COUNTER_SIZE));
3366 emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(offset),
3367 offset, st_src_reg_for_int(location->data.offset + index * ATOMIC_COUNTER_SIZE));
3368 } else {
3369 offset = st_src_reg_for_int(location->data.offset + index * ATOMIC_COUNTER_SIZE);
3370 }
3371 resource = buffer;
3372 }
3373
3374 ir->return_deref->accept(this);
3375 st_dst_reg dst(this->result);
3376 dst.writemask = WRITEMASK_X;
3377
3378 glsl_to_tgsi_instruction *inst;
3379
3380 if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_read) {
3381 inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, offset);
3382 } else if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_increment) {
3383 inst = emit_asm(ir, TGSI_OPCODE_ATOMUADD, dst, offset,
3384 st_src_reg_for_int(1));
3385 } else if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_predecrement) {
3386 inst = emit_asm(ir, TGSI_OPCODE_ATOMUADD, dst, offset,
3387 st_src_reg_for_int(-1));
3388 emit_asm(ir, TGSI_OPCODE_ADD, dst, this->result, st_src_reg_for_int(-1));
3389 } else {
3390 param = param->get_next();
3391 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3392 val->accept(this);
3393
3394 st_src_reg data = this->result, data2 = undef_src;
3395 enum tgsi_opcode opcode;
3396 switch (ir->callee->intrinsic_id) {
3397 case ir_intrinsic_atomic_counter_add:
3398 opcode = TGSI_OPCODE_ATOMUADD;
3399 break;
3400 case ir_intrinsic_atomic_counter_min:
3401 opcode = TGSI_OPCODE_ATOMIMIN;
3402 break;
3403 case ir_intrinsic_atomic_counter_max:
3404 opcode = TGSI_OPCODE_ATOMIMAX;
3405 break;
3406 case ir_intrinsic_atomic_counter_and:
3407 opcode = TGSI_OPCODE_ATOMAND;
3408 break;
3409 case ir_intrinsic_atomic_counter_or:
3410 opcode = TGSI_OPCODE_ATOMOR;
3411 break;
3412 case ir_intrinsic_atomic_counter_xor:
3413 opcode = TGSI_OPCODE_ATOMXOR;
3414 break;
3415 case ir_intrinsic_atomic_counter_exchange:
3416 opcode = TGSI_OPCODE_ATOMXCHG;
3417 break;
3418 case ir_intrinsic_atomic_counter_comp_swap: {
3419 opcode = TGSI_OPCODE_ATOMCAS;
3420 param = param->get_next();
3421 val = ((ir_instruction *)param)->as_rvalue();
3422 val->accept(this);
3423 data2 = this->result;
3424 break;
3425 }
3426 default:
3427 assert(!"Unexpected intrinsic");
3428 return;
3429 }
3430
3431 inst = emit_asm(ir, opcode, dst, offset, data, data2);
3432 }
3433
3434 inst->resource = resource;
3435 }
3436
3437 void
3438 glsl_to_tgsi_visitor::visit_ssbo_intrinsic(ir_call *ir)
3439 {
3440 exec_node *param = ir->actual_parameters.get_head();
3441
3442 ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
3443
3444 param = param->get_next();
3445 ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
3446
3447 ir_constant *const_block = block->as_constant();
3448 int buf_base = st_context(ctx)->has_hw_atomics
3449 ? 0 : ctx->Const.Program[shader->Stage].MaxAtomicBuffers;
3450 st_src_reg buffer(
3451 PROGRAM_BUFFER,
3452 buf_base + (const_block ? const_block->value.u[0] : 0),
3453 GLSL_TYPE_UINT);
3454
3455 if (!const_block) {
3456 block->accept(this);
3457 buffer.reladdr = ralloc(mem_ctx, st_src_reg);
3458 *buffer.reladdr = this->result;
3459 emit_arl(ir, sampler_reladdr, this->result);
3460 }
3461
3462 /* Calculate the surface offset */
3463 offset->accept(this);
3464 st_src_reg off = this->result;
3465
3466 st_dst_reg dst = undef_dst;
3467 if (ir->return_deref) {
3468 ir->return_deref->accept(this);
3469 dst = st_dst_reg(this->result);
3470 dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
3471 }
3472
3473 glsl_to_tgsi_instruction *inst;
3474
3475 if (ir->callee->intrinsic_id == ir_intrinsic_ssbo_load) {
3476 inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
3477 if (dst.type == GLSL_TYPE_BOOL)
3478 emit_asm(ir, TGSI_OPCODE_USNE, dst, st_src_reg(dst),
3479 st_src_reg_for_int(0));
3480 } else if (ir->callee->intrinsic_id == ir_intrinsic_ssbo_store) {
3481 param = param->get_next();
3482 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3483 val->accept(this);
3484
3485 param = param->get_next();
3486 ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
3487 assert(write_mask);
3488 dst.writemask = write_mask->value.u[0];
3489
3490 dst.type = this->result.type;
3491 inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
3492 } else {
3493 param = param->get_next();
3494 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3495 val->accept(this);
3496
3497 st_src_reg data = this->result, data2 = undef_src;
3498 enum tgsi_opcode opcode;
3499 switch (ir->callee->intrinsic_id) {
3500 case ir_intrinsic_ssbo_atomic_add:
3501 opcode = TGSI_OPCODE_ATOMUADD;
3502 break;
3503 case ir_intrinsic_ssbo_atomic_min:
3504 opcode = TGSI_OPCODE_ATOMIMIN;
3505 break;
3506 case ir_intrinsic_ssbo_atomic_max:
3507 opcode = TGSI_OPCODE_ATOMIMAX;
3508 break;
3509 case ir_intrinsic_ssbo_atomic_and:
3510 opcode = TGSI_OPCODE_ATOMAND;
3511 break;
3512 case ir_intrinsic_ssbo_atomic_or:
3513 opcode = TGSI_OPCODE_ATOMOR;
3514 break;
3515 case ir_intrinsic_ssbo_atomic_xor:
3516 opcode = TGSI_OPCODE_ATOMXOR;
3517 break;
3518 case ir_intrinsic_ssbo_atomic_exchange:
3519 opcode = TGSI_OPCODE_ATOMXCHG;
3520 break;
3521 case ir_intrinsic_ssbo_atomic_comp_swap:
3522 opcode = TGSI_OPCODE_ATOMCAS;
3523 param = param->get_next();
3524 val = ((ir_instruction *)param)->as_rvalue();
3525 val->accept(this);
3526 data2 = this->result;
3527 break;
3528 default:
3529 assert(!"Unexpected intrinsic");
3530 return;
3531 }
3532
3533 inst = emit_asm(ir, opcode, dst, off, data, data2);
3534 }
3535
3536 param = param->get_next();
3537 ir_constant *access = NULL;
3538 if (!param->is_tail_sentinel()) {
3539 access = ((ir_instruction *)param)->as_constant();
3540 assert(access);
3541 }
3542
3543 add_buffer_to_load_and_stores(inst, &buffer, &this->instructions, access);
3544 }
3545
3546 void
3547 glsl_to_tgsi_visitor::visit_membar_intrinsic(ir_call *ir)
3548 {
3549 switch (ir->callee->intrinsic_id) {
3550 case ir_intrinsic_memory_barrier:
3551 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3552 st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
3553 TGSI_MEMBAR_ATOMIC_BUFFER |
3554 TGSI_MEMBAR_SHADER_IMAGE |
3555 TGSI_MEMBAR_SHARED));
3556 break;
3557 case ir_intrinsic_memory_barrier_atomic_counter:
3558 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3559 st_src_reg_for_int(TGSI_MEMBAR_ATOMIC_BUFFER));
3560 break;
3561 case ir_intrinsic_memory_barrier_buffer:
3562 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3563 st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER));
3564 break;
3565 case ir_intrinsic_memory_barrier_image:
3566 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3567 st_src_reg_for_int(TGSI_MEMBAR_SHADER_IMAGE));
3568 break;
3569 case ir_intrinsic_memory_barrier_shared:
3570 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3571 st_src_reg_for_int(TGSI_MEMBAR_SHARED));
3572 break;
3573 case ir_intrinsic_group_memory_barrier:
3574 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3575 st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
3576 TGSI_MEMBAR_ATOMIC_BUFFER |
3577 TGSI_MEMBAR_SHADER_IMAGE |
3578 TGSI_MEMBAR_SHARED |
3579 TGSI_MEMBAR_THREAD_GROUP));
3580 break;
3581 default:
3582 assert(!"Unexpected memory barrier intrinsic");
3583 }
3584 }
3585
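/**
 * Translate loads, stores and atomics on compute-shader "shared" variables.
 * All shared storage is addressed through a single PROGRAM_MEMORY resource;
 * the first intrinsic parameter supplies the offset into it.
 */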
3586 void
3587 glsl_to_tgsi_visitor::visit_shared_intrinsic(ir_call *ir)
3588 {
3589 exec_node *param = ir->actual_parameters.get_head();
3590
3591 ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
3592
3593 st_src_reg buffer(PROGRAM_MEMORY, 0, GLSL_TYPE_UINT);
3594
3595 /* Calculate the surface offset */
3596 offset->accept(this);
3597 st_src_reg off = this->result;
3598
3599 st_dst_reg dst = undef_dst;
3600 if (ir->return_deref) {
3601 ir->return_deref->accept(this);
3602 dst = st_dst_reg(this->result);
3603 dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
3604 }
3605
3606 glsl_to_tgsi_instruction *inst;
3607
3608 if (ir->callee->intrinsic_id == ir_intrinsic_shared_load) {
3609 inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
3610 inst->resource = buffer;
3611 } else if (ir->callee->intrinsic_id == ir_intrinsic_shared_store) {
3612 param = param->get_next();
3613 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3614 val->accept(this);
3615
3616 param = param->get_next();
3617 ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
3618 assert(write_mask);
3619 dst.writemask = write_mask->value.u[0];
3620
3621 dst.type = this->result.type;
3622 inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
3623 inst->resource = buffer;
3624 } else {
3625 param = param->get_next();
3626 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3627 val->accept(this);
3628
3629 st_src_reg data = this->result, data2 = undef_src;
3630 enum tgsi_opcode opcode;
3631 switch (ir->callee->intrinsic_id) {
3632 case ir_intrinsic_shared_atomic_add:
3633 opcode = TGSI_OPCODE_ATOMUADD;
3634 break;
3635 case ir_intrinsic_shared_atomic_min:
3636 opcode = TGSI_OPCODE_ATOMIMIN;
3637 break;
3638 case ir_intrinsic_shared_atomic_max:
3639 opcode = TGSI_OPCODE_ATOMIMAX;
3640 break;
3641 case ir_intrinsic_shared_atomic_and:
3642 opcode = TGSI_OPCODE_ATOMAND;
3643 break;
3644 case ir_intrinsic_shared_atomic_or:
3645 opcode = TGSI_OPCODE_ATOMOR;
3646 break;
3647 case ir_intrinsic_shared_atomic_xor:
3648 opcode = TGSI_OPCODE_ATOMXOR;
3649 break;
3650 case ir_intrinsic_shared_atomic_exchange:
3651 opcode = TGSI_OPCODE_ATOMXCHG;
3652 break;
3653 case ir_intrinsic_shared_atomic_comp_swap:
3654 opcode = TGSI_OPCODE_ATOMCAS;
3655 param = param->get_next();
3656 val = ((ir_instruction *)param)->as_rvalue();
3657 val->accept(this);
3658 data2 = this->result;
3659 break;
3660 default:
3661 assert(!"Unexpected intrinsic");
3662 return;
3663 }
3664
3665 inst = emit_asm(ir, opcode, dst, off, data, data2);
3666 inst->resource = buffer;
3667 }
3668 }
3669
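/**
 * Walk an image dereference chain (variable, array element or struct member)
 * down to the underlying declaration and report its image type together with
 * the coherent/volatile/restrict qualifiers and the declared image format.
 *
 * Illustrative GLSL: for "layout(rgba8) coherent uniform image2D imgs[4];"
 * a dereference of imgs[i] recurses through the array deref to the variable
 * and reports the image2D type, coherent = true and the RGBA8 format.
 */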
3670 static void
3671 get_image_qualifiers(ir_dereference *ir, const glsl_type **type,
3672 bool *memory_coherent, bool *memory_volatile,
3673 bool *memory_restrict, unsigned *image_format)
3674 {
3675
3676 switch (ir->ir_type) {
3677 case ir_type_dereference_record: {
3678 ir_dereference_record *deref_record = ir->as_dereference_record();
3679 const glsl_type *struct_type = deref_record->record->type;
3680 int field_idx = deref_record->field_idx;
3681 
3682 *type = struct_type->fields.structure[field_idx].type->without_array();
3683 *memory_coherent =
3684 struct_type->fields.structure[field_idx].memory_coherent;
3685 *memory_volatile =
3686 struct_type->fields.structure[field_idx].memory_volatile;
3687 *memory_restrict =
3688 struct_type->fields.structure[field_idx].memory_restrict;
3689 *image_format =
3690 struct_type->fields.structure[field_idx].image_format;
3691 break;
3692 }
3693
3694 case ir_type_dereference_array: {
3695 ir_dereference_array *deref_arr = ir->as_dereference_array();
3696 get_image_qualifiers((ir_dereference *)deref_arr->array, type,
3697 memory_coherent, memory_volatile, memory_restrict,
3698 image_format);
3699 break;
3700 }
3701
3702 case ir_type_dereference_variable: {
3703 ir_variable *var = ir->variable_referenced();
3704
3705 *type = var->type->without_array();
3706 *memory_coherent = var->data.memory_coherent;
3707 *memory_volatile = var->data.memory_volatile;
3708 *memory_restrict = var->data.memory_restrict;
3709 *image_format = var->data.image_format;
3710 break;
3711 }
3712
3713 default:
3714 break;
3715 }
3716 }
3717
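/**
 * Translate the GLSL image built-ins: imageSize() becomes RESQ,
 * imageSamples() reads the W component of RESQ, imageLoad()/imageStore()
 * become LOAD/STORE, and the imageAtomic*() variants become the matching
 * ATOM* opcodes.  The coordinate is packed into a temporary (with the sample
 * index in W for multisample images) and the image itself ends up in
 * inst->resource, either as a PROGRAM_IMAGE register or as a bindless handle.
 */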
3718 void
3719 glsl_to_tgsi_visitor::visit_image_intrinsic(ir_call *ir)
3720 {
3721 exec_node *param = ir->actual_parameters.get_head();
3722
3723 ir_dereference *img = (ir_dereference *)param;
3724 const ir_variable *imgvar = img->variable_referenced();
3725 unsigned sampler_array_size = 1, sampler_base = 0;
3726 bool memory_coherent = false, memory_volatile = false, memory_restrict = false;
3727 unsigned image_format = 0;
3728 const glsl_type *type = NULL;
3729
3730 get_image_qualifiers(img, &type, &memory_coherent, &memory_volatile,
3731 &memory_restrict, &image_format);
3732
3733 st_src_reg reladdr;
3734 st_src_reg image(PROGRAM_IMAGE, 0, GLSL_TYPE_UINT);
3735 uint16_t index = 0;
3736 get_deref_offsets(img, &sampler_array_size, &sampler_base,
3737 &index, &reladdr, !imgvar->contains_bindless());
3738
3739 image.index = index;
3740 if (reladdr.file != PROGRAM_UNDEFINED) {
3741 image.reladdr = ralloc(mem_ctx, st_src_reg);
3742 *image.reladdr = reladdr;
3743 emit_arl(ir, sampler_reladdr, reladdr);
3744 }
3745
3746 st_dst_reg dst = undef_dst;
3747 if (ir->return_deref) {
3748 ir->return_deref->accept(this);
3749 dst = st_dst_reg(this->result);
3750 dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
3751 }
3752
3753 glsl_to_tgsi_instruction *inst;
3754
3755 st_src_reg bindless;
3756 if (imgvar->contains_bindless()) {
3757 img->accept(this);
3758 bindless = this->result;
3759 }
3760
3761 if (ir->callee->intrinsic_id == ir_intrinsic_image_size) {
3762 dst.writemask = WRITEMASK_XYZ;
3763 inst = emit_asm(ir, TGSI_OPCODE_RESQ, dst);
3764 } else if (ir->callee->intrinsic_id == ir_intrinsic_image_samples) {
3765 st_src_reg res = get_temp(glsl_type::ivec4_type);
3766 st_dst_reg dstres = st_dst_reg(res);
3767 dstres.writemask = WRITEMASK_W;
3768 inst = emit_asm(ir, TGSI_OPCODE_RESQ, dstres);
3769 res.swizzle = SWIZZLE_WWWW;
3770 emit_asm(ir, TGSI_OPCODE_MOV, dst, res);
3771 } else {
3772 st_src_reg arg1 = undef_src, arg2 = undef_src;
3773 st_src_reg coord;
3774 st_dst_reg coord_dst;
3775 coord = get_temp(glsl_type::ivec4_type);
3776 coord_dst = st_dst_reg(coord);
3777 coord_dst.writemask = (1 << type->coordinate_components()) - 1;
3778 param = param->get_next();
3779 ((ir_dereference *)param)->accept(this);
3780 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
3781 coord.swizzle = SWIZZLE_XXXX;
3782 switch (type->coordinate_components()) {
3783 case 4: assert(!"unexpected coord count");
3784 /* fallthrough */
3785 case 3: coord.swizzle |= SWIZZLE_Z << 6;
3786 /* fallthrough */
3787 case 2: coord.swizzle |= SWIZZLE_Y << 3;
3788 }
3789
3790 if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
3791 param = param->get_next();
3792 ((ir_dereference *)param)->accept(this);
3793 st_src_reg sample = this->result;
3794 sample.swizzle = SWIZZLE_XXXX;
3795 coord_dst.writemask = WRITEMASK_W;
3796 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, sample);
3797 coord.swizzle |= SWIZZLE_W << 9;
3798 }
3799
3800 param = param->get_next();
3801 if (!param->is_tail_sentinel()) {
3802 ((ir_dereference *)param)->accept(this);
3803 arg1 = this->result;
3804 param = param->get_next();
3805 }
3806
3807 if (!param->is_tail_sentinel()) {
3808 ((ir_dereference *)param)->accept(this);
3809 arg2 = this->result;
3810 param = param->get_next();
3811 }
3812
3813 assert(param->is_tail_sentinel());
3814
3815 enum tgsi_opcode opcode;
3816 switch (ir->callee->intrinsic_id) {
3817 case ir_intrinsic_image_load:
3818 opcode = TGSI_OPCODE_LOAD;
3819 break;
3820 case ir_intrinsic_image_store:
3821 opcode = TGSI_OPCODE_STORE;
3822 break;
3823 case ir_intrinsic_image_atomic_add:
3824 opcode = TGSI_OPCODE_ATOMUADD;
3825 break;
3826 case ir_intrinsic_image_atomic_min:
3827 opcode = TGSI_OPCODE_ATOMIMIN;
3828 break;
3829 case ir_intrinsic_image_atomic_max:
3830 opcode = TGSI_OPCODE_ATOMIMAX;
3831 break;
3832 case ir_intrinsic_image_atomic_and:
3833 opcode = TGSI_OPCODE_ATOMAND;
3834 break;
3835 case ir_intrinsic_image_atomic_or:
3836 opcode = TGSI_OPCODE_ATOMOR;
3837 break;
3838 case ir_intrinsic_image_atomic_xor:
3839 opcode = TGSI_OPCODE_ATOMXOR;
3840 break;
3841 case ir_intrinsic_image_atomic_exchange:
3842 opcode = TGSI_OPCODE_ATOMXCHG;
3843 break;
3844 case ir_intrinsic_image_atomic_comp_swap:
3845 opcode = TGSI_OPCODE_ATOMCAS;
3846 break;
3847 default:
3848 assert(!"Unexpected intrinsic");
3849 return;
3850 }
3851
3852 inst = emit_asm(ir, opcode, dst, coord, arg1, arg2);
3853 if (opcode == TGSI_OPCODE_STORE)
3854 inst->dst[0].writemask = WRITEMASK_XYZW;
3855 }
3856
3857 if (imgvar->contains_bindless()) {
3858 inst->resource = bindless;
3859 inst->resource.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
3860 SWIZZLE_X, SWIZZLE_Y);
3861 } else {
3862 inst->resource = image;
3863 inst->sampler_array_size = sampler_array_size;
3864 inst->sampler_base = sampler_base;
3865 }
3866
3867 inst->tex_target = type->sampler_index();
3868 inst->image_format = st_mesa_format_to_pipe_format(st_context(ctx),
3869 _mesa_get_shader_image_format(image_format));
3870
3871 if (memory_coherent)
3872 inst->buffer_access |= TGSI_MEMORY_COHERENT;
3873 if (memory_restrict)
3874 inst->buffer_access |= TGSI_MEMORY_RESTRICT;
3875 if (memory_volatile)
3876 inst->buffer_access |= TGSI_MEMORY_VOLATILE;
3877 }
3878
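/**
 * Translate an intrinsic that maps 1:1 onto a TGSI opcode (clock, vote,
 * ballot, ...): the return dereference becomes the destination and the
 * actual parameters become the sources.
 */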
3879 void
3880 glsl_to_tgsi_visitor::visit_generic_intrinsic(ir_call *ir, enum tgsi_opcode op)
3881 {
3882 ir->return_deref->accept(this);
3883 st_dst_reg dst = st_dst_reg(this->result);
3884
3885 dst.writemask = u_bit_consecutive(0, ir->return_deref->var->type->vector_elements);
3886
3887 st_src_reg src[4] = { undef_src, undef_src, undef_src, undef_src };
3888 unsigned num_src = 0;
3889 foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
3890 assert(num_src < ARRAY_SIZE(src));
3891
3892 this->result.file = PROGRAM_UNDEFINED;
3893 param->accept(this);
3894 assert(this->result.file != PROGRAM_UNDEFINED);
3895
3896 src[num_src] = this->result;
3897 num_src++;
3898 }
3899
3900 emit_asm(ir, op, dst, src[0], src[1], src[2], src[3]);
3901 }
3902
3903 void
3904 glsl_to_tgsi_visitor::visit(ir_call *ir)
3905 {
3906 ir_function_signature *sig = ir->callee;
3907
3908 /* Filter out intrinsics */
3909 switch (sig->intrinsic_id) {
3910 case ir_intrinsic_atomic_counter_read:
3911 case ir_intrinsic_atomic_counter_increment:
3912 case ir_intrinsic_atomic_counter_predecrement:
3913 case ir_intrinsic_atomic_counter_add:
3914 case ir_intrinsic_atomic_counter_min:
3915 case ir_intrinsic_atomic_counter_max:
3916 case ir_intrinsic_atomic_counter_and:
3917 case ir_intrinsic_atomic_counter_or:
3918 case ir_intrinsic_atomic_counter_xor:
3919 case ir_intrinsic_atomic_counter_exchange:
3920 case ir_intrinsic_atomic_counter_comp_swap:
3921 visit_atomic_counter_intrinsic(ir);
3922 return;
3923
3924 case ir_intrinsic_ssbo_load:
3925 case ir_intrinsic_ssbo_store:
3926 case ir_intrinsic_ssbo_atomic_add:
3927 case ir_intrinsic_ssbo_atomic_min:
3928 case ir_intrinsic_ssbo_atomic_max:
3929 case ir_intrinsic_ssbo_atomic_and:
3930 case ir_intrinsic_ssbo_atomic_or:
3931 case ir_intrinsic_ssbo_atomic_xor:
3932 case ir_intrinsic_ssbo_atomic_exchange:
3933 case ir_intrinsic_ssbo_atomic_comp_swap:
3934 visit_ssbo_intrinsic(ir);
3935 return;
3936
3937 case ir_intrinsic_memory_barrier:
3938 case ir_intrinsic_memory_barrier_atomic_counter:
3939 case ir_intrinsic_memory_barrier_buffer:
3940 case ir_intrinsic_memory_barrier_image:
3941 case ir_intrinsic_memory_barrier_shared:
3942 case ir_intrinsic_group_memory_barrier:
3943 visit_membar_intrinsic(ir);
3944 return;
3945
3946 case ir_intrinsic_shared_load:
3947 case ir_intrinsic_shared_store:
3948 case ir_intrinsic_shared_atomic_add:
3949 case ir_intrinsic_shared_atomic_min:
3950 case ir_intrinsic_shared_atomic_max:
3951 case ir_intrinsic_shared_atomic_and:
3952 case ir_intrinsic_shared_atomic_or:
3953 case ir_intrinsic_shared_atomic_xor:
3954 case ir_intrinsic_shared_atomic_exchange:
3955 case ir_intrinsic_shared_atomic_comp_swap:
3956 visit_shared_intrinsic(ir);
3957 return;
3958
3959 case ir_intrinsic_image_load:
3960 case ir_intrinsic_image_store:
3961 case ir_intrinsic_image_atomic_add:
3962 case ir_intrinsic_image_atomic_min:
3963 case ir_intrinsic_image_atomic_max:
3964 case ir_intrinsic_image_atomic_and:
3965 case ir_intrinsic_image_atomic_or:
3966 case ir_intrinsic_image_atomic_xor:
3967 case ir_intrinsic_image_atomic_exchange:
3968 case ir_intrinsic_image_atomic_comp_swap:
3969 case ir_intrinsic_image_size:
3970 case ir_intrinsic_image_samples:
3971 visit_image_intrinsic(ir);
3972 return;
3973
3974 case ir_intrinsic_shader_clock:
3975 visit_generic_intrinsic(ir, TGSI_OPCODE_CLOCK);
3976 return;
3977
3978 case ir_intrinsic_vote_all:
3979 visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_ALL);
3980 return;
3981 case ir_intrinsic_vote_any:
3982 visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_ANY);
3983 return;
3984 case ir_intrinsic_vote_eq:
3985 visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_EQ);
3986 return;
3987 case ir_intrinsic_ballot:
3988 visit_generic_intrinsic(ir, TGSI_OPCODE_BALLOT);
3989 return;
3990 case ir_intrinsic_read_first_invocation:
3991 visit_generic_intrinsic(ir, TGSI_OPCODE_READ_FIRST);
3992 return;
3993 case ir_intrinsic_read_invocation:
3994 visit_generic_intrinsic(ir, TGSI_OPCODE_READ_INVOC);
3995 return;
3996
3997 case ir_intrinsic_invalid:
3998 case ir_intrinsic_generic_load:
3999 case ir_intrinsic_generic_store:
4000 case ir_intrinsic_generic_atomic_add:
4001 case ir_intrinsic_generic_atomic_and:
4002 case ir_intrinsic_generic_atomic_or:
4003 case ir_intrinsic_generic_atomic_xor:
4004 case ir_intrinsic_generic_atomic_min:
4005 case ir_intrinsic_generic_atomic_max:
4006 case ir_intrinsic_generic_atomic_exchange:
4007 case ir_intrinsic_generic_atomic_comp_swap:
4008 case ir_intrinsic_begin_invocation_interlock:
4009 case ir_intrinsic_end_invocation_interlock:
4010 unreachable("Invalid intrinsic");
4011 }
4012 }
4013
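/**
 * Walk a dereference chain and accumulate its offsets: constant array indices
 * are folded into *index, struct members advance *location, and any
 * non-constant array index is scaled by the element count covered so far and
 * added into the *indirect register with MUL/ADD instructions.
 *
 * Illustrative GLSL: with "uniform sampler2D samps[4];" the constant access
 * samps[2] only bumps *index, whereas samps[i] leaves *index alone and emits
 * a MOV of i into *indirect.
 */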
4014 void
4015 glsl_to_tgsi_visitor::calc_deref_offsets(ir_dereference *tail,
4016 unsigned *array_elements,
4017 uint16_t *index,
4018 st_src_reg *indirect,
4019 unsigned *location)
4020 {
4021 switch (tail->ir_type) {
4022 case ir_type_dereference_record: {
4023 ir_dereference_record *deref_record = tail->as_dereference_record();
4024 const glsl_type *struct_type = deref_record->record->type;
4025 int field_index = deref_record->field_idx;
4026
4027 calc_deref_offsets(deref_record->record->as_dereference(), array_elements, index, indirect, location);
4028
4029 assert(field_index >= 0);
4030 *location += struct_type->record_location_offset(field_index);
4031 break;
4032 }
4033
4034 case ir_type_dereference_array: {
4035 ir_dereference_array *deref_arr = tail->as_dereference_array();
4036
4037 void *mem_ctx = ralloc_parent(deref_arr);
4038 ir_constant *array_index =
4039 deref_arr->array_index->constant_expression_value(mem_ctx);
4040
4041 if (!array_index) {
4042 st_src_reg temp_reg;
4043 st_dst_reg temp_dst;
4044
4045 temp_reg = get_temp(glsl_type::uint_type);
4046 temp_dst = st_dst_reg(temp_reg);
4047 temp_dst.writemask = 1;
4048
4049 deref_arr->array_index->accept(this);
4050 if (*array_elements != 1)
4051 emit_asm(NULL, TGSI_OPCODE_MUL, temp_dst, this->result, st_src_reg_for_int(*array_elements));
4052 else
4053 emit_asm(NULL, TGSI_OPCODE_MOV, temp_dst, this->result);
4054
4055 if (indirect->file == PROGRAM_UNDEFINED)
4056 *indirect = temp_reg;
4057 else {
4058 temp_dst = st_dst_reg(*indirect);
4059 temp_dst.writemask = 1;
4060 emit_asm(NULL, TGSI_OPCODE_ADD, temp_dst, *indirect, temp_reg);
4061 }
4062 } else
4063 *index += array_index->value.u[0] * *array_elements;
4064
4065 *array_elements *= deref_arr->array->type->length;
4066
4067 calc_deref_offsets(deref_arr->array->as_dereference(), array_elements, index, indirect, location);
4068 break;
4069 }
4070 default:
4071 break;
4072 }
4073 }
4074
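/**
 * Resolve a sampler/image dereference to a base index, an array size and an
 * optional relative-address register.  When the opaque flag is set, the
 * uniform's per-stage opaque index from UniformStorage is added to both the
 * base and the index.
 */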
4075 void
4076 glsl_to_tgsi_visitor::get_deref_offsets(ir_dereference *ir,
4077 unsigned *array_size,
4078 unsigned *base,
4079 uint16_t *index,
4080 st_src_reg *reladdr,
4081 bool opaque)
4082 {
4083 GLuint shader = _mesa_program_enum_to_shader_stage(this->prog->Target);
4084 unsigned location = 0;
4085 ir_variable *var = ir->variable_referenced();
4086
4087 memset(reladdr, 0, sizeof(*reladdr));
4088 reladdr->file = PROGRAM_UNDEFINED;
4089
4090 *base = 0;
4091 *array_size = 1;
4092
4093 assert(var);
4094 location = var->data.location;
4095 calc_deref_offsets(ir, array_size, index, reladdr, &location);
4096
4097 /*
4098 * If we end up with no indirect then adjust the base to the index,
4099 * and set the array size to 1.
4100 */
4101 if (reladdr->file == PROGRAM_UNDEFINED) {
4102 *base = *index;
4103 *array_size = 1;
4104 }
4105
4106 if (opaque) {
4107 assert(location != 0xffffffff);
4108 *base += this->shader_program->data->UniformStorage[location].opaque[shader].index;
4109 *index += this->shader_program->data->UniformStorage[location].opaque[shader].index;
4110 }
4111 }
4112
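/**
 * Gather (TG4) offsets are emitted through inst->tex_offsets.  If the offset
 * uses relative addressing or lives in a constant-style file (uniform,
 * constant, state var), copy its xy components into an ivec2 temporary so
 * the offset is always a directly addressable register.
 */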
4113 st_src_reg
4114 glsl_to_tgsi_visitor::canonicalize_gather_offset(st_src_reg offset)
4115 {
4116 if (offset.reladdr || offset.reladdr2 ||
4117 offset.has_index2 ||
4118 offset.file == PROGRAM_UNIFORM ||
4119 offset.file == PROGRAM_CONSTANT ||
4120 offset.file == PROGRAM_STATE_VAR) {
4121 st_src_reg tmp = get_temp(glsl_type::ivec2_type);
4122 st_dst_reg tmp_dst = st_dst_reg(tmp);
4123 tmp_dst.writemask = WRITEMASK_XY;
4124 emit_asm(NULL, TGSI_OPCODE_MOV, tmp_dst, offset);
4125 return tmp;
4126 }
4127
4128 return offset;
4129 }
4130
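/**
 * When a bound (non-bindless) sampler or image uniform is used as a plain
 * value, e.g. assigned to a temporary or passed around, convert it into a
 * bindless-style uvec2 handle with SAMP2HND/IMG2HND so the rest of the
 * translation can treat it like any other rvalue.  Returns false when the
 * dereference is not such a uniform and should be handled normally.
 */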
4131 bool
4132 glsl_to_tgsi_visitor::handle_bound_deref(ir_dereference *ir)
4133 {
4134 ir_variable *var = ir->variable_referenced();
4135
4136 if (!var || var->data.mode != ir_var_uniform || var->data.bindless ||
4137 !(ir->type->is_image() || ir->type->is_sampler()))
4138 return false;
4139
4140 /* Convert from bound sampler/image to bindless handle. */
4141 bool is_image = ir->type->is_image();
4142 st_src_reg resource(is_image ? PROGRAM_IMAGE : PROGRAM_SAMPLER, 0, GLSL_TYPE_UINT);
4143 uint16_t index = 0;
4144 unsigned array_size = 1, base = 0;
4145 st_src_reg reladdr;
4146 get_deref_offsets(ir, &array_size, &base, &index, &reladdr, true);
4147
4148 resource.index = index;
4149 if (reladdr.file != PROGRAM_UNDEFINED) {
4150 resource.reladdr = ralloc(mem_ctx, st_src_reg);
4151 *resource.reladdr = reladdr;
4152 emit_arl(ir, sampler_reladdr, reladdr);
4153 }
4154
4155 this->result = get_temp(glsl_type::uvec2_type);
4156 st_dst_reg dst(this->result);
4157 dst.writemask = WRITEMASK_XY;
4158
4159 glsl_to_tgsi_instruction *inst = emit_asm(
4160 ir, is_image ? TGSI_OPCODE_IMG2HND : TGSI_OPCODE_SAMP2HND, dst);
4161
4162 inst->tex_target = ir->type->sampler_index();
4163 inst->resource = resource;
4164 inst->sampler_array_size = array_size;
4165 inst->sampler_base = base;
4166
4167 return true;
4168 }
4169
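/**
 * Translate the GLSL texturing built-ins into TEX/TXB/TXL/TXD/TXF/TXQ/TG4
 * and friends, packing the projector, shadow comparator, LOD/bias and sample
 * index into the coordinate vector the way TGSI expects.
 */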
4170 void
4171 glsl_to_tgsi_visitor::visit(ir_texture *ir)
4172 {
4173 st_src_reg result_src, coord, cube_sc, lod_info, projector, dx, dy;
4174 st_src_reg offset[MAX_GLSL_TEXTURE_OFFSET], sample_index, component;
4175 st_src_reg levels_src, reladdr;
4176 st_dst_reg result_dst, coord_dst, cube_sc_dst;
4177 glsl_to_tgsi_instruction *inst = NULL;
4178 enum tgsi_opcode opcode = TGSI_OPCODE_NOP;
4179 const glsl_type *sampler_type = ir->sampler->type;
4180 unsigned sampler_array_size = 1, sampler_base = 0;
4181 bool is_cube_array = false, is_cube_shadow = false;
4182 ir_variable *var = ir->sampler->variable_referenced();
4183 unsigned i;
4184
4185 /* if we are a cube array sampler or a cube shadow */
4186 if (sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE) {
4187 is_cube_array = sampler_type->sampler_array;
4188 is_cube_shadow = sampler_type->sampler_shadow;
4189 }
4190
4191 if (ir->coordinate) {
4192 ir->coordinate->accept(this);
4193
4194 /* Put our coords in a temp. We'll need to modify them for shadow,
4195 * projection, or LOD, so the only case we'd use it as-is is if
4196 * we're doing plain old texturing. The optimization passes on
4197 * glsl_to_tgsi_visitor should handle cleaning up our mess in that case.
4198 */
4199 coord = get_temp(glsl_type::vec4_type);
4200 coord_dst = st_dst_reg(coord);
4201 coord_dst.writemask = (1 << ir->coordinate->type->vector_elements) - 1;
4202 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
4203 }
4204
4205 if (ir->projector) {
4206 ir->projector->accept(this);
4207 projector = this->result;
4208 }
4209
4210 /* Storage for our result. Ideally for an assignment we'd be using
4211 * the actual storage for the result here, instead.
4212 */
4213 result_src = get_temp(ir->type);
4214 result_dst = st_dst_reg(result_src);
4215 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
4216
4217 switch (ir->op) {
4218 case ir_tex:
4219 opcode = (is_cube_array && ir->shadow_comparator) ? TGSI_OPCODE_TEX2 : TGSI_OPCODE_TEX;
4220 if (ir->offset) {
4221 ir->offset->accept(this);
4222 offset[0] = this->result;
4223 }
4224 break;
4225 case ir_txb:
4226 if (is_cube_array || is_cube_shadow) {
4227 opcode = TGSI_OPCODE_TXB2;
4228 }
4229 else {
4230 opcode = TGSI_OPCODE_TXB;
4231 }
4232 ir->lod_info.bias->accept(this);
4233 lod_info = this->result;
4234 if (ir->offset) {
4235 ir->offset->accept(this);
4236 offset[0] = this->result;
4237 }
4238 break;
4239 case ir_txl:
4240 if (this->has_tex_txf_lz && ir->lod_info.lod->is_zero()) {
4241 opcode = TGSI_OPCODE_TEX_LZ;
4242 } else {
4243 opcode = is_cube_array ? TGSI_OPCODE_TXL2 : TGSI_OPCODE_TXL;
4244 ir->lod_info.lod->accept(this);
4245 lod_info = this->result;
4246 }
4247 if (ir->offset) {
4248 ir->offset->accept(this);
4249 offset[0] = this->result;
4250 }
4251 break;
4252 case ir_txd:
4253 opcode = TGSI_OPCODE_TXD;
4254 ir->lod_info.grad.dPdx->accept(this);
4255 dx = this->result;
4256 ir->lod_info.grad.dPdy->accept(this);
4257 dy = this->result;
4258 if (ir->offset) {
4259 ir->offset->accept(this);
4260 offset[0] = this->result;
4261 }
4262 break;
4263 case ir_txs:
4264 opcode = TGSI_OPCODE_TXQ;
4265 ir->lod_info.lod->accept(this);
4266 lod_info = this->result;
4267 break;
4268 case ir_query_levels:
4269 opcode = TGSI_OPCODE_TXQ;
4270 lod_info = undef_src;
4271 levels_src = get_temp(ir->type);
4272 break;
4273 case ir_txf:
4274 if (this->has_tex_txf_lz && ir->lod_info.lod->is_zero()) {
4275 opcode = TGSI_OPCODE_TXF_LZ;
4276 } else {
4277 opcode = TGSI_OPCODE_TXF;
4278 ir->lod_info.lod->accept(this);
4279 lod_info = this->result;
4280 }
4281 if (ir->offset) {
4282 ir->offset->accept(this);
4283 offset[0] = this->result;
4284 }
4285 break;
4286 case ir_txf_ms:
4287 opcode = TGSI_OPCODE_TXF;
4288 ir->lod_info.sample_index->accept(this);
4289 sample_index = this->result;
4290 break;
4291 case ir_tg4:
4292 opcode = TGSI_OPCODE_TG4;
4293 ir->lod_info.component->accept(this);
4294 component = this->result;
4295 if (ir->offset) {
4296 ir->offset->accept(this);
4297 if (ir->offset->type->is_array()) {
4298 const glsl_type *elt_type = ir->offset->type->fields.array;
4299 for (i = 0; i < ir->offset->type->length; i++) {
4300 offset[i] = this->result;
4301 offset[i].index += i * type_size(elt_type);
4302 offset[i].type = elt_type->base_type;
4303 offset[i].swizzle = swizzle_for_size(elt_type->vector_elements);
4304 offset[i] = canonicalize_gather_offset(offset[i]);
4305 }
4306 } else {
4307 offset[0] = canonicalize_gather_offset(this->result);
4308 }
4309 }
4310 break;
4311 case ir_lod:
4312 opcode = TGSI_OPCODE_LODQ;
4313 break;
4314 case ir_texture_samples:
4315 opcode = TGSI_OPCODE_TXQS;
4316 break;
4317 case ir_samples_identical:
4318 unreachable("Unexpected ir_samples_identical opcode");
4319 }
4320
4321 if (ir->projector) {
4322 if (opcode == TGSI_OPCODE_TEX) {
4323 /* Slot the projector in as the last component of the coord. */
4324 coord_dst.writemask = WRITEMASK_W;
4325 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, projector);
4326 coord_dst.writemask = WRITEMASK_XYZW;
4327 opcode = TGSI_OPCODE_TXP;
4328 } else {
4329 st_src_reg coord_w = coord;
4330 coord_w.swizzle = SWIZZLE_WWWW;
4331
4332 /* For the other TEX opcodes there's no projective version
4333 * since the last slot is taken up by LOD info. Do the
4334 * projective divide now.
4335 */
4336 coord_dst.writemask = WRITEMASK_W;
4337 emit_asm(ir, TGSI_OPCODE_RCP, coord_dst, projector);
4338
4339 /* In the case where we have to project the coordinates "by hand,"
4340 * the shadow comparator value must also be projected.
4341 */
4342 st_src_reg tmp_src = coord;
4343 if (ir->shadow_comparator) {
4344 /* Slot the shadow value in as the second to last component of the
4345 * coord.
4346 */
4347 ir->shadow_comparator->accept(this);
4348
4349 tmp_src = get_temp(glsl_type::vec4_type);
4350 st_dst_reg tmp_dst = st_dst_reg(tmp_src);
4351
4352 /* Projective division not allowed for array samplers. */
4353 assert(!sampler_type->sampler_array);
4354
4355 tmp_dst.writemask = WRITEMASK_Z;
4356 emit_asm(ir, TGSI_OPCODE_MOV, tmp_dst, this->result);
4357
4358 tmp_dst.writemask = WRITEMASK_XY;
4359 emit_asm(ir, TGSI_OPCODE_MOV, tmp_dst, coord);
4360 }
4361
4362 coord_dst.writemask = WRITEMASK_XYZ;
4363 emit_asm(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w);
4364
4365 coord_dst.writemask = WRITEMASK_XYZW;
4366 coord.swizzle = SWIZZLE_XYZW;
4367 }
4368 }
4369
4370 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the
4371 * shadow comparator was put in the correct place (and projected) by the
4372 * code, above, that handles by-hand projection.
4373 */
4374 if (ir->shadow_comparator && (!ir->projector || opcode == TGSI_OPCODE_TXP)) {
4375 /* Slot the shadow value in as the second to last component of the
4376 * coord.
4377 */
4378 ir->shadow_comparator->accept(this);
4379
4380 if (is_cube_array) {
4381 cube_sc = get_temp(glsl_type::float_type);
4382 cube_sc_dst = st_dst_reg(cube_sc);
4383 cube_sc_dst.writemask = WRITEMASK_X;
4384 emit_asm(ir, TGSI_OPCODE_MOV, cube_sc_dst, this->result);
4385 cube_sc_dst.writemask = WRITEMASK_X;
4386 }
4387 else {
4388 if ((sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_2D &&
4389 sampler_type->sampler_array) ||
4390 sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE) {
4391 coord_dst.writemask = WRITEMASK_W;
4392 } else {
4393 coord_dst.writemask = WRITEMASK_Z;
4394 }
4395 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
4396 coord_dst.writemask = WRITEMASK_XYZW;
4397 }
4398 }
4399
4400 if (ir->op == ir_txf_ms) {
4401 coord_dst.writemask = WRITEMASK_W;
4402 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, sample_index);
4403 coord_dst.writemask = WRITEMASK_XYZW;
4404 } else if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB ||
4405 opcode == TGSI_OPCODE_TXF) {
4406 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
4407 coord_dst.writemask = WRITEMASK_W;
4408 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, lod_info);
4409 coord_dst.writemask = WRITEMASK_XYZW;
4410 }
4411
4412 st_src_reg sampler(PROGRAM_SAMPLER, 0, GLSL_TYPE_UINT);
4413
4414 uint16_t index = 0;
4415 get_deref_offsets(ir->sampler, &sampler_array_size, &sampler_base,
4416 &index, &reladdr, !var->contains_bindless());
4417
4418 sampler.index = index;
4419 if (reladdr.file != PROGRAM_UNDEFINED) {
4420 sampler.reladdr = ralloc(mem_ctx, st_src_reg);
4421 *sampler.reladdr = reladdr;
4422 emit_arl(ir, sampler_reladdr, reladdr);
4423 }
4424
4425 st_src_reg bindless;
4426 if (var->contains_bindless()) {
4427 ir->sampler->accept(this);
4428 bindless = this->result;
4429 }
4430
4431 if (opcode == TGSI_OPCODE_TXD)
4432 inst = emit_asm(ir, opcode, result_dst, coord, dx, dy);
4433 else if (opcode == TGSI_OPCODE_TXQ) {
4434 if (ir->op == ir_query_levels) {
4435 /* the number of mipmap levels is returned in W */
4436 inst = emit_asm(ir, opcode, st_dst_reg(levels_src), lod_info);
4437 result_dst.writemask = WRITEMASK_X;
4438 levels_src.swizzle = SWIZZLE_WWWW;
4439 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, levels_src);
4440 } else
4441 inst = emit_asm(ir, opcode, result_dst, lod_info);
4442 } else if (opcode == TGSI_OPCODE_TXQS) {
4443 inst = emit_asm(ir, opcode, result_dst);
4444 } else if (opcode == TGSI_OPCODE_TXL2 || opcode == TGSI_OPCODE_TXB2) {
4445 inst = emit_asm(ir, opcode, result_dst, coord, lod_info);
4446 } else if (opcode == TGSI_OPCODE_TEX2) {
4447 inst = emit_asm(ir, opcode, result_dst, coord, cube_sc);
4448 } else if (opcode == TGSI_OPCODE_TG4) {
4449 if (is_cube_array && ir->shadow_comparator) {
4450 inst = emit_asm(ir, opcode, result_dst, coord, cube_sc);
4451 } else {
4452 inst = emit_asm(ir, opcode, result_dst, coord, component);
4453 }
4454 } else
4455 inst = emit_asm(ir, opcode, result_dst, coord);
4456
4457 if (ir->shadow_comparator)
4458 inst->tex_shadow = GL_TRUE;
4459
4460 if (var->contains_bindless()) {
4461 inst->resource = bindless;
4462 inst->resource.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
4463 SWIZZLE_X, SWIZZLE_Y);
4464 } else {
4465 inst->resource = sampler;
4466 inst->sampler_array_size = sampler_array_size;
4467 inst->sampler_base = sampler_base;
4468 }
4469
4470 if (ir->offset) {
4471 if (!inst->tex_offsets)
4472 inst->tex_offsets = rzalloc_array(inst, st_src_reg,
4473 MAX_GLSL_TEXTURE_OFFSET);
4474
4475 for (i = 0; i < MAX_GLSL_TEXTURE_OFFSET &&
4476 offset[i].file != PROGRAM_UNDEFINED; i++)
4477 inst->tex_offsets[i] = offset[i];
4478 inst->tex_offset_num_offset = i;
4479 }
4480
4481 inst->tex_target = sampler_type->sampler_index();
4482 inst->tex_type = ir->type->base_type;
4483
4484 this->result = result_src;
4485 }
4486
4487 void
4488 glsl_to_tgsi_visitor::visit(ir_return *ir)
4489 {
4490 assert(!ir->get_value());
4491
4492 emit_asm(ir, TGSI_OPCODE_RET);
4493 }
4494
4495 void
4496 glsl_to_tgsi_visitor::visit(ir_discard *ir)
4497 {
4498 if (ir->condition) {
4499 ir->condition->accept(this);
4500 st_src_reg condition = this->result;
4501
4502 /* Convert the bool condition to a float so we can negate. */
4503 if (native_integers) {
4504 st_src_reg temp = get_temp(ir->condition->type);
4505 emit_asm(ir, TGSI_OPCODE_AND, st_dst_reg(temp),
4506 condition, st_src_reg_for_float(1.0));
4507 condition = temp;
4508 }
4509
4510 condition.negate = ~condition.negate;
4511 emit_asm(ir, TGSI_OPCODE_KILL_IF, undef_dst, condition);
4512 } else {
4513 /* unconditional kill */
4514 emit_asm(ir, TGSI_OPCODE_KILL);
4515 }
4516 }
4517
4518 void
4519 glsl_to_tgsi_visitor::visit(ir_if *ir)
4520 {
4521 enum tgsi_opcode if_opcode;
4522 glsl_to_tgsi_instruction *if_inst;
4523
4524 ir->condition->accept(this);
4525 assert(this->result.file != PROGRAM_UNDEFINED);
4526
4527 if_opcode = native_integers ? TGSI_OPCODE_UIF : TGSI_OPCODE_IF;
4528
4529 if_inst = emit_asm(ir->condition, if_opcode, undef_dst, this->result);
4530
4531 this->instructions.push_tail(if_inst);
4532
4533 visit_exec_list(&ir->then_instructions, this);
4534
4535 if (!ir->else_instructions.is_empty()) {
4536 emit_asm(ir->condition, TGSI_OPCODE_ELSE);
4537 visit_exec_list(&ir->else_instructions, this);
4538 }
4539
4540 if_inst = emit_asm(ir->condition, TGSI_OPCODE_ENDIF);
4541 }
4542
4543
4544 void
4545 glsl_to_tgsi_visitor::visit(ir_emit_vertex *ir)
4546 {
4547 assert(this->prog->Target == GL_GEOMETRY_PROGRAM_NV);
4548
4549 ir->stream->accept(this);
4550 emit_asm(ir, TGSI_OPCODE_EMIT, undef_dst, this->result);
4551 }
4552
4553 void
4554 glsl_to_tgsi_visitor::visit(ir_end_primitive *ir)
4555 {
4556 assert(this->prog->Target == GL_GEOMETRY_PROGRAM_NV);
4557
4558 ir->stream->accept(this);
4559 emit_asm(ir, TGSI_OPCODE_ENDPRIM, undef_dst, this->result);
4560 }
4561
4562 void
4563 glsl_to_tgsi_visitor::visit(ir_barrier *ir)
4564 {
4565 assert(this->prog->Target == GL_TESS_CONTROL_PROGRAM_NV ||
4566 this->prog->Target == GL_COMPUTE_PROGRAM_NV);
4567
4568 emit_asm(ir, TGSI_OPCODE_BARRIER);
4569 }
4570
4571 glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
4572 {
4573 STATIC_ASSERT(sizeof(samplers_used) * 8 >= PIPE_MAX_SAMPLERS);
4574
4575 result.file = PROGRAM_UNDEFINED;
4576 next_temp = 1;
4577 array_sizes = NULL;
4578 max_num_arrays = 0;
4579 next_array = 0;
4580 num_inputs = 0;
4581 num_outputs = 0;
4582 num_input_arrays = 0;
4583 num_output_arrays = 0;
4584 num_atomics = 0;
4585 num_atomic_arrays = 0;
4586 num_immediates = 0;
4587 num_address_regs = 0;
4588 samplers_used = 0;
4589 images_used = 0;
4590 indirect_addr_consts = false;
4591 wpos_transform_const = -1;
4592 native_integers = false;
4593 mem_ctx = ralloc_context(NULL);
4594 ctx = NULL;
4595 prog = NULL;
4596 precise = 0;
4597 shader_program = NULL;
4598 shader = NULL;
4599 options = NULL;
4600 have_sqrt = false;
4601 have_fma = false;
4602 use_shared_memory = false;
4603 has_tex_txf_lz = false;
4604 variables = NULL;
4605 }
4606
4607 static void var_destroy(struct hash_entry *entry)
4608 {
4609 variable_storage *storage = (variable_storage *)entry->data;
4610
4611 delete storage;
4612 }
4613
4614 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
4615 {
4616 _mesa_hash_table_destroy(variables, var_destroy);
4617 free(array_sizes);
4618 ralloc_free(mem_ctx);
4619 }
4620
4621 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v)
4622 {
4623 delete v;
4624 }
4625
4626
4627 /**
4628 * Count resources used by the given gpu program (number of texture
4629 * samplers, etc).
4630 */
4631 static void
4632 count_resources(glsl_to_tgsi_visitor *v, gl_program *prog)
4633 {
4634 v->samplers_used = 0;
4635 v->images_used = 0;
4636 prog->info.textures_used_by_txf = 0;
4637
4638 foreach_in_list(glsl_to_tgsi_instruction, inst, &v->instructions) {
4639 if (inst->info->is_tex) {
4640 for (int i = 0; i < inst->sampler_array_size; i++) {
4641 unsigned idx = inst->sampler_base + i;
4642 v->samplers_used |= 1u << idx;
4643
4644 debug_assert(idx < (int)ARRAY_SIZE(v->sampler_types));
4645 v->sampler_types[idx] = inst->tex_type;
4646 v->sampler_targets[idx] =
4647 st_translate_texture_target(inst->tex_target, inst->tex_shadow);
4648
4649 if (inst->op == TGSI_OPCODE_TXF || inst->op == TGSI_OPCODE_TXF_LZ) {
4650 prog->info.textures_used_by_txf |= 1u << idx;
4651 }
4652 }
4653 }
4654
4655 if (inst->tex_target == TEXTURE_EXTERNAL_INDEX)
4656 prog->ExternalSamplersUsed |= 1 << inst->resource.index;
4657
4658 if (inst->resource.file != PROGRAM_UNDEFINED && (
4659 is_resource_instruction(inst->op) ||
4660 inst->op == TGSI_OPCODE_STORE)) {
4661 if (inst->resource.file == PROGRAM_MEMORY) {
4662 v->use_shared_memory = true;
4663 } else if (inst->resource.file == PROGRAM_IMAGE) {
4664 for (int i = 0; i < inst->sampler_array_size; i++) {
4665 unsigned idx = inst->sampler_base + i;
4666 v->images_used |= 1 << idx;
4667 v->image_targets[idx] =
4668 st_translate_texture_target(inst->tex_target, false);
4669 v->image_formats[idx] = inst->image_format;
4670 }
4671 }
4672 }
4673 }
4674 prog->SamplersUsed = v->samplers_used;
4675
4676 if (v->shader_program != NULL)
4677 _mesa_update_shader_textures_used(v->shader_program, prog);
4678 }
4679
4680 /**
4681 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
4682 * are read from the given src in this instruction
4683 */
4684 static int
4685 get_src_arg_mask(st_dst_reg dst, st_src_reg src)
4686 {
4687 int read_mask = 0, comp;
4688
4689 /* Now, given the src swizzle and the written channels, find which
4690 * components are actually read
4691 */
4692 for (comp = 0; comp < 4; ++comp) {
4693 const unsigned coord = GET_SWZ(src.swizzle, comp);
4694 assert(coord < 4);
4695 if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
4696 read_mask |= 1 << coord;
4697 }
4698
4699 return read_mask;
4700 }
4701
4702 /**
4703 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
4704 * instruction is the first instruction to write to register T0. There are
4705 * several lowering passes done in GLSL IR (e.g. branches and
4706 * relative addressing) that create a large number of conditional assignments
4707 * that ir_to_mesa converts to CMP instructions like the one mentioned above.
4708 *
4709 * Here is why this conversion is safe:
4710 * CMP T0, T1 T2 T0 can be expanded to:
4711 * if (T1 < 0.0)
4712 * MOV T0, T2;
4713 * else
4714 * MOV T0, T0;
4715 *
4716 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
4717 * as the original program. If (T1 < 0.0) evaluates to false, executing
4718 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
4719 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
4720 * because any instruction that was going to read from T0 after this was going
4721 * to read a garbage value anyway.
4722 */
4723 void
4724 glsl_to_tgsi_visitor::simplify_cmp(void)
4725 {
4726 int tempWritesSize = 0;
4727 unsigned *tempWrites = NULL;
4728 unsigned outputWrites[VARYING_SLOT_TESS_MAX];
4729
4730 memset(outputWrites, 0, sizeof(outputWrites));
4731
4732 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4733 unsigned prevWriteMask = 0;
4734
4735 /* Give up if we encounter relative addressing or flow control. */
4736 if (inst->dst[0].reladdr || inst->dst[0].reladdr2 ||
4737 inst->dst[1].reladdr || inst->dst[1].reladdr2 ||
4738 inst->info->is_branch ||
4739 inst->op == TGSI_OPCODE_CONT ||
4740 inst->op == TGSI_OPCODE_END ||
4741 inst->op == TGSI_OPCODE_RET) {
4742 break;
4743 }
4744
4745 if (inst->dst[0].file == PROGRAM_OUTPUT) {
4746 assert(inst->dst[0].index < (signed)ARRAY_SIZE(outputWrites));
4747 prevWriteMask = outputWrites[inst->dst[0].index];
4748 outputWrites[inst->dst[0].index] |= inst->dst[0].writemask;
4749 } else if (inst->dst[0].file == PROGRAM_TEMPORARY) {
4750 if (inst->dst[0].index >= tempWritesSize) {
4751 const int inc = 4096;
4752
4753 tempWrites = (unsigned*)
4754 realloc(tempWrites,
4755 (tempWritesSize + inc) * sizeof(unsigned));
4756 if (!tempWrites)
4757 return;
4758
4759 memset(tempWrites + tempWritesSize, 0, inc * sizeof(unsigned));
4760 tempWritesSize += inc;
4761 }
4762
4763 prevWriteMask = tempWrites[inst->dst[0].index];
4764 tempWrites[inst->dst[0].index] |= inst->dst[0].writemask;
4765 } else
4766 continue;
4767
4768 /* For a CMP to be considered a conditional write, the destination
4769 * register and the third source register (src[2]) must be the same. */
4770 if (inst->op == TGSI_OPCODE_CMP
4771 && !(inst->dst[0].writemask & prevWriteMask)
4772 && inst->src[2].file == inst->dst[0].file
4773 && inst->src[2].index == inst->dst[0].index
4774 && inst->dst[0].writemask ==
4775 get_src_arg_mask(inst->dst[0], inst->src[2])) {
4776
4777 inst->op = TGSI_OPCODE_MOV;
4778 inst->info = tgsi_get_opcode_info(inst->op);
4779 inst->src[0] = inst->src[1];
4780 }
4781 }
4782
4783 free(tempWrites);
4784 }
4785
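/* Apply a temporary-register rename to a single source register, if any. */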
4786 static void
4787 rename_temp_handle_src(struct rename_reg_pair *renames, st_src_reg *src)
4788 {
4789 if (src && src->file == PROGRAM_TEMPORARY) {
4790 int old_idx = src->index;
4791 if (renames[old_idx].valid)
4792 src->index = renames[old_idx].new_reg;
4793 }
4794 }
4795
4796 /* Replaces all references to a temporary register index with another index. */
4797 void
4798 glsl_to_tgsi_visitor::rename_temp_registers(struct rename_reg_pair *renames)
4799 {
4800 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4801 unsigned j;
4802 for (j = 0; j < num_inst_src_regs(inst); j++) {
4803 rename_temp_handle_src(renames, &inst->src[j]);
4804 rename_temp_handle_src(renames, inst->src[j].reladdr);
4805 rename_temp_handle_src(renames, inst->src[j].reladdr2);
4806 }
4807
4808 for (j = 0; j < inst->tex_offset_num_offset; j++) {
4809 rename_temp_handle_src(renames, &inst->tex_offsets[j]);
4810 rename_temp_handle_src(renames, inst->tex_offsets[j].reladdr);
4811 rename_temp_handle_src(renames, inst->tex_offsets[j].reladdr2);
4812 }
4813
4814 rename_temp_handle_src(renames, &inst->resource);
4815 rename_temp_handle_src(renames, inst->resource.reladdr);
4816 rename_temp_handle_src(renames, inst->resource.reladdr2);
4817
4818 for (j = 0; j < num_inst_dst_regs(inst); j++) {
4819 if (inst->dst[j].file == PROGRAM_TEMPORARY) {
4820 int old_idx = inst->dst[j].index;
4821 if (renames[old_idx].valid)
4822 inst->dst[j].index = renames[old_idx].new_reg;
4823 }
4824 rename_temp_handle_src(renames, inst->dst[j].reladdr);
4825 rename_temp_handle_src(renames, inst->dst[j].reladdr2);
4826 }
4827 }
4828 }
4829
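/**
 * Record, for every temporary, the instruction index of its first write.
 * Writes that happen inside a loop are conservatively attributed to the
 * enclosing BGNLOOP so the register is considered live for the whole loop.
 */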
4830 void
4831 glsl_to_tgsi_visitor::get_first_temp_write(int *first_writes)
4832 {
4833 int depth = 0; /* loop depth */
4834 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
4835 unsigned i = 0, j;
4836
4837 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4838 for (j = 0; j < num_inst_dst_regs(inst); j++) {
4839 if (inst->dst[j].file == PROGRAM_TEMPORARY) {
4840 if (first_writes[inst->dst[j].index] == -1)
4841 first_writes[inst->dst[j].index] = (depth == 0) ? i : loop_start;
4842 }
4843 }
4844
4845 if (inst->op == TGSI_OPCODE_BGNLOOP) {
4846 if (depth++ == 0)
4847 loop_start = i;
4848 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
4849 if (--depth == 0)
4850 loop_start = -1;
4851 }
4852 assert(depth >= 0);
4853 i++;
4854 }
4855 }
4856
4857 void
4858 glsl_to_tgsi_visitor::get_first_temp_read(int *first_reads)
4859 {
4860 int depth = 0; /* loop depth */
4861 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
4862 unsigned i = 0, j;
4863
4864 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4865 for (j = 0; j < num_inst_src_regs(inst); j++) {
4866 if (inst->src[j].file == PROGRAM_TEMPORARY) {
4867 if (first_reads[inst->src[j].index] == -1)
4868 first_reads[inst->src[j].index] = (depth == 0) ? i : loop_start;
4869 }
4870 }
4871 for (j = 0; j < inst->tex_offset_num_offset; j++) {
4872 if (inst->tex_offsets[j].file == PROGRAM_TEMPORARY) {
4873 if (first_reads[inst->tex_offsets[j].index] == -1)
4874 first_reads[inst->tex_offsets[j].index] = (depth == 0) ? i : loop_start;
4875 }
4876 }
4877 if (inst->op == TGSI_OPCODE_BGNLOOP) {
4878 if (depth++ == 0)
4879 loop_start = i;
4880 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
4881 if (--depth == 0)
4882 loop_start = -1;
4883 }
4884 assert(depth >= 0);
4885 i++;
4886 }
4887 }
4888
4889 void
4890 glsl_to_tgsi_visitor::get_last_temp_read_first_temp_write(int *last_reads, int *first_writes)
4891 {
4892 int depth = 0; /* loop depth */
4893 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
4894 unsigned i = 0, j;
4895 int k;
4896 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4897 for (j = 0; j < num_inst_src_regs(inst); j++) {
4898 if (inst->src[j].file == PROGRAM_TEMPORARY)
4899 last_reads[inst->src[j].index] = (depth == 0) ? i : -2;
4900 }
4901 for (j = 0; j < num_inst_dst_regs(inst); j++) {
4902 if (inst->dst[j].file == PROGRAM_TEMPORARY) {
4903 if (first_writes[inst->dst[j].index] == -1)
4904 first_writes[inst->dst[j].index] = (depth == 0) ? i : loop_start;
4905 last_reads[inst->dst[j].index] = (depth == 0) ? i : -2;
4906 }
4907 }
4908 for (j = 0; j < inst->tex_offset_num_offset; j++) {
4909 if (inst->tex_offsets[j].file == PROGRAM_TEMPORARY)
4910 last_reads[inst->tex_offsets[j].index] = (depth == 0) ? i : -2;
4911 }
4912 if (inst->op == TGSI_OPCODE_BGNLOOP) {
4913 if (depth++ == 0)
4914 loop_start = i;
4915 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
4916 if (--depth == 0) {
4917 loop_start = -1;
4918 for (k = 0; k < this->next_temp; k++) {
4919 if (last_reads[k] == -2) {
4920 last_reads[k] = i;
4921 }
4922 }
4923 }
4924 }
4925 assert(depth >= 0);
4926 i++;
4927 }
4928 }
4929
4930 void
4931 glsl_to_tgsi_visitor::get_last_temp_write(int *last_writes)
4932 {
4933 int depth = 0; /* loop depth */
4934 int i = 0, k;
4935 unsigned j;
4936
4937 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4938 for (j = 0; j < num_inst_dst_regs(inst); j++) {
4939 if (inst->dst[j].file == PROGRAM_TEMPORARY)
4940 last_writes[inst->dst[j].index] = (depth == 0) ? i : -2;
4941 }
4942
4943 if (inst->op == TGSI_OPCODE_BGNLOOP)
4944 depth++;
4945 else if (inst->op == TGSI_OPCODE_ENDLOOP)
4946 if (--depth == 0) {
4947 for (k = 0; k < this->next_temp; k++) {
4948 if (last_writes[k] == -2) {
4949 last_writes[k] = i;
4950 }
4951 }
4952 }
4953 assert(depth >= 0);
4954 i++;
4955 }
4956 }
4957
4958 /*
4959 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
4960 * channels for copy propagation and updates following instructions to
4961 * use the original versions.
4962 *
4963 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
4964 * will occur. As an example, a TXP production before this pass:
4965 *
4966 * 0: MOV TEMP[1], INPUT[4].xyyy;
4967 * 1: MOV TEMP[1].w, INPUT[4].wwww;
4968 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
4969 *
4970 * and after:
4971 *
4972 * 0: MOV TEMP[1], INPUT[4].xyyy;
4973 * 1: MOV TEMP[1].w, INPUT[4].wwww;
4974 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
4975 *
4976 * which allows for dead code elimination on TEMP[1]'s writes.
4977 */
4978 void
4979 glsl_to_tgsi_visitor::copy_propagate(void)
4980 {
4981 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx,
4982 glsl_to_tgsi_instruction *,
4983 this->next_temp * 4);
4984 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
4985 int level = 0;
4986
4987 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
4988 assert(inst->dst[0].file != PROGRAM_TEMPORARY
4989 || inst->dst[0].index < this->next_temp);
4990
4991 /* First, do any copy propagation possible into the src regs. */
4992 for (int r = 0; r < 3; r++) {
4993 glsl_to_tgsi_instruction *first = NULL;
4994 bool good = true;
4995 int acp_base = inst->src[r].index * 4;
4996
4997 if (inst->src[r].file != PROGRAM_TEMPORARY ||
4998 inst->src[r].reladdr ||
4999 inst->src[r].reladdr2)
5000 continue;
5001
5002 /* See if we can find entries in the ACP consisting of MOVs
5003 * from the same src register for all the swizzled channels
5004 * of this src register reference.
5005 */
5006 for (int i = 0; i < 4; i++) {
5007 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
5008 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan];
5009
5010 if (!copy_chan) {
5011 good = false;
5012 break;
5013 }
5014
5015 assert(acp_level[acp_base + src_chan] <= level);
5016
5017 if (!first) {
5018 first = copy_chan;
5019 } else {
5020 if (first->src[0].file != copy_chan->src[0].file ||
5021 first->src[0].index != copy_chan->src[0].index ||
5022 first->src[0].double_reg2 != copy_chan->src[0].double_reg2 ||
5023 first->src[0].index2D != copy_chan->src[0].index2D) {
5024 good = false;
5025 break;
5026 }
5027 }
5028 }
5029
5030 if (good) {
5031 /* We've now validated that we can copy-propagate to
5032 * replace this src register reference. Do it.
5033 */
5034 inst->src[r].file = first->src[0].file;
5035 inst->src[r].index = first->src[0].index;
5036 inst->src[r].index2D = first->src[0].index2D;
5037 inst->src[r].has_index2 = first->src[0].has_index2;
5038 inst->src[r].double_reg2 = first->src[0].double_reg2;
5039 inst->src[r].array_id = first->src[0].array_id;
5040
5041 int swizzle = 0;
5042 for (int i = 0; i < 4; i++) {
5043 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
5044 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan];
5045 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) << (3 * i));
5046 }
5047 inst->src[r].swizzle = swizzle;
5048 }
5049 }
5050
5051 switch (inst->op) {
5052 case TGSI_OPCODE_BGNLOOP:
5053 case TGSI_OPCODE_ENDLOOP:
5054 /* End of a basic block, clear the ACP entirely. */
5055 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
5056 break;
5057
5058 case TGSI_OPCODE_IF:
5059 case TGSI_OPCODE_UIF:
5060 ++level;
5061 break;
5062
5063 case TGSI_OPCODE_ENDIF:
5064 case TGSI_OPCODE_ELSE:
5065 /* Clear from the ACP all channels that were written inside the block,
5066 * leaving those that were not touched.
5067 */
5068 for (int r = 0; r < this->next_temp; r++) {
5069 for (int c = 0; c < 4; c++) {
5070 if (!acp[4 * r + c])
5071 continue;
5072
5073 if (acp_level[4 * r + c] >= level)
5074 acp[4 * r + c] = NULL;
5075 }
5076 }
5077 if (inst->op == TGSI_OPCODE_ENDIF)
5078 --level;
5079 break;
5080
5081 default:
5082 /* Continuing the block, clear any written channels from
5083 * the ACP.
5084 */
5085 for (int d = 0; d < 2; d++) {
5086 if (inst->dst[d].file == PROGRAM_TEMPORARY && inst->dst[d].reladdr) {
5087 /* Any temporary might be written, so no copy propagation
5088 * across this instruction.
5089 */
5090 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
5091 } else if (inst->dst[d].file == PROGRAM_OUTPUT &&
5092 inst->dst[d].reladdr) {
5093 /* Any output might be written, so no copy propagation
5094 * from outputs across this instruction.
5095 */
5096 for (int r = 0; r < this->next_temp; r++) {
5097 for (int c = 0; c < 4; c++) {
5098 if (!acp[4 * r + c])
5099 continue;
5100
5101 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT)
5102 acp[4 * r + c] = NULL;
5103 }
5104 }
5105 } else if (inst->dst[d].file == PROGRAM_TEMPORARY ||
5106 inst->dst[d].file == PROGRAM_OUTPUT) {
5107 /* Clear where it's used as dst. */
5108 if (inst->dst[d].file == PROGRAM_TEMPORARY) {
5109 for (int c = 0; c < 4; c++) {
5110 if (inst->dst[d].writemask & (1 << c))
5111 acp[4 * inst->dst[d].index + c] = NULL;
5112 }
5113 }
5114
5115 /* Clear where it's used as src. */
5116 for (int r = 0; r < this->next_temp; r++) {
5117 for (int c = 0; c < 4; c++) {
5118 if (!acp[4 * r + c])
5119 continue;
5120
5121 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c);
5122
5123 if (acp[4 * r + c]->src[0].file == inst->dst[d].file &&
5124 acp[4 * r + c]->src[0].index == inst->dst[d].index &&
5125 inst->dst[d].writemask & (1 << src_chan)) {
5126 acp[4 * r + c] = NULL;
5127 }
5128 }
5129 }
5130 }
5131 }
5132 break;
5133 }
5134
5135 /* If this is a copy, add it to the ACP. */
5136 if (inst->op == TGSI_OPCODE_MOV &&
5137 inst->dst[0].file == PROGRAM_TEMPORARY &&
5138 !(inst->dst[0].file == inst->src[0].file &&
5139 inst->dst[0].index == inst->src[0].index) &&
5140 !inst->dst[0].reladdr &&
5141 !inst->dst[0].reladdr2 &&
5142 !inst->saturate &&
5143 inst->src[0].file != PROGRAM_ARRAY &&
5144 (inst->src[0].file != PROGRAM_OUTPUT ||
5145 this->shader->Stage != MESA_SHADER_TESS_CTRL) &&
5146 !inst->src[0].reladdr &&
5147 !inst->src[0].reladdr2 &&
5148 !inst->src[0].negate &&
5149 !inst->src[0].abs) {
5150 for (int i = 0; i < 4; i++) {
5151 if (inst->dst[0].writemask & (1 << i)) {
5152 acp[4 * inst->dst[0].index + i] = inst;
5153 acp_level[4 * inst->dst[0].index + i] = level;
5154 }
5155 }
5156 }
5157 }
5158
5159 ralloc_free(acp_level);
5160 ralloc_free(acp);
5161 }
5162
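/* A relative-address register reads one channel of a temporary; drop any
 * pending write record for that channel so the write is not marked dead.
 */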
5163 static void
5164 dead_code_handle_reladdr(glsl_to_tgsi_instruction **writes, st_src_reg *reladdr)
5165 {
5166 if (reladdr && reladdr->file == PROGRAM_TEMPORARY) {
5167 /* Clear where it's used as src. */
5168 int swz = GET_SWZ(reladdr->swizzle, 0);
5169 writes[4 * reladdr->index + swz] = NULL;
5170 }
5171 }
5172
5173 /*
5174 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
5175 * code elimination.
5176 *
5177 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
5178 * will occur. As an example, a TXP production after copy propagation but
5179 * before this pass:
5180 *
5181 * 0: MOV TEMP[1], INPUT[4].xyyy;
5182 * 1: MOV TEMP[1].w, INPUT[4].wwww;
5183 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5184 *
5185 * and after this pass:
5186 *
5187 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5188 */
5189 int
5190 glsl_to_tgsi_visitor::eliminate_dead_code(void)
5191 {
5192 glsl_to_tgsi_instruction **writes = rzalloc_array(mem_ctx,
5193 glsl_to_tgsi_instruction *,
5194 this->next_temp * 4);
5195 int *write_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
5196 int level = 0;
5197 int removed = 0;
5198
5199 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5200 assert(inst->dst[0].file != PROGRAM_TEMPORARY
5201 || inst->dst[0].index < this->next_temp);
5202
5203 switch (inst->op) {
5204 case TGSI_OPCODE_BGNLOOP:
5205 case TGSI_OPCODE_ENDLOOP:
5206 case TGSI_OPCODE_CONT:
5207 case TGSI_OPCODE_BRK:
5208 /* End of a basic block, clear the write array entirely.
5209 *
5210 * This keeps us from killing dead code when the writes are
5211 * on either side of a loop, even when the register isn't touched
5212 * inside the loop. However, glsl_to_tgsi_visitor doesn't seem to emit
5213 * dead code of this type, so it shouldn't make a difference as long as
5214 * the dead code elimination pass in the GLSL compiler does its job.
5215 */
5216 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
5217 break;
5218
5219 case TGSI_OPCODE_ENDIF:
5220 case TGSI_OPCODE_ELSE:
5221 /* Promote the recorded level of all channels written inside the
5222 * preceding if or else block to the level above the if/else block.
5223 */
5224 for (int r = 0; r < this->next_temp; r++) {
5225 for (int c = 0; c < 4; c++) {
5226 if (!writes[4 * r + c])
5227 continue;
5228
5229 if (write_level[4 * r + c] == level)
5230 write_level[4 * r + c] = level-1;
5231 }
5232 }
5233 if (inst->op == TGSI_OPCODE_ENDIF)
5234 --level;
5235 break;
5236
5237 case TGSI_OPCODE_IF:
5238 case TGSI_OPCODE_UIF:
5239 ++level;
5240 /* fallthrough to default case to mark the condition as read */
5241 default:
5242 /* Continuing the block, clear any channels from the write array that
5243 * are read by this instruction.
5244 */
5245 for (unsigned i = 0; i < ARRAY_SIZE(inst->src); i++) {
5246 if (inst->src[i].file == PROGRAM_TEMPORARY && inst->src[i].reladdr){
5247 /* Any temporary might be read, so no dead code elimination
5248 * across this instruction.
5249 */
5250 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
5251 } else if (inst->src[i].file == PROGRAM_TEMPORARY) {
5252 /* Clear where it's used as src. */
5253 int src_chans = 1 << GET_SWZ(inst->src[i].swizzle, 0);
5254 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 1);
5255 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 2);
5256 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 3);
5257
5258 for (int c = 0; c < 4; c++) {
5259 if (src_chans & (1 << c))
5260 writes[4 * inst->src[i].index + c] = NULL;
5261 }
5262 }
5263 dead_code_handle_reladdr(writes, inst->src[i].reladdr);
5264 dead_code_handle_reladdr(writes, inst->src[i].reladdr2);
5265 }
5266 for (unsigned i = 0; i < inst->tex_offset_num_offset; i++) {
5267 if (inst->tex_offsets[i].file == PROGRAM_TEMPORARY && inst->tex_offsets[i].reladdr){
5268 /* Any temporary might be read, so no dead code elimination
5269 * across this instruction.
5270 */
5271 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
5272 } else if (inst->tex_offsets[i].file == PROGRAM_TEMPORARY) {
5273 /* Clear where it's used as src. */
5274 int src_chans = 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 0);
5275 src_chans |= 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 1);
5276 src_chans |= 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 2);
5277 src_chans |= 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 3);
5278
5279 for (int c = 0; c < 4; c++) {
5280 if (src_chans & (1 << c))
5281 writes[4 * inst->tex_offsets[i].index + c] = NULL;
5282 }
5283 }
5284 dead_code_handle_reladdr(writes, inst->tex_offsets[i].reladdr);
5285 dead_code_handle_reladdr(writes, inst->tex_offsets[i].reladdr2);
5286 }
5287
5288 if (inst->resource.file == PROGRAM_TEMPORARY) {
5289 int src_chans;
5290
5291 src_chans = 1 << GET_SWZ(inst->resource.swizzle, 0);
5292 src_chans |= 1 << GET_SWZ(inst->resource.swizzle, 1);
5293 src_chans |= 1 << GET_SWZ(inst->resource.swizzle, 2);
5294 src_chans |= 1 << GET_SWZ(inst->resource.swizzle, 3);
5295
5296 for (int c = 0; c < 4; c++) {
5297 if (src_chans & (1 << c))
5298 writes[4 * inst->resource.index + c] = NULL;
5299 }
5300 }
5301 dead_code_handle_reladdr(writes, inst->resource.reladdr);
5302 dead_code_handle_reladdr(writes, inst->resource.reladdr2);
5303
5304 for (unsigned i = 0; i < ARRAY_SIZE(inst->dst); i++) {
5305 dead_code_handle_reladdr(writes, inst->dst[i].reladdr);
5306 dead_code_handle_reladdr(writes, inst->dst[i].reladdr2);
5307 }
5308 break;
5309 }
5310
5311 /* If this instruction writes to a temporary, add it to the write array.
5312 * If there is already an instruction in the write array for one or more
5313 * of the channels, flag that channel write as dead.
5314 */
5315 for (unsigned i = 0; i < ARRAY_SIZE(inst->dst); i++) {
5316 if (inst->dst[i].file == PROGRAM_TEMPORARY &&
5317 !inst->dst[i].reladdr) {
5318 for (int c = 0; c < 4; c++) {
5319 if (inst->dst[i].writemask & (1 << c)) {
5320 if (writes[4 * inst->dst[i].index + c]) {
5321 if (write_level[4 * inst->dst[i].index + c] < level)
5322 continue;
5323 else
5324 writes[4 * inst->dst[i].index + c]->dead_mask |= (1 << c);
5325 }
5326 writes[4 * inst->dst[i].index + c] = inst;
5327 write_level[4 * inst->dst[i].index + c] = level;
5328 }
5329 }
5330 }
5331 }
5332 }
5333
5334 /* Anything still in the write array at this point is dead code. */
5335 for (int r = 0; r < this->next_temp; r++) {
5336 for (int c = 0; c < 4; c++) {
5337 glsl_to_tgsi_instruction *inst = writes[4 * r + c];
5338 if (inst)
5339 inst->dead_mask |= (1 << c);
5340 }
5341 }
5342
5343 /* Now actually remove the instructions that are completely dead and update
5344 * the writemask of other instructions with dead channels.
5345 */
5346 foreach_in_list_safe(glsl_to_tgsi_instruction, inst, &this->instructions) {
5347 if (!inst->dead_mask || !inst->dst[0].writemask)
5348 continue;
5349 /* No amount of dead masks should remove memory stores */
5350 if (inst->info->is_store)
5351 continue;
5352
5353 if ((inst->dst[0].writemask & ~inst->dead_mask) == 0) {
5354 inst->remove();
5355 delete inst;
5356 removed++;
5357 } else {
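/* For 64-bit destinations each logical component occupies two writemask
 * channels, so only a complete XY or ZW pair may be cleared; a partially
 * dead half must keep its write.
 */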
5358 if (glsl_base_type_is_64bit(inst->dst[0].type)) {
5359 if (inst->dead_mask == WRITEMASK_XY ||
5360 inst->dead_mask == WRITEMASK_ZW)
5361 inst->dst[0].writemask &= ~(inst->dead_mask);
5362 } else
5363 inst->dst[0].writemask &= ~(inst->dead_mask);
5364 }
5365 }
5366
5367 ralloc_free(write_level);
5368 ralloc_free(writes);
5369
5370 return removed;
5371 }
5372
5373 /* Merge pairs of two-destination instructions (e.g. DFRACEXP) that each define only one destination into a single instruction. */
5374 void
5375 glsl_to_tgsi_visitor::merge_two_dsts(void)
5376 {
5377 /* We never delete inst, but we may delete its successor. */
5378 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5379 glsl_to_tgsi_instruction *inst2;
5380 unsigned defined;
5381
5382 if (num_inst_dst_regs(inst) != 2)
5383 continue;
5384
5385 if (inst->dst[0].file != PROGRAM_UNDEFINED &&
5386 inst->dst[1].file != PROGRAM_UNDEFINED)
5387 continue;
5388
5389 assert(inst->dst[0].file != PROGRAM_UNDEFINED ||
5390 inst->dst[1].file != PROGRAM_UNDEFINED);
5391
5392 if (inst->dst[0].file == PROGRAM_UNDEFINED)
5393 defined = 1;
5394 else
5395 defined = 0;
5396
5397 inst2 = (glsl_to_tgsi_instruction *) inst->next;
5398 while (!inst2->is_tail_sentinel()) {
5399 if (inst->op == inst2->op &&
5400 inst2->dst[defined].file == PROGRAM_UNDEFINED &&
5401 inst->src[0].file == inst2->src[0].file &&
5402 inst->src[0].index == inst2->src[0].index &&
5403 inst->src[0].type == inst2->src[0].type &&
5404 inst->src[0].swizzle == inst2->src[0].swizzle)
5405 break;
5406 inst2 = (glsl_to_tgsi_instruction *) inst2->next;
5407 }
5408
5409 if (inst2->is_tail_sentinel()) {
5410 /* Undefined destinations are not allowed, substitute with an unused
5411 * temporary register.
5412 */
5413 st_src_reg tmp = get_temp(glsl_type::vec4_type);
5414 inst->dst[defined ^ 1] = st_dst_reg(tmp);
5415 inst->dst[defined ^ 1].writemask = 0;
5416 continue;
5417 }
5418
5419 inst->dst[defined ^ 1] = inst2->dst[defined ^ 1];
5420 inst2->remove();
5421 delete inst2;
5422 }
5423 }
5424
5425 /* Merges temporary registers together where possible to reduce the number of
5426 * registers needed to run a program.
5427 *
5428 * Produces optimal code only after copy propagation and dead code elimination
5429 * have been run. */
5430 void
5431 glsl_to_tgsi_visitor::merge_registers(void)
5432 {
5433 struct lifetime *lifetimes =
5434 rzalloc_array(mem_ctx, struct lifetime, this->next_temp);
5435
5436 if (get_temp_registers_required_lifetimes(mem_ctx, &this->instructions,
5437 this->next_temp, lifetimes)) {
5438 struct rename_reg_pair *renames =
5439 rzalloc_array(mem_ctx, struct rename_reg_pair, this->next_temp);
5440 get_temp_registers_remapping(mem_ctx, this->next_temp, lifetimes, renames);
5441 rename_temp_registers(renames);
5442 ralloc_free(renames);
5443 }
5444
5445 ralloc_free(lifetimes);
5446 }
5447
5448 /* Reassign indices to temporary registers by reusing unused indices created
5449 * by optimization passes. */
5450 void
5451 glsl_to_tgsi_visitor::renumber_registers(void)
5452 {
5453 int i = 0;
5454 int new_index = 0;
5455 int *first_writes = ralloc_array(mem_ctx, int, this->next_temp);
5456 struct rename_reg_pair *renames = rzalloc_array(mem_ctx, struct rename_reg_pair, this->next_temp);
5457
5458 for (i = 0; i < this->next_temp; i++) {
5459 first_writes[i] = -1;
5460 }
5461 get_first_temp_write(first_writes);
5462
5463 for (i = 0; i < this->next_temp; i++) {
5464 if (first_writes[i] < 0) continue;
5465 if (i != new_index) {
5466 renames[i].new_reg = new_index;
5467 renames[i].valid = true;
5468 }
5469 new_index++;
5470 }
5471
5472 rename_temp_registers(renames);
5473 this->next_temp = new_index;
5474 ralloc_free(renames);
5475 ralloc_free(first_writes);
5476 }
5477
5478 /* ------------------------- TGSI conversion stuff -------------------------- */
5479
5480 /**
5481 * Intermediate state used during shader translation.
5482 */
5483 struct st_translate {
5484 struct ureg_program *ureg;
5485
5486 unsigned temps_size;
5487 struct ureg_dst *temps;
5488
5489 struct ureg_dst *arrays;
5490 unsigned num_temp_arrays;
5491 struct ureg_src *constants;
5492 int num_constants;
5493 struct ureg_src *immediates;
5494 int num_immediates;
5495 struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
5496 struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
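/* Address registers: index 0 is used for reladdr, index 1 for reladdr2
 * (the extra dimension index) and index 2 for resource indirection.
 */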
5497 struct ureg_dst address[3];
5498 struct ureg_src samplers[PIPE_MAX_SAMPLERS];
5499 struct ureg_src buffers[PIPE_MAX_SHADER_BUFFERS];
5500 struct ureg_src images[PIPE_MAX_SHADER_IMAGES];
5501 struct ureg_src systemValues[SYSTEM_VALUE_MAX];
5502 struct ureg_src hw_atomics[PIPE_MAX_HW_ATOMIC_BUFFERS];
5503 struct ureg_src shared_memory;
5504 unsigned *array_sizes;
5505 struct inout_decl *input_decls;
5506 unsigned num_input_decls;
5507 struct inout_decl *output_decls;
5508 unsigned num_output_decls;
5509
5510 const ubyte *inputMapping;
5511 const ubyte *outputMapping;
5512
5513 enum pipe_shader_type procType; /**< PIPE_SHADER_VERTEX/FRAGMENT */
5514 bool need_uarl;
5515 };
5516
5517 /** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
5518 enum tgsi_semantic
5519 _mesa_sysval_to_semantic(unsigned sysval)
5520 {
5521 switch (sysval) {
5522 /* Vertex shader */
5523 case SYSTEM_VALUE_VERTEX_ID:
5524 return TGSI_SEMANTIC_VERTEXID;
5525 case SYSTEM_VALUE_INSTANCE_ID:
5526 return TGSI_SEMANTIC_INSTANCEID;
5527 case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
5528 return TGSI_SEMANTIC_VERTEXID_NOBASE;
5529 case SYSTEM_VALUE_BASE_VERTEX:
5530 return TGSI_SEMANTIC_BASEVERTEX;
5531 case SYSTEM_VALUE_BASE_INSTANCE:
5532 return TGSI_SEMANTIC_BASEINSTANCE;
5533 case SYSTEM_VALUE_DRAW_ID:
5534 return TGSI_SEMANTIC_DRAWID;
5535
5536 /* Geometry shader */
5537 case SYSTEM_VALUE_INVOCATION_ID:
5538 return TGSI_SEMANTIC_INVOCATIONID;
5539
5540 /* Fragment shader */
5541 case SYSTEM_VALUE_FRAG_COORD:
5542 return TGSI_SEMANTIC_POSITION;
5543 case SYSTEM_VALUE_FRONT_FACE:
5544 return TGSI_SEMANTIC_FACE;
5545 case SYSTEM_VALUE_SAMPLE_ID:
5546 return TGSI_SEMANTIC_SAMPLEID;
5547 case SYSTEM_VALUE_SAMPLE_POS:
5548 return TGSI_SEMANTIC_SAMPLEPOS;
5549 case SYSTEM_VALUE_SAMPLE_MASK_IN:
5550 return TGSI_SEMANTIC_SAMPLEMASK;
5551 case SYSTEM_VALUE_HELPER_INVOCATION:
5552 return TGSI_SEMANTIC_HELPER_INVOCATION;
5553
5554 /* Tessellation shader */
5555 case SYSTEM_VALUE_TESS_COORD:
5556 return TGSI_SEMANTIC_TESSCOORD;
5557 case SYSTEM_VALUE_VERTICES_IN:
5558 return TGSI_SEMANTIC_VERTICESIN;
5559 case SYSTEM_VALUE_PRIMITIVE_ID:
5560 return TGSI_SEMANTIC_PRIMID;
5561 case SYSTEM_VALUE_TESS_LEVEL_OUTER:
5562 return TGSI_SEMANTIC_TESSOUTER;
5563 case SYSTEM_VALUE_TESS_LEVEL_INNER:
5564 return TGSI_SEMANTIC_TESSINNER;
5565
5566 /* Compute shader */
5567 case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
5568 return TGSI_SEMANTIC_THREAD_ID;
5569 case SYSTEM_VALUE_WORK_GROUP_ID:
5570 return TGSI_SEMANTIC_BLOCK_ID;
5571 case SYSTEM_VALUE_NUM_WORK_GROUPS:
5572 return TGSI_SEMANTIC_GRID_SIZE;
5573 case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
5574 return TGSI_SEMANTIC_BLOCK_SIZE;
5575
5576 /* ARB_shader_ballot */
5577 case SYSTEM_VALUE_SUBGROUP_SIZE:
5578 return TGSI_SEMANTIC_SUBGROUP_SIZE;
5579 case SYSTEM_VALUE_SUBGROUP_INVOCATION:
5580 return TGSI_SEMANTIC_SUBGROUP_INVOCATION;
5581 case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
5582 return TGSI_SEMANTIC_SUBGROUP_EQ_MASK;
5583 case SYSTEM_VALUE_SUBGROUP_GE_MASK:
5584 return TGSI_SEMANTIC_SUBGROUP_GE_MASK;
5585 case SYSTEM_VALUE_SUBGROUP_GT_MASK:
5586 return TGSI_SEMANTIC_SUBGROUP_GT_MASK;
5587 case SYSTEM_VALUE_SUBGROUP_LE_MASK:
5588 return TGSI_SEMANTIC_SUBGROUP_LE_MASK;
5589 case SYSTEM_VALUE_SUBGROUP_LT_MASK:
5590 return TGSI_SEMANTIC_SUBGROUP_LT_MASK;
5591
5592 /* Unhandled */
5593 case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
5594 case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
5595 case SYSTEM_VALUE_VERTEX_CNT:
5596 default:
5597 assert(!"Unexpected SYSTEM_VALUE_ enum");
5598 return TGSI_SEMANTIC_COUNT;
5599 }
5600 }
5601
5602 /**
5603 * Map a glsl_to_tgsi constant/immediate to a TGSI immediate.
5604 */
5605 static struct ureg_src
5606 emit_immediate(struct st_translate *t,
5607 gl_constant_value values[4],
5608 GLenum type, int size)
5609 {
5610 struct ureg_program *ureg = t->ureg;
5611
5612 switch (type) {
5613 case GL_FLOAT:
5614 return ureg_DECL_immediate(ureg, &values[0].f, size);
5615 case GL_DOUBLE:
5616 return ureg_DECL_immediate_f64(ureg, (double *)&values[0].f, size);
5617 case GL_INT64_ARB:
5618 return ureg_DECL_immediate_int64(ureg, (int64_t *)&values[0].f, size);
5619 case GL_UNSIGNED_INT64_ARB:
5620 return ureg_DECL_immediate_uint64(ureg, (uint64_t *)&values[0].f, size);
5621 case GL_INT:
5622 return ureg_DECL_immediate_int(ureg, &values[0].i, size);
5623 case GL_UNSIGNED_INT:
5624 case GL_BOOL:
5625 return ureg_DECL_immediate_uint(ureg, &values[0].u, size);
5626 default:
5627 assert(!"should not get here - type must be float, int, uint, or bool");
5628 return ureg_src_undef();
5629 }
5630 }
5631
5632 /**
5633 * Map a glsl_to_tgsi dst register to a TGSI ureg_dst register.
5634 */
5635 static struct ureg_dst
5636 dst_register(struct st_translate *t, gl_register_file file, unsigned index,
5637 unsigned array_id)
5638 {
5639 unsigned array;
5640
5641 switch (file) {
5642 case PROGRAM_UNDEFINED:
5643 return ureg_dst_undef();
5644
5645 case PROGRAM_TEMPORARY:
5646 /* Allocate space for temporaries on demand. */
5647 if (index >= t->temps_size) {
5648 const int inc = align(index - t->temps_size + 1, 4096);
5649
5650 t->temps = (struct ureg_dst*)
5651 realloc(t->temps,
5652 (t->temps_size + inc) * sizeof(struct ureg_dst));
5653 if (!t->temps)
5654 return ureg_dst_undef();
5655
5656 memset(t->temps + t->temps_size, 0, inc * sizeof(struct ureg_dst));
5657 t->temps_size += inc;
5658 }
5659
5660 if (ureg_dst_is_undef(t->temps[index]))
5661 t->temps[index] = ureg_DECL_local_temporary(t->ureg);
5662
5663 return t->temps[index];
5664
5665 case PROGRAM_ARRAY:
5666 assert(array_id && array_id <= t->num_temp_arrays);
5667 array = array_id - 1;
5668
5669 if (ureg_dst_is_undef(t->arrays[array]))
5670 t->arrays[array] = ureg_DECL_array_temporary(
5671 t->ureg, t->array_sizes[array], TRUE);
5672
5673 return ureg_dst_array_offset(t->arrays[array], index);
5674
5675 case PROGRAM_OUTPUT:
5676 if (!array_id) {
5677 if (t->procType == PIPE_SHADER_FRAGMENT)
5678 assert(index < 2 * FRAG_RESULT_MAX);
5679 else if (t->procType == PIPE_SHADER_TESS_CTRL ||
5680 t->procType == PIPE_SHADER_TESS_EVAL)
5681 assert(index < VARYING_SLOT_TESS_MAX);
5682 else
5683 assert(index < VARYING_SLOT_MAX);
5684
5685 assert(t->outputMapping[index] < ARRAY_SIZE(t->outputs));
5686 assert(t->outputs[t->outputMapping[index]].File != TGSI_FILE_NULL);
5687 return t->outputs[t->outputMapping[index]];
5688 }
5689 else {
5690 struct inout_decl *decl =
5691 find_inout_array(t->output_decls,
5692 t->num_output_decls, array_id);
5693 unsigned mesa_index = decl->mesa_index;
5694 int slot = t->outputMapping[mesa_index];
5695
5696 assert(slot != -1 && t->outputs[slot].File == TGSI_FILE_OUTPUT);
5697
5698 struct ureg_dst dst = t->outputs[slot];
5699 dst.ArrayID = array_id;
5700 return ureg_dst_array_offset(dst, index - mesa_index);
5701 }
5702
5703 case PROGRAM_ADDRESS:
5704 return t->address[index];
5705
5706 default:
5707 assert(!"unknown dst register file");
5708 return ureg_dst_undef();
5709 }
5710 }
5711
5712 static struct ureg_src
5713 translate_src(struct st_translate *t, const st_src_reg *src_reg);
5714
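/**
 * Return the source operand to use for relative addressing.  If the driver
 * requires UARL, or the register is not a legal TGSI address operand, use
 * the dedicated address register; otherwise the operand can be used for
 * indirect addressing directly.
 */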
5715 static struct ureg_src
5716 translate_addr(struct st_translate *t, const st_src_reg *reladdr,
5717 unsigned addr_index)
5718 {
5719 if (t->need_uarl || !reladdr->is_legal_tgsi_address_operand())
5720 return ureg_src(t->address[addr_index]);
5721
5722 return translate_src(t, reladdr);
5723 }
5724
5725 /**
5726 * Create a TGSI ureg_dst register from an st_dst_reg.
5727 */
5728 static struct ureg_dst
5729 translate_dst(struct st_translate *t,
5730 const st_dst_reg *dst_reg,
5731 bool saturate)
5732 {
5733 struct ureg_dst dst = dst_register(t, dst_reg->file, dst_reg->index,
5734 dst_reg->array_id);
5735
5736 if (dst.File == TGSI_FILE_NULL)
5737 return dst;
5738
5739 dst = ureg_writemask(dst, dst_reg->writemask);
5740
5741 if (saturate)
5742 dst = ureg_saturate(dst);
5743
5744 if (dst_reg->reladdr != NULL) {
5745 assert(dst_reg->file != PROGRAM_TEMPORARY);
5746 dst = ureg_dst_indirect(dst, translate_addr(t, dst_reg->reladdr, 0));
5747 }
5748
5749 if (dst_reg->has_index2) {
5750 if (dst_reg->reladdr2)
5751 dst = ureg_dst_dimension_indirect(dst,
5752 translate_addr(t, dst_reg->reladdr2, 1),
5753 dst_reg->index2D);
5754 else
5755 dst = ureg_dst_dimension(dst, dst_reg->index2D);
5756 }
5757
5758 return dst;
5759 }
5760
5761 /**
5762 * Create a TGSI ureg_src register from an st_src_reg.
5763 */
5764 static struct ureg_src
5765 translate_src(struct st_translate *t, const st_src_reg *src_reg)
5766 {
5767 struct ureg_src src;
5768 int index = src_reg->index;
5769 int double_reg2 = src_reg->double_reg2 ? 1 : 0;
5770
5771 switch (src_reg->file) {
5772 case PROGRAM_UNDEFINED:
5773 src = ureg_imm4f(t->ureg, 0, 0, 0, 0);
5774 break;
5775
5776 case PROGRAM_TEMPORARY:
5777 case PROGRAM_ARRAY:
5778 src = ureg_src(dst_register(t, src_reg->file, src_reg->index,
5779 src_reg->array_id));
5780 break;
5781
5782 case PROGRAM_OUTPUT: {
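/* Outputs may be read back when PIPE_CAP_TGSI_CAN_READ_OUTPUTS is set;
 * swizzle so the read starts at the first channel the output declaration
 * actually writes.
 */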
5783 struct ureg_dst dst = dst_register(t, src_reg->file, src_reg->index,
5784 src_reg->array_id);
5785 assert(dst.WriteMask != 0);
5786 unsigned shift = ffs(dst.WriteMask) - 1;
5787 src = ureg_swizzle(ureg_src(dst),
5788 shift,
5789 MIN2(shift + 1, 3),
5790 MIN2(shift + 2, 3),
5791 MIN2(shift + 3, 3));
5792 break;
5793 }
5794
5795 case PROGRAM_UNIFORM:
5796 assert(src_reg->index >= 0);
5797 src = src_reg->index < t->num_constants ?
5798 t->constants[src_reg->index] : ureg_imm4f(t->ureg, 0, 0, 0, 0);
5799 break;
5800 case PROGRAM_STATE_VAR:
5801 case PROGRAM_CONSTANT: /* ie, immediate */
5802 if (src_reg->has_index2)
5803 src = ureg_src_register(TGSI_FILE_CONSTANT, src_reg->index);
5804 else
5805 src = src_reg->index >= 0 && src_reg->index < t->num_constants ?
5806 t->constants[src_reg->index] : ureg_imm4f(t->ureg, 0, 0, 0, 0);
5807 break;
5808
5809 case PROGRAM_IMMEDIATE:
5810 assert(src_reg->index >= 0 && src_reg->index < t->num_immediates);
5811 src = t->immediates[src_reg->index];
5812 break;
5813
5814 case PROGRAM_INPUT:
5815 /* 64-bit GLSL inputs occupy two slots, so map back to the
5816 * original (first) index and re-apply the second-slot offset
5817 * after mapping. */
5818 index -= double_reg2;
5819 if (!src_reg->array_id) {
5820 assert(t->inputMapping[index] < ARRAY_SIZE(t->inputs));
5821 assert(t->inputs[t->inputMapping[index]].File != TGSI_FILE_NULL);
5822 src = t->inputs[t->inputMapping[index] + double_reg2];
5823 }
5824 else {
5825 struct inout_decl *decl = find_inout_array(t->input_decls,
5826 t->num_input_decls,
5827 src_reg->array_id);
5828 unsigned mesa_index = decl->mesa_index;
5829 int slot = t->inputMapping[mesa_index];
5830
5831 assert(slot != -1 && t->inputs[slot].File == TGSI_FILE_INPUT);
5832
5833 src = t->inputs[slot];
5834 src.ArrayID = src_reg->array_id;
5835 src = ureg_src_array_offset(src, index + double_reg2 - mesa_index);
5836 }
5837 break;
5838
5839 case PROGRAM_ADDRESS:
5840 src = ureg_src(t->address[src_reg->index]);
5841 break;
5842
5843 case PROGRAM_SYSTEM_VALUE:
5844 assert(src_reg->index < (int) ARRAY_SIZE(t->systemValues));
5845 src = t->systemValues[src_reg->index];
5846 break;
5847
5848 case PROGRAM_HW_ATOMIC:
5849 src = ureg_src_array_register(TGSI_FILE_HW_ATOMIC, src_reg->index,
5850 src_reg->array_id);
5851 break;
5852
5853 default:
5854 assert(!"unknown src register file");
5855 return ureg_src_undef();
5856 }
5857
5858 if (src_reg->has_index2) {
5859 /* 2D indexes occur with geometry shader inputs (attrib, vertex)
5860 * and UBO constant buffers (buffer, position).
5861 */
5862 if (src_reg->reladdr2)
5863 src = ureg_src_dimension_indirect(src,
5864 translate_addr(t, src_reg->reladdr2, 1),
5865 src_reg->index2D);
5866 else
5867 src = ureg_src_dimension(src, src_reg->index2D);
5868 }
5869
5870 src = ureg_swizzle(src,
5871 GET_SWZ(src_reg->swizzle, 0) & 0x3,
5872 GET_SWZ(src_reg->swizzle, 1) & 0x3,
5873 GET_SWZ(src_reg->swizzle, 2) & 0x3,
5874 GET_SWZ(src_reg->swizzle, 3) & 0x3);
5875
5876 if (src_reg->abs)
5877 src = ureg_abs(src);
5878
5879 if ((src_reg->negate & 0xf) == NEGATE_XYZW)
5880 src = ureg_negate(src);
5881
5882 if (src_reg->reladdr != NULL) {
5883 assert(src_reg->file != PROGRAM_TEMPORARY);
5884 src = ureg_src_indirect(src, translate_addr(t, src_reg->reladdr, 0));
5885 }
5886
5887 return src;
5888 }
5889
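/**
 * Convert a texture-offset source into the tgsi_texture_offset form used by
 * ureg_tex_insn().  Offsets are expected to be direct, non-negated register
 * accesses (see the asserts below).
 */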
5890 static struct tgsi_texture_offset
5891 translate_tex_offset(struct st_translate *t,
5892 const st_src_reg *in_offset)
5893 {
5894 struct tgsi_texture_offset offset;
5895 struct ureg_src src = translate_src(t, in_offset);
5896
5897 offset.File = src.File;
5898 offset.Index = src.Index;
5899 offset.SwizzleX = src.SwizzleX;
5900 offset.SwizzleY = src.SwizzleY;
5901 offset.SwizzleZ = src.SwizzleZ;
5902 offset.Padding = 0;
5903
5904 assert(!src.Indirect);
5905 assert(!src.DimIndirect);
5906 assert(!src.Dimension);
5907 assert(!src.Absolute); /* those shouldn't be used with integers anyway */
5908 assert(!src.Negate);
5909
5910 return offset;
5911 }
5912
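/**
 * Translate a single glsl_to_tgsi_instruction into TGSI via ureg.
 * Control-flow, texture, memory/image and plain ALU opcodes each need
 * their own resource and operand handling.
 */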
5913 static void
5914 compile_tgsi_instruction(struct st_translate *t,
5915 const glsl_to_tgsi_instruction *inst)
5916 {
5917 struct ureg_program *ureg = t->ureg;
5918 int i;
5919 struct ureg_dst dst[2];
5920 struct ureg_src src[4];
5921 struct tgsi_texture_offset texoffsets[MAX_GLSL_TEXTURE_OFFSET];
5922
5923 int num_dst;
5924 int num_src;
5925 enum tgsi_texture_type tex_target = TGSI_TEXTURE_BUFFER;
5926
5927 num_dst = num_inst_dst_regs(inst);
5928 num_src = num_inst_src_regs(inst);
5929
5930 for (i = 0; i < num_dst; i++)
5931 dst[i] = translate_dst(t,
5932 &inst->dst[i],
5933 inst->saturate);
5934
5935 for (i = 0; i < num_src; i++)
5936 src[i] = translate_src(t, &inst->src[i]);
5937
5938 switch (inst->op) {
5939 case TGSI_OPCODE_BGNLOOP:
5940 case TGSI_OPCODE_ELSE:
5941 case TGSI_OPCODE_ENDLOOP:
5942 case TGSI_OPCODE_IF:
5943 case TGSI_OPCODE_UIF:
5944 assert(num_dst == 0);
5945 ureg_insn(ureg, inst->op, NULL, 0, src, num_src, inst->precise);
5946 return;
5947
5948 case TGSI_OPCODE_TEX:
5949 case TGSI_OPCODE_TEX_LZ:
5950 case TGSI_OPCODE_TXB:
5951 case TGSI_OPCODE_TXD:
5952 case TGSI_OPCODE_TXL:
5953 case TGSI_OPCODE_TXP:
5954 case TGSI_OPCODE_TXQ:
5955 case TGSI_OPCODE_TXQS:
5956 case TGSI_OPCODE_TXF:
5957 case TGSI_OPCODE_TXF_LZ:
5958 case TGSI_OPCODE_TEX2:
5959 case TGSI_OPCODE_TXB2:
5960 case TGSI_OPCODE_TXL2:
5961 case TGSI_OPCODE_TG4:
5962 case TGSI_OPCODE_LODQ:
5963 case TGSI_OPCODE_SAMP2HND:
5964 if (inst->resource.file == PROGRAM_SAMPLER) {
5965 src[num_src] = t->samplers[inst->resource.index];
5966 } else {
5967 /* Bindless samplers. */
5968 src[num_src] = translate_src(t, &inst->resource);
5969 }
5970 assert(src[num_src].File != TGSI_FILE_NULL);
5971 if (inst->resource.reladdr)
5972 src[num_src] =
5973 ureg_src_indirect(src[num_src],
5974 translate_addr(t, inst->resource.reladdr, 2));
5975 num_src++;
5976 for (i = 0; i < (int)inst->tex_offset_num_offset; i++) {
5977 texoffsets[i] = translate_tex_offset(t, &inst->tex_offsets[i]);
5978 }
5979 tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
5980
5981 ureg_tex_insn(ureg,
5982 inst->op,
5983 dst, num_dst,
5984 tex_target,
5985 st_translate_texture_type(inst->tex_type),
5986 texoffsets, inst->tex_offset_num_offset,
5987 src, num_src);
5988 return;
5989
5990 case TGSI_OPCODE_RESQ:
5991 case TGSI_OPCODE_LOAD:
5992 case TGSI_OPCODE_ATOMUADD:
5993 case TGSI_OPCODE_ATOMXCHG:
5994 case TGSI_OPCODE_ATOMCAS:
5995 case TGSI_OPCODE_ATOMAND:
5996 case TGSI_OPCODE_ATOMOR:
5997 case TGSI_OPCODE_ATOMXOR:
5998 case TGSI_OPCODE_ATOMUMIN:
5999 case TGSI_OPCODE_ATOMUMAX:
6000 case TGSI_OPCODE_ATOMIMIN:
6001 case TGSI_OPCODE_ATOMIMAX:
6002 case TGSI_OPCODE_IMG2HND:
6003 for (i = num_src - 1; i >= 0; i--)
6004 src[i + 1] = src[i];
6005 num_src++;
6006 if (inst->resource.file == PROGRAM_MEMORY) {
6007 src[0] = t->shared_memory;
6008 } else if (inst->resource.file == PROGRAM_BUFFER) {
6009 src[0] = t->buffers[inst->resource.index];
6010 } else if (inst->resource.file == PROGRAM_HW_ATOMIC) {
6011 src[0] = translate_src(t, &inst->resource);
6012 } else if (inst->resource.file == PROGRAM_CONSTANT) {
6013 assert(inst->resource.has_index2);
6014 src[0] = ureg_src_register(TGSI_FILE_CONSTBUF, inst->resource.index);
6015 } else {
6016 assert(inst->resource.file != PROGRAM_UNDEFINED);
6017 if (inst->resource.file == PROGRAM_IMAGE) {
6018 src[0] = t->images[inst->resource.index];
6019 } else {
6020 /* Bindless images. */
6021 src[0] = translate_src(t, &inst->resource);
6022 }
6023 tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
6024 }
6025 if (inst->resource.reladdr)
6026 src[0] = ureg_src_indirect(src[0],
6027 translate_addr(t, inst->resource.reladdr, 2));
6028 assert(src[0].File != TGSI_FILE_NULL);
6029 ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
6030 inst->buffer_access,
6031 tex_target, inst->image_format);
6032 break;
6033
6034 case TGSI_OPCODE_STORE:
6035 if (inst->resource.file == PROGRAM_MEMORY) {
6036 dst[0] = ureg_dst(t->shared_memory);
6037 } else if (inst->resource.file == PROGRAM_BUFFER) {
6038 dst[0] = ureg_dst(t->buffers[inst->resource.index]);
6039 } else {
6040 if (inst->resource.file == PROGRAM_IMAGE) {
6041 dst[0] = ureg_dst(t->images[inst->resource.index]);
6042 } else {
6043 /* Bindless images. */
6044 dst[0] = ureg_dst(translate_src(t, &inst->resource));
6045 }
6046 tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
6047 }
6048 dst[0] = ureg_writemask(dst[0], inst->dst[0].writemask);
6049 if (inst->resource.reladdr)
6050 dst[0] = ureg_dst_indirect(dst[0],
6051 translate_addr(t, inst->resource.reladdr, 2));
6052 assert(dst[0].File != TGSI_FILE_NULL);
6053 ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
6054 inst->buffer_access,
6055 tex_target, inst->image_format);
6056 break;
6057
6058 default:
6059 ureg_insn(ureg,
6060 inst->op,
6061 dst, num_dst,
6062 src, num_src, inst->precise);
6063 break;
6064 }
6065 }
6066
6067 /**
6068 * Emit the TGSI instructions for inverting and adjusting WPOS.
6069 * This code is unavoidable because it also depends on whether
6070 * an FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
6071 */
6072 static void
6073 emit_wpos_adjustment(struct gl_context *ctx,
6074 struct st_translate *t,
6075 int wpos_transform_const,
6076 boolean invert,
6077 GLfloat adjX, GLfloat adjY[2])
6078 {
6079 struct ureg_program *ureg = t->ureg;
6080
6081 assert(wpos_transform_const >= 0);
6082
6083 /* Fragment program uses fragment position input.
6084 * Need to replace instances of INPUT[WPOS] with temp T
6085 * where T = INPUT[WPOS] is inverted by Y.
6086 */
6087 struct ureg_src wpostrans = ureg_DECL_constant(ureg, wpos_transform_const);
6088 struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
6089 struct ureg_src *wpos =
6090 ctx->Const.GLSLFragCoordIsSysVal ?
6091 &t->systemValues[SYSTEM_VALUE_FRAG_COORD] :
6092 &t->inputs[t->inputMapping[VARYING_SLOT_POS]];
6093 struct ureg_src wpos_input = *wpos;
6094
6095 /* First, apply the coordinate shift: */
6096 if (adjX || adjY[0] || adjY[1]) {
6097 if (adjY[0] != adjY[1]) {
6098 /* Adjust the y coordinate by adjY[1] or adjY[0] respectively
6099 * depending on whether inversion is actually going to be applied
6100 * or not, which is determined by testing against the inversion
6101 * state variable used below, which will be either +1 or -1.
6102 */
6103 struct ureg_dst adj_temp = ureg_DECL_local_temporary(ureg);
6104
6105 ureg_CMP(ureg, adj_temp,
6106 ureg_scalar(wpostrans, invert ? 2 : 0),
6107 ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f),
6108 ureg_imm4f(ureg, adjX, adjY[1], 0.0f, 0.0f));
6109 ureg_ADD(ureg, wpos_temp, wpos_input, ureg_src(adj_temp));
6110 } else {
6111 ureg_ADD(ureg, wpos_temp, wpos_input,
6112 ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f));
6113 }
6114 wpos_input = ureg_src(wpos_temp);
6115 } else {
6116 /* MOV wpos_temp, input[wpos]
6117 */
6118 ureg_MOV(ureg, wpos_temp, wpos_input);
6119 }
6120
6121 /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be
6122 * inversion/identity, or the other way around if we're drawing to an FBO.
6123 */
6124 if (invert) {
6125 /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
6126 */
6127 ureg_MAD(ureg,
6128 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
6129 wpos_input,
6130 ureg_scalar(wpostrans, 0),
6131 ureg_scalar(wpostrans, 1));
6132 } else {
6133 /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
6134 */
6135 ureg_MAD(ureg,
6136 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
6137 wpos_input,
6138 ureg_scalar(wpostrans, 2),
6139 ureg_scalar(wpostrans, 3));
6140 }
6141
6142 /* Use wpos_temp as position input from here on:
6143 */
6144 *wpos = ureg_src(wpos_temp);
6145 }
6146
6147
6148 /**
6149 * Emit fragment position/coordinate code.
6150 */
6151 static void
6152 emit_wpos(struct st_context *st,
6153 struct st_translate *t,
6154 const struct gl_program *program,
6155 struct ureg_program *ureg,
6156 int wpos_transform_const)
6157 {
6158 struct pipe_screen *pscreen = st->pipe->screen;
6159 GLfloat adjX = 0.0f;
6160 GLfloat adjY[2] = { 0.0f, 0.0f };
6161 boolean invert = FALSE;
6162
6163 /* Query the pixel center conventions supported by the pipe driver and set
6164 * adjX, adjY to help out if it cannot handle the requested one internally.
6165 *
6166 * The bias of the y-coordinate depends on whether y-inversion takes place
6167 * (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
6168 * drawing to an FBO (causes additional inversion), and whether the pipe
6169 * driver origin and the requested origin differ (the latter condition is
6170 * stored in the 'invert' variable).
6171 *
6172 * For height = 100 (i = integer, h = half-integer, l = lower, u = upper):
6173 *
6174 * center shift only:
6175 * i -> h: +0.5
6176 * h -> i: -0.5
6177 *
6178 * inversion only:
6179 * l,i -> u,i: ( 0.0 + 1.0) * -1 + 100 = 99
6180 * l,h -> u,h: ( 0.5 + 0.0) * -1 + 100 = 99.5
6181 * u,i -> l,i: (99.0 + 1.0) * -1 + 100 = 0
6182 * u,h -> l,h: (99.5 + 0.0) * -1 + 100 = 0.5
6183 *
6184 * inversion and center shift:
6185 * l,i -> u,h: ( 0.0 + 0.5) * -1 + 100 = 99.5
6186 * l,h -> u,i: ( 0.5 + 0.5) * -1 + 100 = 99
6187 * u,i -> l,h: (99.0 + 0.5) * -1 + 100 = 0.5
6188 * u,h -> l,i: (99.5 + 0.5) * -1 + 100 = 0
6189 */
6190 if (program->OriginUpperLeft) {
6191 /* Fragment shader wants origin in upper-left */
6192 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
6193 /* the driver supports upper-left origin */
6194 }
6195 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
6196 /* the driver supports lower-left origin, need to invert Y */
6197 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
6198 TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
6199 invert = TRUE;
6200 }
6201 else
6202 assert(0);
6203 }
6204 else {
6205 /* Fragment shader wants origin in lower-left */
6206 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
6207 /* the driver supports lower-left origin */
6208 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
6209 TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
6210 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
6211 /* the driver supports upper-left origin, need to invert Y */
6212 invert = TRUE;
6213 else
6214 assert(0);
6215 }
6216
6217 if (program->PixelCenterInteger) {
6218 /* Fragment shader wants pixel center integer */
6219 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
6220 /* the driver supports pixel center integer */
6221 adjY[1] = 1.0f;
6222 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
6223 TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
6224 }
6225 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
6226 /* the driver supports pixel center half integer, need to bias X,Y */
6227 adjX = -0.5f;
6228 adjY[0] = -0.5f;
6229 adjY[1] = 0.5f;
6230 }
6231 else
6232 assert(0);
6233 }
6234 else {
6235 /* Fragment shader wants pixel center half integer */
6236 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
6237 /* the driver supports pixel center half integer */
6238 }
6239 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
6240 /* the driver supports pixel center integer, need to bias X,Y */
6241 adjX = adjY[0] = adjY[1] = 0.5f;
6242 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
6243 TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
6244 }
6245 else
6246 assert(0);
6247 }
6248
6249 /* we invert after adjustment so that we avoid the MOV to temporary,
6250 * and reuse the adjustment ADD instead */
6251 emit_wpos_adjustment(st->ctx, t, wpos_transform_const, invert, adjX, adjY);
6252 }
6253
6254 /**
6255 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
6256 * TGSI uses +1 for front, -1 for back.
6257 * This function converts the TGSI value to the GL value. Simply clamping/
6258 * saturating the value to [0,1] does the job.
6259 */
6260 static void
6261 emit_face_var(struct gl_context *ctx, struct st_translate *t)
6262 {
6263 struct ureg_program *ureg = t->ureg;
6264 struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
6265 struct ureg_src face_input = t->inputs[t->inputMapping[VARYING_SLOT_FACE]];
6266
6267 if (ctx->Const.NativeIntegers) {
6268 ureg_FSGE(ureg, face_temp, face_input, ureg_imm1f(ureg, 0));
6269 }
6270 else {
6271 /* MOV_SAT face_temp, input[face] */
6272 ureg_MOV(ureg, ureg_saturate(face_temp), face_input);
6273 }
6274
6275 /* Use face_temp as face input from here on: */
6276 t->inputs[t->inputMapping[VARYING_SLOT_FACE]] = ureg_src(face_temp);
6277 }
6278
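/**
 * Emit the compute shader's fixed local work-group size as TGSI
 * block-size properties.
 */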
6279 static void
6280 emit_compute_block_size(const struct gl_program *prog,
6281 struct ureg_program *ureg) {
6282 ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
6283 prog->info.cs.local_size[0]);
6284 ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
6285 prog->info.cs.local_size[1]);
6286 ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
6287 prog->info.cs.local_size[2]);
6288 }
6289
6290 struct sort_inout_decls {
6291 bool operator()(const struct inout_decl &a, const struct inout_decl &b) const {
6292 return mapping[a.mesa_index] < mapping[b.mesa_index];
6293 }
6294
6295 const ubyte *mapping;
6296 };
6297
6298 /* Sort the given array of decls by the corresponding slot (TGSI file index).
6299 *
6300 * This is for the benefit of older drivers which are broken when the
6301 * declarations aren't sorted in this way.
6302 */
6303 static void
6304 sort_inout_decls_by_slot(struct inout_decl *decls,
6305 unsigned count,
6306 const ubyte mapping[])
6307 {
6308 sort_inout_decls sorter;
6309 sorter.mapping = mapping;
6310 std::sort(decls, decls + count, sorter);
6311 }
6312
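/**
 * Map a GLSL interpolation qualifier to the corresponding TGSI interpolation
 * mode.  Colors with no explicit qualifier map to TGSI_INTERPOLATE_COLOR, so
 * interpolation then follows the flat-shading state.
 */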
6313 static enum tgsi_interpolate_mode
6314 st_translate_interp(enum glsl_interp_mode glsl_qual, GLuint varying)
6315 {
6316 switch (glsl_qual) {
6317 case INTERP_MODE_NONE:
6318 if (varying == VARYING_SLOT_COL0 || varying == VARYING_SLOT_COL1)
6319 return TGSI_INTERPOLATE_COLOR;
6320 return TGSI_INTERPOLATE_PERSPECTIVE;
6321 case INTERP_MODE_SMOOTH:
6322 return TGSI_INTERPOLATE_PERSPECTIVE;
6323 case INTERP_MODE_FLAT:
6324 return TGSI_INTERPOLATE_CONSTANT;
6325 case INTERP_MODE_NOPERSPECTIVE:
6326 return TGSI_INTERPOLATE_LINEAR;
6327 default:
6328 assert(0 && "unexpected interp mode in st_translate_interp()");
6329 return TGSI_INTERPOLATE_PERSPECTIVE;
6330 }
6331 }
6332
6333 /**
6334 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
6335 * \param program the program to translate
6336 * \param numInputs number of input registers used
6337 * \param inputMapping maps Mesa fragment program inputs to TGSI generic
6338 * input indexes
6339 * \param inputSemanticName the TGSI_SEMANTIC flag for each input
6340 * \param inputSemanticIndex the semantic index (ex: which texcoord) for
6341 * each input
6342 * \param interpMode the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
6343 * \param numOutputs number of output registers used
6344 * \param outputMapping maps Mesa fragment program outputs to TGSI
6345 * generic outputs
6346 * \param outputSemanticName the TGSI_SEMANTIC flag for each output
6347 * \param outputSemanticIndex the semantic index (ex: which texcoord) for
6348 * each output
6349 *
6350 * \return PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
6351 */
6352 extern "C" enum pipe_error
6353 st_translate_program(
6354 struct gl_context *ctx,
6355 enum pipe_shader_type procType,
6356 struct ureg_program *ureg,
6357 glsl_to_tgsi_visitor *program,
6358 const struct gl_program *proginfo,
6359 GLuint numInputs,
6360 const ubyte inputMapping[],
6361 const ubyte inputSlotToAttr[],
6362 const ubyte inputSemanticName[],
6363 const ubyte inputSemanticIndex[],
6364 const ubyte interpMode[],
6365 GLuint numOutputs,
6366 const ubyte outputMapping[],
6367 const ubyte outputSemanticName[],
6368 const ubyte outputSemanticIndex[])
6369 {
6370 struct pipe_screen *screen = st_context(ctx)->pipe->screen;
6371 struct st_translate *t;
6372 unsigned i;
6373 struct gl_program_constants *frag_const =
6374 &ctx->Const.Program[MESA_SHADER_FRAGMENT];
6375 enum pipe_error ret = PIPE_OK;
6376
6377 assert(numInputs <= ARRAY_SIZE(t->inputs));
6378 assert(numOutputs <= ARRAY_SIZE(t->outputs));
6379
6380 ASSERT_BITFIELD_SIZE(st_src_reg, type, GLSL_TYPE_ERROR);
6381 ASSERT_BITFIELD_SIZE(st_dst_reg, type, GLSL_TYPE_ERROR);
6382 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, tex_type, GLSL_TYPE_ERROR);
6383 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, image_format, PIPE_FORMAT_COUNT);
6384 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, tex_target,
6385 (gl_texture_index) (NUM_TEXTURE_TARGETS - 1));
6386 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, image_format,
6387 (enum pipe_format) (PIPE_FORMAT_COUNT - 1));
6388 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, op,
6389 (enum tgsi_opcode) (TGSI_OPCODE_LAST - 1));
6390
6391 t = CALLOC_STRUCT(st_translate);
6392 if (!t) {
6393 ret = PIPE_ERROR_OUT_OF_MEMORY;
6394 goto out;
6395 }
6396
6397 t->procType = procType;
6398 t->need_uarl = !screen->get_param(screen, PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS);
6399 t->inputMapping = inputMapping;
6400 t->outputMapping = outputMapping;
6401 t->ureg = ureg;
6402 t->num_temp_arrays = program->next_array;
6403 if (t->num_temp_arrays)
6404 t->arrays = (struct ureg_dst*)
6405 calloc(t->num_temp_arrays, sizeof(t->arrays[0]));
6406
6407 /*
6408 * Declare input attributes.
6409 */
6410 switch (procType) {
6411 case PIPE_SHADER_FRAGMENT:
6412 case PIPE_SHADER_GEOMETRY:
6413 case PIPE_SHADER_TESS_EVAL:
6414 case PIPE_SHADER_TESS_CTRL:
6415 sort_inout_decls_by_slot(program->inputs, program->num_inputs, inputMapping);
6416
6417 for (i = 0; i < program->num_inputs; ++i) {
6418 struct inout_decl *decl = &program->inputs[i];
6419 unsigned slot = inputMapping[decl->mesa_index];
6420 struct ureg_src src;
6421 ubyte tgsi_usage_mask = decl->usage_mask;
6422
6423 if (glsl_base_type_is_64bit(decl->base_type)) {
6424 if (tgsi_usage_mask == 1)
6425 tgsi_usage_mask = TGSI_WRITEMASK_XY;
6426 else if (tgsi_usage_mask == 2)
6427 tgsi_usage_mask = TGSI_WRITEMASK_ZW;
6428 else
6429 tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
6430 }
6431
6432 enum tgsi_interpolate_mode interp_mode = TGSI_INTERPOLATE_CONSTANT;
6433 enum tgsi_interpolate_loc interp_location = TGSI_INTERPOLATE_LOC_CENTER;
6434 if (procType == PIPE_SHADER_FRAGMENT) {
6435 assert(interpMode);
6436 interp_mode = interpMode[slot] != TGSI_INTERPOLATE_COUNT ?
6437 (enum tgsi_interpolate_mode) interpMode[slot] :
6438 st_translate_interp(decl->interp, inputSlotToAttr[slot]);
6439
6440 interp_location = (enum tgsi_interpolate_loc) decl->interp_loc;
6441 }
6442
6443 src = ureg_DECL_fs_input_cyl_centroid_layout(ureg,
6444 (enum tgsi_semantic) inputSemanticName[slot],
6445 inputSemanticIndex[slot],
6446 interp_mode, 0, interp_location, slot, tgsi_usage_mask,
6447 decl->array_id, decl->size);
6448
6449 for (unsigned j = 0; j < decl->size; ++j) {
6450 if (t->inputs[slot + j].File != TGSI_FILE_INPUT) {
6451 /* The ArrayID is set up in dst_register */
6452 t->inputs[slot + j] = src;
6453 t->inputs[slot + j].ArrayID = 0;
6454 t->inputs[slot + j].Index += j;
6455 }
6456 }
6457 }
6458 break;
6459 case PIPE_SHADER_VERTEX:
6460 for (i = 0; i < numInputs; i++) {
6461 t->inputs[i] = ureg_DECL_vs_input(ureg, i);
6462 }
6463 break;
6464 case PIPE_SHADER_COMPUTE:
6465 break;
6466 default:
6467 assert(0);
6468 }
6469
6470 /*
6471 * Declare output attributes.
6472 */
6473 switch (procType) {
6474 case PIPE_SHADER_FRAGMENT:
6475 case PIPE_SHADER_COMPUTE:
6476 break;
6477 case PIPE_SHADER_GEOMETRY:
6478 case PIPE_SHADER_TESS_EVAL:
6479 case PIPE_SHADER_TESS_CTRL:
6480 case PIPE_SHADER_VERTEX:
6481 sort_inout_decls_by_slot(program->outputs, program->num_outputs, outputMapping);
6482
6483 for (i = 0; i < program->num_outputs; ++i) {
6484 struct inout_decl *decl = &program->outputs[i];
6485 unsigned slot = outputMapping[decl->mesa_index];
6486 struct ureg_dst dst;
6487 ubyte tgsi_usage_mask = decl->usage_mask;
6488
6489 if (glsl_base_type_is_64bit(decl->base_type)) {
6490 if (tgsi_usage_mask == 1)
6491 tgsi_usage_mask = TGSI_WRITEMASK_XY;
6492 else if (tgsi_usage_mask == 2)
6493 tgsi_usage_mask = TGSI_WRITEMASK_ZW;
6494 else
6495 tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
6496 }
6497
6498 dst = ureg_DECL_output_layout(ureg,
6499 (enum tgsi_semantic) outputSemanticName[slot],
6500 outputSemanticIndex[slot],
6501 decl->gs_out_streams,
6502 slot, tgsi_usage_mask, decl->array_id, decl->size, decl->invariant);
6503 dst.Invariant = decl->invariant;
6504 for (unsigned j = 0; j < decl->size; ++j) {
6505 if (t->outputs[slot + j].File != TGSI_FILE_OUTPUT) {
6506 /* The ArrayID is set up in dst_register */
6507 t->outputs[slot + j] = dst;
6508 t->outputs[slot + j].ArrayID = 0;
6509 t->outputs[slot + j].Index += j;
6510 t->outputs[slot + j].Invariant = decl->invariant;
6511 }
6512 }
6513 }
6514 break;
6515 default:
6516 assert(0);
6517 }
6518
6519 if (procType == PIPE_SHADER_FRAGMENT) {
6520 if (program->shader->Program->info.fs.early_fragment_tests ||
6521 program->shader->Program->info.fs.post_depth_coverage) {
6522 ureg_property(ureg, TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL, 1);
6523
6524 if (program->shader->Program->info.fs.post_depth_coverage)
6525 ureg_property(ureg, TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE, 1);
6526 }
6527
6528 if (proginfo->info.inputs_read & VARYING_BIT_POS) {
6529 /* Must do this after setting up t->inputs. */
6530 emit_wpos(st_context(ctx), t, proginfo, ureg,
6531 program->wpos_transform_const);
6532 }
6533
6534 if (proginfo->info.inputs_read & VARYING_BIT_FACE)
6535 emit_face_var(ctx, t);
6536
6537 for (i = 0; i < numOutputs; i++) {
6538 switch (outputSemanticName[i]) {
6539 case TGSI_SEMANTIC_POSITION:
6540 t->outputs[i] = ureg_DECL_output(ureg,
6541 TGSI_SEMANTIC_POSITION, /* Z/Depth */
6542 outputSemanticIndex[i]);
6543 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Z);
6544 break;
6545 case TGSI_SEMANTIC_STENCIL:
6546 t->outputs[i] = ureg_DECL_output(ureg,
6547 TGSI_SEMANTIC_STENCIL, /* Stencil */
6548 outputSemanticIndex[i]);
6549 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Y);
6550 break;
6551 case TGSI_SEMANTIC_COLOR:
6552 t->outputs[i] = ureg_DECL_output(ureg,
6553 TGSI_SEMANTIC_COLOR,
6554 outputSemanticIndex[i]);
6555 break;
6556 case TGSI_SEMANTIC_SAMPLEMASK:
6557 t->outputs[i] = ureg_DECL_output(ureg,
6558 TGSI_SEMANTIC_SAMPLEMASK,
6559 outputSemanticIndex[i]);
6560 /* TODO: If we ever support more than 32 samples, this will have
6561 * to become an array.
6562 */
6563 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
6564 break;
6565 default:
6566 assert(!"fragment shader outputs must be POSITION/STENCIL/COLOR/SAMPLEMASK");
6567 ret = PIPE_ERROR_BAD_INPUT;
6568 goto out;
6569 }
6570 }
6571 }
6572 else if (procType == PIPE_SHADER_VERTEX) {
6573 for (i = 0; i < numOutputs; i++) {
6574 if (outputSemanticName[i] == TGSI_SEMANTIC_FOG) {
6575 /* force register to contain a fog coordinate in the form (F, 0, 0, 1). */
6576 ureg_MOV(ureg,
6577 ureg_writemask(t->outputs[i], TGSI_WRITEMASK_YZW),
6578 ureg_imm4f(ureg, 0.0f, 0.0f, 0.0f, 1.0f));
6579 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
6580 }
6581 }
6582 }
6583
6584 if (procType == PIPE_SHADER_COMPUTE) {
6585 emit_compute_block_size(proginfo, ureg);
6586 }
6587
6588 /* Declare address register.
6589 */
6590 if (program->num_address_regs > 0) {
6591 assert(program->num_address_regs <= 3);
6592 for (int i = 0; i < program->num_address_regs; i++)
6593 t->address[i] = ureg_DECL_address(ureg);
6594 }
6595
6596 /* Declare misc input registers
6597 */
6598 {
6599 GLbitfield64 sysInputs = proginfo->info.system_values_read;
6600
6601 for (i = 0; sysInputs; i++) {
6602 if (sysInputs & (1ull << i)) {
6603 enum tgsi_semantic semName = _mesa_sysval_to_semantic(i);
6604
6605 t->systemValues[i] = ureg_DECL_system_value(ureg, semName, 0);
6606
6607 if (semName == TGSI_SEMANTIC_INSTANCEID ||
6608 semName == TGSI_SEMANTIC_VERTEXID) {
6609 /* From Gallium perspective, these system values are always
6610 * integer, and require native integer support. However, if
6611 * native integer is supported on the vertex stage but not the
6612 * pixel stage (e.g., i915g + draw), Mesa will generate IR that
6613 * assumes these system values are floats. To resolve the
6614 * inconsistency, we insert a U2F.
6615 */
6616 struct st_context *st = st_context(ctx);
6617 struct pipe_screen *pscreen = st->pipe->screen;
6618 assert(procType == PIPE_SHADER_VERTEX);
6619 assert(pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_INTEGERS));
6620 (void) pscreen;
6621 if (!ctx->Const.NativeIntegers) {
6622 struct ureg_dst temp = ureg_DECL_local_temporary(t->ureg);
6623 ureg_U2F(t->ureg, ureg_writemask(temp, TGSI_WRITEMASK_X),
6624 t->systemValues[i]);
6625 t->systemValues[i] = ureg_scalar(ureg_src(temp), 0);
6626 }
6627 }
6628
6629 if (procType == PIPE_SHADER_FRAGMENT &&
6630 semName == TGSI_SEMANTIC_POSITION)
6631 emit_wpos(st_context(ctx), t, proginfo, ureg,
6632 program->wpos_transform_const);
6633
6634 sysInputs &= ~(1ull << i);
6635 }
6636 }
6637 }
6638
6639 t->array_sizes = program->array_sizes;
6640 t->input_decls = program->inputs;
6641 t->num_input_decls = program->num_inputs;
6642 t->output_decls = program->outputs;
6643 t->num_output_decls = program->num_outputs;
6644
6645 /* Emit constants and uniforms. TGSI uses a single index space for these,
6646 * so we put all the translated regs in t->constants.
6647 */
6648 if (proginfo->Parameters) {
6649 t->constants = (struct ureg_src *)
6650 calloc(proginfo->Parameters->NumParameters, sizeof(t->constants[0]));
6651 if (t->constants == NULL) {
6652 ret = PIPE_ERROR_OUT_OF_MEMORY;
6653 goto out;
6654 }
6655 t->num_constants = proginfo->Parameters->NumParameters;
6656
6657 for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
6658 unsigned pvo = proginfo->Parameters->ParameterValueOffset[i];
6659
6660 switch (proginfo->Parameters->Parameters[i].Type) {
6661 case PROGRAM_STATE_VAR:
6662 case PROGRAM_UNIFORM:
6663 t->constants[i] = ureg_DECL_constant(ureg, i);
6664 break;
6665
6666 /* Emit immediates for PROGRAM_CONSTANT only when there's no indirect
6667 * addressing of the const buffer.
6668 * FIXME: Be smarter and recognize param arrays:
6669 * indirect addressing is only valid within the referenced
6670 * array.
6671 */
6672 case PROGRAM_CONSTANT:
6673 if (program->indirect_addr_consts)
6674 t->constants[i] = ureg_DECL_constant(ureg, i);
6675 else
6676 t->constants[i] = emit_immediate(t,
6677 proginfo->Parameters->ParameterValues + pvo,
6678 proginfo->Parameters->Parameters[i].DataType,
6679 4);
6680 break;
6681 default:
6682 break;
6683 }
6684 }
6685 }
6686
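/* Declare the UBOs as 2D constant buffers.  Constant buffer 0 holds the
 * regular parameters declared above, so UBO i maps to constbuf i + 1.
 */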
6687 for (i = 0; i < proginfo->info.num_ubos; i++) {
6688 unsigned size = proginfo->sh.UniformBlocks[i]->UniformBufferSize;
6689 unsigned num_const_vecs = (size + 15) / 16;
6690 unsigned first, last;
6691 assert(num_const_vecs > 0);
6692 first = 0;
6693 last = num_const_vecs > 0 ? num_const_vecs - 1 : 0;
6694 ureg_DECL_constant2D(t->ureg, first, last, i + 1);
6695 }
6696
6697 /* Emit immediate values.
6698 */
6699 t->immediates = (struct ureg_src *)
6700 calloc(program->num_immediates, sizeof(struct ureg_src));
6701 if (t->immediates == NULL) {
6702 ret = PIPE_ERROR_OUT_OF_MEMORY;
6703 goto out;
6704 }
6705 t->num_immediates = program->num_immediates;
6706
6707 i = 0;
6708 foreach_in_list(immediate_storage, imm, &program->immediates) {
6709 assert(i < program->num_immediates);
6710 t->immediates[i++] = emit_immediate(t, imm->values, imm->type, imm->size32);
6711 }
6712 assert(i == program->num_immediates);
6713
6714 /* texture samplers */
6715 for (i = 0; i < frag_const->MaxTextureImageUnits; i++) {
6716 if (program->samplers_used & (1u << i)) {
6717 enum tgsi_return_type type =
6718 st_translate_texture_type(program->sampler_types[i]);
6719
6720 t->samplers[i] = ureg_DECL_sampler(ureg, i);
6721
6722 ureg_DECL_sampler_view(ureg, i, program->sampler_targets[i],
6723 type, type, type, type);
6724 }
6725 }
6726
6727 /* Declare atomic and shader storage buffers. */
6728 {
6729 struct gl_program *prog = program->prog;
6730
6731 if (!st_context(ctx)->has_hw_atomics) {
6732 for (i = 0; i < prog->info.num_abos; i++) {
6733 unsigned index = prog->sh.AtomicBuffers[i]->Binding;
6734 assert(index < frag_const->MaxAtomicBuffers);
6735 t->buffers[index] = ureg_DECL_buffer(ureg, index, true);
6736 }
6737 } else {
6738 for (i = 0; i < program->num_atomics; i++) {
6739 struct hwatomic_decl *ainfo = &program->atomic_info[i];
6740 gl_uniform_storage *uni_storage = &prog->sh.data->UniformStorage[ainfo->location];
6741 int base = uni_storage->offset / ATOMIC_COUNTER_SIZE;
6742 ureg_DECL_hw_atomic(ureg, base, base + ainfo->size - 1, ainfo->binding,
6743 ainfo->array_id);
6744 }
6745 }
6746
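/* Without native HW atomic support, atomic counter buffers occupy the
 * first buffer slots, so SSBO bindings are offset by MaxAtomicBuffers.
 */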
6747 assert(prog->info.num_ssbos <= frag_const->MaxShaderStorageBlocks);
6748 for (i = 0; i < prog->info.num_ssbos; i++) {
6749 unsigned index = i;
6750 if (!st_context(ctx)->has_hw_atomics)
6751 index += frag_const->MaxAtomicBuffers;
6752
6753 t->buffers[index] = ureg_DECL_buffer(ureg, index, false);
6754 }
6755 }
6756
6757 if (program->use_shared_memory)
6758 t->shared_memory = ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);
6759
6760 for (i = 0; i < program->shader->Program->info.num_images; i++) {
6761 if (program->images_used & (1 << i)) {
6762 t->images[i] = ureg_DECL_image(ureg, i,
6763 program->image_targets[i],
6764 program->image_formats[i],
6765 true, false);
6766 }
6767 }
6768
6769 /* Emit each instruction in turn:
6770 */
6771 foreach_in_list(glsl_to_tgsi_instruction, inst, &program->instructions)
6772 compile_tgsi_instruction(t, inst);
6773
6774 /* Set the next shader stage hint for VS and TES. */
6775 switch (procType) {
6776 case PIPE_SHADER_VERTEX:
6777 case PIPE_SHADER_TESS_EVAL:
6778 if (program->shader_program->SeparateShader)
6779 break;
6780
6781 for (i = program->shader->Stage+1; i <= MESA_SHADER_FRAGMENT; i++) {
6782 if (program->shader_program->_LinkedShaders[i]) {
6783 ureg_set_next_shader_processor(
6784 ureg, pipe_shader_type_from_mesa((gl_shader_stage)i));
6785 break;
6786 }
6787 }
6788 break;
6789 default:
6790 ; /* nothing - silence compiler warning */
6791 }
6792
6793 out:
6794 if (t) {
6795 free(t->arrays);
6796 free(t->temps);
6797 free(t->constants);
6798 t->num_constants = 0;
6799 free(t->immediates);
6800 t->num_immediates = 0;
6801 FREE(t);
6802 }
6803
6804 return ret;
6805 }
6806 /* ----------------------------- End TGSI code ------------------------------ */
6807
6808
6809 /**
6810 * Convert a shader's GLSL IR into a Mesa gl_program, but without
6811 * generating Mesa IR.
6812 */
6813 static struct gl_program *
6814 get_mesa_program_tgsi(struct gl_context *ctx,
6815 struct gl_shader_program *shader_program,
6816 struct gl_linked_shader *shader)
6817 {
6818 glsl_to_tgsi_visitor* v;
6819 struct gl_program *prog;
6820 struct gl_shader_compiler_options *options =
6821 &ctx->Const.ShaderCompilerOptions[shader->Stage];
6822 struct pipe_screen *pscreen = ctx->st->pipe->screen;
6823 enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(shader->Stage);
6824 unsigned skip_merge_registers;
6825
6826 validate_ir_tree(shader->ir);
6827
6828 prog = shader->Program;
6829
6830 prog->Parameters = _mesa_new_parameter_list();
6831 v = new glsl_to_tgsi_visitor();
6832 v->ctx = ctx;
6833 v->prog = prog;
6834 v->shader_program = shader_program;
6835 v->shader = shader;
6836 v->options = options;
6837 v->native_integers = ctx->Const.NativeIntegers;
6838
6839 v->have_sqrt = pscreen->get_shader_param(pscreen, ptarget,
6840 PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED);
6841 v->have_fma = pscreen->get_shader_param(pscreen, ptarget,
6842 PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED);
6843 v->has_tex_txf_lz = pscreen->get_param(pscreen,
6844 PIPE_CAP_TGSI_TEX_TXF_LZ);
6845 v->need_uarl = !pscreen->get_param(pscreen, PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS);
6846
6847 v->variables = _mesa_hash_table_create(v->mem_ctx, _mesa_hash_pointer,
6848 _mesa_key_pointer_equal);
6849 skip_merge_registers =
6850 pscreen->get_shader_param(pscreen, ptarget,
6851 PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS);
6852
6853 _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
6854 prog->Parameters);
6855
6856 /* Remove reads from output registers. */
6857 if (!pscreen->get_param(pscreen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
6858 lower_output_reads(shader->Stage, shader->ir);
6859
6860 /* Emit intermediate IR for main(). */
6861 visit_exec_list(shader->ir, v);
6862
6863 #if 0
6864 /* Print out some information (for debugging purposes) used by the
6865 * optimization passes. */
6866 {
6867 int i;
6868 int *first_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
6869 int *first_reads = ralloc_array(v->mem_ctx, int, v->next_temp);
6870 int *last_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
6871 int *last_reads = ralloc_array(v->mem_ctx, int, v->next_temp);
6872
6873 for (i = 0; i < v->next_temp; i++) {
6874 first_writes[i] = -1;
6875 first_reads[i] = -1;
6876 last_writes[i] = -1;
6877 last_reads[i] = -1;
6878 }
6879 v->get_first_temp_read(first_reads);
6880 v->get_last_temp_read_first_temp_write(last_reads, first_writes);
6881 v->get_last_temp_write(last_writes);
6882 for (i = 0; i < v->next_temp; i++)
6883 printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, first_reads[i],
6884 first_writes[i],
6885 last_reads[i],
6886 last_writes[i]);
6887 ralloc_free(first_writes);
6888 ralloc_free(first_reads);
6889 ralloc_free(last_writes);
6890 ralloc_free(last_reads);
6891 }
6892 #endif
6893
6894 /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
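/* Ordering matters here: copy propagation runs before dead-code
 * elimination, and merge_registers (see its comment above) only produces
 * optimal results once both of those have run.
 */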
6895 v->simplify_cmp();
6896 v->copy_propagate();
6897
6898 while (v->eliminate_dead_code());
6899
6900 v->merge_two_dsts();
6901 if (!skip_merge_registers)
6902 v->merge_registers();
6903 v->renumber_registers();
6904
6905 /* Write the END instruction. */
6906 v->emit_asm(NULL, TGSI_OPCODE_END);
6907
6908 if (ctx->_Shader->Flags & GLSL_DUMP) {
6909 _mesa_log("\n");
6910 _mesa_log("GLSL IR for linked %s program %d:\n",
6911 _mesa_shader_stage_to_string(shader->Stage),
6912 shader_program->Name);
6913 _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
6914 _mesa_log("\n\n");
6915 }
6916
6917 do_set_program_inouts(shader->ir, prog, shader->Stage);
6918 _mesa_copy_linked_program_data(shader_program, shader);
6919 shrink_array_declarations(v->inputs, v->num_inputs,
6920 &prog->info.inputs_read,
6921 prog->info.vs.double_inputs_read,
6922 &prog->info.patch_inputs_read);
6923 shrink_array_declarations(v->outputs, v->num_outputs,
6924 &prog->info.outputs_written, 0ULL,
6925 &prog->info.patch_outputs_written);
6926 count_resources(v, prog);
6927
6928 /* The GLSL IR won't be needed anymore. */
6929 ralloc_free(shader->ir);
6930 shader->ir = NULL;
6931
6932 /* This must be done before the uniform storage is associated. */
6933 if (shader->Stage == MESA_SHADER_FRAGMENT &&
6934 (prog->info.inputs_read & VARYING_BIT_POS ||
6935 prog->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD))) {
6936 static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
6937 STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
6938 };
6939
6940 v->wpos_transform_const = _mesa_add_state_reference(prog->Parameters,
6941 wposTransformState);
6942 }
6943
6944 /* Avoid reallocation of the program parameter list, because the uniform
6945 * storage is only associated with the original parameter list.
6946 * This should be enough for Bitmap and DrawPixels constants.
6947 */
6948 _mesa_reserve_parameter_storage(prog->Parameters, 8);
6949
6950 /* This has to be done last. Any operation that can cause
6951 * prog->ParameterValues to get reallocated (e.g., anything that adds a
6952 * program constant) has to happen before creating this linkage.
6953 */
6954 _mesa_associate_uniform_storage(ctx, shader_program, prog, true);
6955 if (!shader_program->data->LinkStatus) {
6956 free_glsl_to_tgsi_visitor(v);
6957 _mesa_reference_program(ctx, &shader->Program, NULL);
6958 return NULL;
6959 }
6960
6961 struct st_vertex_program *stvp;
6962 struct st_fragment_program *stfp;
6963 struct st_common_program *stp;
6964 struct st_compute_program *stcp;
6965
6966 switch (shader->Stage) {
6967 case MESA_SHADER_VERTEX:
6968 stvp = (struct st_vertex_program *)prog;
6969 stvp->glsl_to_tgsi = v;
6970 break;
6971 case MESA_SHADER_FRAGMENT:
6972 stfp = (struct st_fragment_program *)prog;
6973 stfp->glsl_to_tgsi = v;
6974 break;
6975 case MESA_SHADER_TESS_CTRL:
6976 case MESA_SHADER_TESS_EVAL:
6977 case MESA_SHADER_GEOMETRY:
6978 stp = st_common_program(prog);
6979 stp->glsl_to_tgsi = v;
6980 break;
6981 case MESA_SHADER_COMPUTE:
6982 stcp = (struct st_compute_program *)prog;
6983 stcp->glsl_to_tgsi = v;
6984 break;
6985 default:
6986 assert(!"should not be reached");
6987 return NULL;
6988 }
6989
6990 return prog;
6991 }
6992
6993 /* See if there are unsupported control flow statements. */
6994 class ir_control_flow_info_visitor : public ir_hierarchical_visitor {
6995 private:
6996 const struct gl_shader_compiler_options *options;
6997 public:
6998 ir_control_flow_info_visitor(const struct gl_shader_compiler_options *options)
6999 : options(options),
7000 unsupported(false)
7001 {
7002 }
7003
7004 virtual ir_visitor_status visit_enter(ir_function *ir)
7005 {
7006 /* Other functions are skipped (same as glsl_to_tgsi). */
7007 if (strcmp(ir->name, "main") == 0)
7008 return visit_continue;
7009
7010 return visit_continue_with_parent;
7011 }
7012
7013 virtual ir_visitor_status visit_enter(ir_call *ir)
7014 {
7015 if (!ir->callee->is_intrinsic()) {
7016 unsupported = true; /* it's a function call */
7017 return visit_stop;
7018 }
7019 return visit_continue;
7020 }
7021
7022 virtual ir_visitor_status visit_enter(ir_return *ir)
7023 {
7024 if (options->EmitNoMainReturn) {
7025 unsupported = true;
7026 return visit_stop;
7027 }
7028 return visit_continue;
7029 }
7030
7031 bool unsupported;
7032 };
7033
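/* Return true if the shader's main() contains control flow this translator
 * cannot handle: real (non-intrinsic) function calls, or a return from
 * main() when the driver requires EmitNoMainReturn.
 */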
7034 static bool
7035 has_unsupported_control_flow(exec_list *ir,
7036 const struct gl_shader_compiler_options *options)
7037 {
7038 ir_control_flow_info_visitor visitor(options);
7039 visit_list_elements(&visitor, ir);
7040 return visitor.unsupported;
7041 }
7042
7043 extern "C" {
7044
7045 /**
7046 * Link a shader program.
7047 * Called via ctx->Driver.LinkShader().
7048 * This actually involves lowering and optimizing the GLSL IR of each linked
7049 * stage and then converting it into an intermediate TGSI-like IR (or NIR).
7050 */
7051 GLboolean
7052 st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
7053 {
7054 struct pipe_screen *pscreen = ctx->st->pipe->screen;
7055
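/* Query the driver's preferred IR once, using the vertex stage as a proxy
* for the whole pipeline, and choose the NIR or TGSI path from it.
*/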
7056 enum pipe_shader_ir preferred_ir = (enum pipe_shader_ir)
7057 pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX,
7058 PIPE_SHADER_CAP_PREFERRED_IR);
7059 bool use_nir = preferred_ir == PIPE_SHADER_IR_NIR;
7060
7061 /* Return early if we are loading the shader from the on-disk cache. */
7062 if (st_load_ir_from_disk_cache(ctx, prog, use_nir)) {
7063 return GL_TRUE;
7064 }
7065
7066 assert(prog->data->LinkStatus);
7067
7068 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
7069 if (prog->_LinkedShaders[i] == NULL)
7070 continue;
7071
7072 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
7073 exec_list *ir = shader->ir;
7074 gl_shader_stage stage = shader->Stage;
7075 const struct gl_shader_compiler_options *options =
7076 &ctx->Const.ShaderCompilerOptions[stage];
7077 enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
7078 bool have_dround = pscreen->get_shader_param(pscreen, ptarget,
7079 PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED);
7080 bool have_dfrexp = pscreen->get_shader_param(pscreen, ptarget,
7081 PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED);
7082 bool have_ldexp = pscreen->get_shader_param(pscreen, ptarget,
7083 PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED);
7084 unsigned if_threshold = pscreen->get_shader_param(pscreen, ptarget,
7085 PIPE_SHADER_CAP_LOWER_IF_THRESHOLD);
7086
7087 /* If there are forms of indirect addressing that the driver
7088 * cannot handle, perform the lowering pass.
7089 */
7090 if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput ||
7091 options->EmitNoIndirectTemp || options->EmitNoIndirectUniform) {
7092 lower_variable_index_to_cond_assign(stage, ir,
7093 options->EmitNoIndirectInput,
7094 options->EmitNoIndirectOutput,
7095 options->EmitNoIndirectTemp,
7096 options->EmitNoIndirectUniform);
7097 }
7098
7099 if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
7100 lower_64bit_integer_instructions(ir, DIV64 | MOD64);
7101
7102 if (ctx->Extensions.ARB_shading_language_packing) {
7103 unsigned lower_inst = LOWER_PACK_SNORM_2x16 |
7104 LOWER_UNPACK_SNORM_2x16 |
7105 LOWER_PACK_UNORM_2x16 |
7106 LOWER_UNPACK_UNORM_2x16 |
7107 LOWER_PACK_SNORM_4x8 |
7108 LOWER_UNPACK_SNORM_4x8 |
7109 LOWER_UNPACK_UNORM_4x8 |
7110 LOWER_PACK_UNORM_4x8;
7111
7112 if (ctx->Extensions.ARB_gpu_shader5)
7113 lower_inst |= LOWER_PACK_USE_BFI |
7114 LOWER_PACK_USE_BFE;
7115 if (!ctx->st->has_half_float_packing)
7116 lower_inst |= LOWER_PACK_HALF_2x16 |
7117 LOWER_UNPACK_HALF_2x16;
7118
7119 lower_packing_builtins(ir, lower_inst);
7120 }
7121
7122 if (!pscreen->get_param(pscreen, PIPE_CAP_TEXTURE_GATHER_OFFSETS))
7123 lower_offset_arrays(ir);
7124 do_mat_op_to_vec(ir);
7125
7126 if (stage == MESA_SHADER_FRAGMENT)
7127 lower_blend_equation_advanced(
7128 shader, ctx->Extensions.KHR_blend_equation_advanced_coherent);
7129
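/* Lower instruction forms that the driver cannot handle directly. */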
7130 lower_instructions(ir,
7131 MOD_TO_FLOOR |
7132 FDIV_TO_MUL_RCP |
7133 EXP_TO_EXP2 |
7134 LOG_TO_LOG2 |
7135 (have_ldexp ? 0 : LDEXP_TO_ARITH) |
7136 (have_dfrexp ? 0 : DFREXP_DLDEXP_TO_ARITH) |
7137 CARRY_TO_ARITH |
7138 BORROW_TO_ARITH |
7139 (have_dround ? 0 : DOPS_TO_DFRAC) |
7140 (options->EmitNoPow ? POW_TO_EXP2 : 0) |
7141 (!ctx->Const.NativeIntegers ? INT_DIV_TO_MUL_RCP : 0) |
7142 (options->EmitNoSat ? SAT_TO_CLAMP : 0) |
7143 (ctx->Const.ForceGLSLAbsSqrt ? SQRT_TO_ABS_SQRT : 0) |
7144 /* Assume that if ARB_gpu_shader5 is not supported
7145 * then all of the extended integer functions need
7146 * lowering. It may be necessary to add some caps
7147 * for individual instructions.
7148 */
7149 (!ctx->Extensions.ARB_gpu_shader5
7150 ? BIT_COUNT_TO_MATH |
7151 EXTRACT_TO_SHIFTS |
7152 INSERT_TO_SHIFTS |
7153 REVERSE_TO_SHIFTS |
7154 FIND_LSB_TO_FLOAT_CAST |
7155 FIND_MSB_TO_FLOAT_CAST |
7156 IMUL_HIGH_TO_MUL
7157 : 0));
7158
7159 do_vec_index_to_cond_assign(ir);
7160 lower_vector_insert(ir, true);
7161 lower_quadop_vector(ir, false);
7162 lower_noise(ir);
7163 if (options->MaxIfDepth == 0) {
7164 lower_discard(ir);
7165 }
7166
7167 if (ctx->Const.GLSLOptimizeConservatively) {
7168 /* Do it once and repeat only if there's unsupported control flow. */
7169 do {
7170 do_common_optimization(ir, true, true, options,
7171 ctx->Const.NativeIntegers);
7172 lower_if_to_cond_assign((gl_shader_stage)i, ir,
7173 options->MaxIfDepth, if_threshold);
7174 } while (has_unsupported_control_flow(ir, options));
7175 } else {
7176 /* Repeat it until it stops making changes. */
7177 bool progress;
7178 do {
7179 progress = do_common_optimization(ir, true, true, options,
7180 ctx->Const.NativeIntegers);
7181 progress |= lower_if_to_cond_assign((gl_shader_stage)i, ir,
7182 options->MaxIfDepth, if_threshold);
7183 } while (progress);
7184 }
7185
7186 /* Do this again to lower ir_binop_vector_extract introduced
7187 * by optimization passes.
7188 */
7189 do_vec_index_to_cond_assign(ir);
7190
7191 validate_ir_tree(ir);
7192 }
7193
7194 build_program_resource_list(ctx, prog);
7195
7196 if (use_nir)
7197 return st_link_nir(ctx, prog);
7198
7199 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
7200 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
7201 if (shader == NULL)
7202 continue;
7203
7204 struct gl_program *linked_prog =
7205 get_mesa_program_tgsi(ctx, prog, shader);
7206
7207 if (linked_prog) {
7208 st_set_prog_affected_state_flags(linked_prog);
7209 if (!ctx->Driver.ProgramStringNotify(ctx,
7210 _mesa_shader_stage_to_program(i),
7211 linked_prog)) {
7212 _mesa_reference_program(ctx, &shader->Program, NULL);
7213 return GL_FALSE;
7214 }
7215 }
7216 }
7217
7218 return GL_TRUE;
7219 }
7220
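/**
* Fill in a pipe_stream_output_info from the transform feedback info of the
* last vertex-processing stage recorded in the visitor's shader program.
* Does nothing if no such stage exists.
*/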
7221 void
7222 st_translate_stream_output_info(glsl_to_tgsi_visitor *glsl_to_tgsi,
7223 const ubyte outputMapping[],
7224 struct pipe_stream_output_info *so)
7225 {
7226 if (!glsl_to_tgsi->shader_program->last_vert_prog)
7227 return;
7228
7229 struct gl_transform_feedback_info *info =
7230 glsl_to_tgsi->shader_program->last_vert_prog->sh.LinkedTransformFeedback;
7231 st_translate_stream_output_info2(info, outputMapping, so);
7232 }
7233
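/**
* Translate Mesa's gl_transform_feedback_info into a pipe_stream_output_info,
* remapping each output's register index through outputMapping and copying
* the per-buffer strides.
*/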
7234 void
7235 st_translate_stream_output_info2(struct gl_transform_feedback_info *info,
7236 const ubyte outputMapping[],
7237 struct pipe_stream_output_info *so)
7238 {
7239 unsigned i;
7240
7241 for (i = 0; i < info->NumOutputs; i++) {
7242 so->output[i].register_index =
7243 outputMapping[info->Outputs[i].OutputRegister];
7244 so->output[i].start_component = info->Outputs[i].ComponentOffset;
7245 so->output[i].num_components = info->Outputs[i].NumComponents;
7246 so->output[i].output_buffer = info->Outputs[i].OutputBuffer;
7247 so->output[i].dst_offset = info->Outputs[i].DstOffset;
7248 so->output[i].stream = info->Outputs[i].StreamId;
7249 }
7250
7251 for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
7252 so->stride[i] = info->Buffers[i].Stride;
7253 }
7254 so->num_outputs = info->NumOutputs;
7255 }
7256
7257 } /* extern "C" */