2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 #include "r600_llvm.h"
25 #include "r600_formats.h"
26 #include "r600_opcodes.h"
27 #include "r600_shader.h"
30 #include "pipe/p_shader_tokens.h"
31 #include "tgsi/tgsi_info.h"
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_scan.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "util/u_memory.h"
41 Why CAYMAN got loops for lots of instructions is explained here.
43 -These 8xx t-slot only ops are implemented in all vector slots.
44 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
45 These 8xx t-slot only opcodes become vector ops, with all four
46 slots expecting the arguments on sources a and b. Result is
47 broadcast to all channels.
48 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT
49 These 8xx t-slot only opcodes become vector ops in the z, y, and
51 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
52 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
55 The w slot may have an independent co-issued operation, or if the
56 result is required to be in the w slot, the opcode above may be
57 issued in the w slot as well.
58 The compiler must issue the source argument to slots z, y, and x
61 static int r600_pipe_shader(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
)
63 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
64 struct r600_shader
*rshader
= &shader
->shader
;
69 if (shader
->bo
== NULL
) {
70 shader
->bo
= (struct r600_resource
*)
71 pipe_buffer_create(ctx
->screen
, PIPE_BIND_CUSTOM
, PIPE_USAGE_IMMUTABLE
, rshader
->bc
.ndw
* 4);
72 if (shader
->bo
== NULL
) {
75 ptr
= (uint32_t*)rctx
->ws
->buffer_map(shader
->bo
->cs_buf
, rctx
->cs
, PIPE_TRANSFER_WRITE
);
76 if (R600_BIG_ENDIAN
) {
77 for (i
= 0; i
< rshader
->bc
.ndw
; ++i
) {
78 ptr
[i
] = bswap_32(rshader
->bc
.bytecode
[i
]);
81 memcpy(ptr
, rshader
->bc
.bytecode
, rshader
->bc
.ndw
* sizeof(*ptr
));
83 rctx
->ws
->buffer_unmap(shader
->bo
->cs_buf
);
86 switch (rshader
->processor_type
) {
87 case TGSI_PROCESSOR_VERTEX
:
88 if (rctx
->chip_class
>= EVERGREEN
) {
89 evergreen_pipe_shader_vs(ctx
, shader
);
91 r600_pipe_shader_vs(ctx
, shader
);
94 case TGSI_PROCESSOR_FRAGMENT
:
95 if (rctx
->chip_class
>= EVERGREEN
) {
96 evergreen_pipe_shader_ps(ctx
, shader
);
98 r600_pipe_shader_ps(ctx
, shader
);
107 static int r600_shader_from_tgsi(struct r600_screen
*rscreen
,
108 struct r600_pipe_shader
*pipeshader
,
109 struct r600_shader_key key
);
111 static void r600_dump_streamout(struct pipe_stream_output_info
*so
)
115 fprintf(stderr
, "STREAMOUT\n");
116 for (i
= 0; i
< so
->num_outputs
; i
++) {
117 unsigned mask
= ((1 << so
->output
[i
].num_components
) - 1) <<
118 so
->output
[i
].start_component
;
119 fprintf(stderr
, " %i: MEM_STREAM0_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
120 i
, so
->output
[i
].output_buffer
,
121 so
->output
[i
].dst_offset
, so
->output
[i
].dst_offset
+ so
->output
[i
].num_components
- 1,
122 so
->output
[i
].register_index
,
127 so
->output
[i
].dst_offset
< so
->output
[i
].start_component
? " (will lower)" : "");
131 int r600_pipe_shader_create(struct pipe_context
*ctx
,
132 struct r600_pipe_shader
*shader
,
133 struct r600_shader_key key
)
135 static int dump_shaders
= -1;
136 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
137 struct r600_pipe_shader_selector
*sel
= shader
->selector
;
140 /* Would like some magic "get_bool_option_once" routine.
142 if (dump_shaders
== -1)
143 dump_shaders
= debug_get_bool_option("R600_DUMP_SHADERS", FALSE
);
146 fprintf(stderr
, "--------------------------------------------------------------\n");
147 tgsi_dump(sel
->tokens
, 0);
149 if (sel
->so
.num_outputs
) {
150 r600_dump_streamout(&sel
->so
);
153 r
= r600_shader_from_tgsi(rctx
->screen
, shader
, key
);
155 R600_ERR("translation from TGSI failed !\n");
158 r
= r600_bytecode_build(&shader
->shader
.bc
);
160 R600_ERR("building bytecode failed !\n");
164 r600_bytecode_dump(&shader
->shader
.bc
);
165 fprintf(stderr
, "______________________________________________________________\n");
167 return r600_pipe_shader(ctx
, shader
);
170 void r600_pipe_shader_destroy(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
)
172 pipe_resource_reference((struct pipe_resource
**)&shader
->bo
, NULL
);
173 r600_bytecode_clear(&shader
->shader
.bc
);
177 * tgsi -> r600 shader
179 struct r600_shader_tgsi_instruction
;
181 struct r600_shader_src
{
190 struct r600_shader_ctx
{
191 struct tgsi_shader_info info
;
192 struct tgsi_parse_context parse
;
193 const struct tgsi_token
*tokens
;
195 unsigned file_offset
[TGSI_FILE_COUNT
];
197 struct r600_shader_tgsi_instruction
*inst_info
;
198 struct r600_bytecode
*bc
;
199 struct r600_shader
*shader
;
200 struct r600_shader_src src
[4];
203 uint32_t max_driver_temp_used
;
205 /* needed for evergreen interpolation */
206 boolean input_centroid
;
207 boolean input_linear
;
208 boolean input_perspective
;
212 boolean clip_vertex_write
;
218 struct r600_shader_tgsi_instruction
{
219 unsigned tgsi_opcode
;
221 unsigned r600_opcode
;
222 int (*process
)(struct r600_shader_ctx
*ctx
);
225 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction
[], eg_shader_tgsi_instruction
[], cm_shader_tgsi_instruction
[];
226 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx
*ctx
);
227 static inline void callstack_check_depth(struct r600_shader_ctx
*ctx
, unsigned reason
, unsigned check_max_only
);
228 static void fc_pushlevel(struct r600_shader_ctx
*ctx
, int type
);
229 static int tgsi_else(struct r600_shader_ctx
*ctx
);
230 static int tgsi_endif(struct r600_shader_ctx
*ctx
);
231 static int tgsi_bgnloop(struct r600_shader_ctx
*ctx
);
232 static int tgsi_endloop(struct r600_shader_ctx
*ctx
);
233 static int tgsi_loop_brk_cont(struct r600_shader_ctx
*ctx
);
236 * bytestream -> r600 shader
238 * These functions are used to transform the output of the LLVM backend into
239 * struct r600_bytecode.
242 static void r600_bytecode_from_byte_stream(struct r600_shader_ctx
*ctx
,
243 unsigned char * bytes
, unsigned num_bytes
);
#ifdef HAVE_OPENCL
/* Compile an LLVM compute module to r600 bytecode via the LLVM backend's
 * byte stream. NOTE(review): reconstructed from a corrupted dump; the dump
 * flag, byte_count declaration and return restored per upstream Mesa. */
int r600_compute_shader_create(struct pipe_context * ctx,
	LLVMModuleRef mod,  struct r600_bytecode * bytecode)
{
	struct r600_context *r600_ctx = (struct r600_context *)ctx;
	unsigned char * bytes;
	unsigned byte_count;
	struct r600_shader_ctx shader_ctx;
	unsigned dump = 0;

	if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE)) {
		dump = 1;
	}

	r600_llvm_compile(mod, &bytes, &byte_count, r600_ctx->family, dump);
	shader_ctx.bc = bytecode;
	r600_bytecode_init(shader_ctx.bc, r600_ctx->chip_class, r600_ctx->family,
			   r600_ctx->screen->msaa_texture_support);
	shader_ctx.bc->type = TGSI_PROCESSOR_COMPUTE;
	r600_bytecode_from_byte_stream(&shader_ctx, bytes, byte_count);
	/* CAYMAN needs an explicit CF_END to terminate the program */
	if (shader_ctx.bc->chip_class == CAYMAN) {
		cm_bytecode_add_cf_end(shader_ctx.bc);
	}
	r600_bytecode_build(shader_ctx.bc);
	if (dump) {
		r600_bytecode_dump(shader_ctx.bc);
	}
	free(bytes);
	return 1;
}
#endif /* HAVE_OPENCL */
/* Read a little-endian 32-bit word from the byte stream, advancing
 * *bytes_read past the four consumed bytes.
 * Fix: cast each byte to uint32_t before shifting - without the cast the
 * byte is promoted to (signed) int, and shifting a value >= 0x80 left by
 * 24 overflows int, which is undefined behavior in C. */
static uint32_t i32_from_byte_stream(unsigned char * bytes,
		unsigned * bytes_read)
{
	unsigned i;
	uint32_t out = 0;
	for (i = 0; i < 4; i++) {
		out |= (uint32_t)bytes[(*bytes_read)++] << (8 * i);
	}
	return out;
}
289 static unsigned r600_src_from_byte_stream(unsigned char * bytes
,
290 unsigned bytes_read
, struct r600_bytecode_alu
* alu
, unsigned src_idx
)
294 sel0
= bytes
[bytes_read
++];
295 sel1
= bytes
[bytes_read
++];
296 alu
->src
[src_idx
].sel
= sel0
| (sel1
<< 8);
297 alu
->src
[src_idx
].chan
= bytes
[bytes_read
++];
298 alu
->src
[src_idx
].neg
= bytes
[bytes_read
++];
299 alu
->src
[src_idx
].abs
= bytes
[bytes_read
++];
300 alu
->src
[src_idx
].rel
= bytes
[bytes_read
++];
301 alu
->src
[src_idx
].kc_bank
= bytes
[bytes_read
++];
302 for (i
= 0; i
< 4; i
++) {
303 alu
->src
[src_idx
].value
|= bytes
[bytes_read
++] << (i
* 8);
308 static unsigned r600_alu_from_byte_stream(struct r600_shader_ctx
*ctx
,
309 unsigned char * bytes
, unsigned bytes_read
)
312 struct r600_bytecode_alu alu
;
313 unsigned src_const_reg
[3];
314 uint32_t word0
, word1
;
316 memset(&alu
, 0, sizeof(alu
));
317 for(src_idx
= 0; src_idx
< 3; src_idx
++) {
319 src_const_reg
[src_idx
] = bytes
[bytes_read
++];
320 for (i
= 0; i
< 4; i
++) {
321 alu
.src
[src_idx
].value
|= bytes
[bytes_read
++] << (i
* 8);
325 word0
= i32_from_byte_stream(bytes
, &bytes_read
);
326 word1
= i32_from_byte_stream(bytes
, &bytes_read
);
328 switch(ctx
->bc
->chip_class
) {
330 r600_bytecode_alu_read(&alu
, word0
, word1
);
335 r700_bytecode_alu_read(&alu
, word0
, word1
);
339 for(src_idx
= 0; src_idx
< 3; src_idx
++) {
340 if (src_const_reg
[src_idx
])
341 alu
.src
[src_idx
].sel
+= 512;
344 #if HAVE_LLVM < 0x0302
345 if (alu
.inst
== CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
) ||
346 alu
.inst
== CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
) ||
347 alu
.inst
== CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
) ||
348 alu
.inst
== CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
)) {
351 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
357 if (alu
.inst
== CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
)) {
358 ctx
->bc
->ar_reg
= alu
.src
[0].sel
;
359 ctx
->bc
->ar_loaded
= 0;
363 if (alu
.execute_mask
) {
365 r600_bytecode_add_alu_type(ctx
->bc
, &alu
, CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
));
367 r600_bytecode_add_alu(ctx
->bc
, &alu
);
370 /* XXX: Handle other KILL instructions */
371 if (alu
.inst
== CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
)) {
372 ctx
->shader
->uses_kill
= 1;
373 /* XXX: This should be enforced in the LLVM backend. */
374 ctx
->bc
->force_add_cf
= 1;
379 static void llvm_if(struct r600_shader_ctx
*ctx
)
381 r600_bytecode_add_cfinst(ctx
->bc
, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_JUMP
));
382 fc_pushlevel(ctx
, FC_IF
);
383 callstack_check_depth(ctx
, FC_PUSH_VPM
, 0);
386 static void r600_break_from_byte_stream(struct r600_shader_ctx
*ctx
)
388 unsigned opcode
= TGSI_OPCODE_BRK
;
389 if (ctx
->bc
->chip_class
== CAYMAN
)
390 ctx
->inst_info
= &cm_shader_tgsi_instruction
[opcode
];
391 else if (ctx
->bc
->chip_class
>= EVERGREEN
)
392 ctx
->inst_info
= &eg_shader_tgsi_instruction
[opcode
];
394 ctx
->inst_info
= &r600_shader_tgsi_instruction
[opcode
];
396 tgsi_loop_brk_cont(ctx
);
400 static unsigned r600_fc_from_byte_stream(struct r600_shader_ctx
*ctx
,
401 unsigned char * bytes
, unsigned bytes_read
)
403 struct r600_bytecode_alu alu
;
405 memset(&alu
, 0, sizeof(alu
));
406 bytes_read
= r600_src_from_byte_stream(bytes
, bytes_read
, &alu
, 0);
407 inst
= bytes
[bytes_read
++];
409 case 0: /* IF_PREDICATED */
418 case 3: /* BGNLOOP */
421 case 4: /* ENDLOOP */
424 case 5: /* PREDICATED_BREAK */
425 r600_break_from_byte_stream(ctx
);
427 case 6: /* CONTINUE */
429 unsigned opcode
= TGSI_OPCODE_CONT
;
430 if (ctx
->bc
->chip_class
== CAYMAN
) {
432 &cm_shader_tgsi_instruction
[opcode
];
433 } else if (ctx
->bc
->chip_class
>= EVERGREEN
) {
435 &eg_shader_tgsi_instruction
[opcode
];
438 &r600_shader_tgsi_instruction
[opcode
];
440 tgsi_loop_brk_cont(ctx
);
448 static unsigned r600_tex_from_byte_stream(struct r600_shader_ctx
*ctx
,
449 unsigned char * bytes
, unsigned bytes_read
)
451 struct r600_bytecode_tex tex
;
453 tex
.inst
= bytes
[bytes_read
++];
454 tex
.resource_id
= bytes
[bytes_read
++];
455 tex
.src_gpr
= bytes
[bytes_read
++];
456 tex
.src_rel
= bytes
[bytes_read
++];
457 tex
.dst_gpr
= bytes
[bytes_read
++];
458 tex
.dst_rel
= bytes
[bytes_read
++];
459 tex
.dst_sel_x
= bytes
[bytes_read
++];
460 tex
.dst_sel_y
= bytes
[bytes_read
++];
461 tex
.dst_sel_z
= bytes
[bytes_read
++];
462 tex
.dst_sel_w
= bytes
[bytes_read
++];
463 tex
.lod_bias
= bytes
[bytes_read
++];
464 tex
.coord_type_x
= bytes
[bytes_read
++];
465 tex
.coord_type_y
= bytes
[bytes_read
++];
466 tex
.coord_type_z
= bytes
[bytes_read
++];
467 tex
.coord_type_w
= bytes
[bytes_read
++];
468 tex
.offset_x
= bytes
[bytes_read
++];
469 tex
.offset_y
= bytes
[bytes_read
++];
470 tex
.offset_z
= bytes
[bytes_read
++];
471 tex
.sampler_id
= bytes
[bytes_read
++];
472 tex
.src_sel_x
= bytes
[bytes_read
++];
473 tex
.src_sel_y
= bytes
[bytes_read
++];
474 tex
.src_sel_z
= bytes
[bytes_read
++];
475 tex
.src_sel_w
= bytes
[bytes_read
++];
479 r600_bytecode_add_tex(ctx
->bc
, &tex
);
484 static int r600_vtx_from_byte_stream(struct r600_shader_ctx
*ctx
,
485 unsigned char * bytes
, unsigned bytes_read
)
487 struct r600_bytecode_vtx vtx
;
489 uint32_t word0
= i32_from_byte_stream(bytes
, &bytes_read
);
490 uint32_t word1
= i32_from_byte_stream(bytes
, &bytes_read
);
491 uint32_t word2
= i32_from_byte_stream(bytes
, &bytes_read
);
493 memset(&vtx
, 0, sizeof(vtx
));
496 vtx
.inst
= G_SQ_VTX_WORD0_VTX_INST(word0
);
497 vtx
.fetch_type
= G_SQ_VTX_WORD0_FETCH_TYPE(word0
);
498 vtx
.buffer_id
= G_SQ_VTX_WORD0_BUFFER_ID(word0
);
499 vtx
.src_gpr
= G_SQ_VTX_WORD0_SRC_GPR(word0
);
500 vtx
.src_sel_x
= G_SQ_VTX_WORD0_SRC_SEL_X(word0
);
501 vtx
.mega_fetch_count
= G_SQ_VTX_WORD0_MEGA_FETCH_COUNT(word0
);
504 vtx
.dst_gpr
= G_SQ_VTX_WORD1_GPR_DST_GPR(word1
);
505 vtx
.dst_sel_x
= G_SQ_VTX_WORD1_DST_SEL_X(word1
);
506 vtx
.dst_sel_y
= G_SQ_VTX_WORD1_DST_SEL_Y(word1
);
507 vtx
.dst_sel_z
= G_SQ_VTX_WORD1_DST_SEL_Z(word1
);
508 vtx
.dst_sel_w
= G_SQ_VTX_WORD1_DST_SEL_W(word1
);
509 vtx
.use_const_fields
= G_SQ_VTX_WORD1_USE_CONST_FIELDS(word1
);
510 vtx
.data_format
= G_SQ_VTX_WORD1_DATA_FORMAT(word1
);
511 vtx
.num_format_all
= G_SQ_VTX_WORD1_NUM_FORMAT_ALL(word1
);
512 vtx
.format_comp_all
= G_SQ_VTX_WORD1_FORMAT_COMP_ALL(word1
);
513 vtx
.srf_mode_all
= G_SQ_VTX_WORD1_SRF_MODE_ALL(word1
);
516 vtx
.offset
= G_SQ_VTX_WORD2_OFFSET(word2
);
517 vtx
.endian
= G_SQ_VTX_WORD2_ENDIAN_SWAP(word2
);
519 if (r600_bytecode_add_vtx(ctx
->bc
, &vtx
)) {
520 fprintf(stderr
, "Error adding vtx\n");
522 /* Use the Texture Cache */
523 ctx
->bc
->cf_last
->inst
= EG_V_SQ_CF_WORD1_SQ_CF_INST_TEX
;
527 static int r600_export_from_byte_stream(struct r600_shader_ctx
*ctx
,
528 unsigned char * bytes
, unsigned bytes_read
)
530 uint32_t word0
= 0, word1
= 0;
531 struct r600_bytecode_output output
;
532 memset(&output
, 0, sizeof(struct r600_bytecode_output
));
533 word0
= i32_from_byte_stream(bytes
, &bytes_read
);
534 word1
= i32_from_byte_stream(bytes
, &bytes_read
);
535 if (ctx
->bc
->chip_class
>= EVERGREEN
)
536 eg_bytecode_export_read(&output
, word0
,word1
);
538 r600_bytecode_export_read(&output
, word0
,word1
);
539 r600_bytecode_add_output(ctx
->bc
, &output
);
543 static void r600_bytecode_from_byte_stream(struct r600_shader_ctx
*ctx
,
544 unsigned char * bytes
, unsigned num_bytes
)
546 unsigned bytes_read
= 0;
548 while (bytes_read
< num_bytes
) {
549 char inst_type
= bytes
[bytes_read
++];
552 bytes_read
= r600_alu_from_byte_stream(ctx
, bytes
,
556 bytes_read
= r600_tex_from_byte_stream(ctx
, bytes
,
560 bytes_read
= r600_fc_from_byte_stream(ctx
, bytes
,
564 r600_bytecode_add_cfinst(ctx
->bc
, CF_NATIVE
);
565 for (i
= 0; i
< 2; i
++) {
566 for (byte
= 0 ; byte
< 4; byte
++) {
567 ctx
->bc
->cf_last
->isa
[i
] |=
568 (bytes
[bytes_read
++] << (byte
* 8));
574 bytes_read
= r600_vtx_from_byte_stream(ctx
, bytes
,
578 bytes_read
= r600_export_from_byte_stream(ctx
, bytes
,
582 /* XXX: Error here */
588 /* End bytestream -> r600 shader functions*/
590 static int tgsi_is_supported(struct r600_shader_ctx
*ctx
)
592 struct tgsi_full_instruction
*i
= &ctx
->parse
.FullToken
.FullInstruction
;
595 if (i
->Instruction
.NumDstRegs
> 1) {
596 R600_ERR("too many dst (%d)\n", i
->Instruction
.NumDstRegs
);
599 if (i
->Instruction
.Predicate
) {
600 R600_ERR("predicate unsupported\n");
604 if (i
->Instruction
.Label
) {
605 R600_ERR("label unsupported\n");
609 for (j
= 0; j
< i
->Instruction
.NumSrcRegs
; j
++) {
610 if (i
->Src
[j
].Register
.Dimension
) {
611 R600_ERR("unsupported src %d (dimension %d)\n", j
,
612 i
->Src
[j
].Register
.Dimension
);
616 for (j
= 0; j
< i
->Instruction
.NumDstRegs
; j
++) {
617 if (i
->Dst
[j
].Register
.Dimension
) {
618 R600_ERR("unsupported dst (dimension)\n");
625 static int evergreen_interp_alu(struct r600_shader_ctx
*ctx
, int input
)
628 struct r600_bytecode_alu alu
;
629 int gpr
= 0, base_chan
= 0;
632 if (ctx
->shader
->input
[input
].interpolate
== TGSI_INTERPOLATE_PERSPECTIVE
) {
634 if (ctx
->shader
->input
[input
].centroid
)
636 } else if (ctx
->shader
->input
[input
].interpolate
== TGSI_INTERPOLATE_LINEAR
) {
638 /* if we have perspective add one */
639 if (ctx
->input_perspective
) {
641 /* if we have perspective centroid */
642 if (ctx
->input_centroid
)
645 if (ctx
->shader
->input
[input
].centroid
)
649 /* work out gpr and base_chan from index */
651 base_chan
= (2 * (ij_index
% 2)) + 1;
653 for (i
= 0; i
< 8; i
++) {
654 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
657 alu
.inst
= EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INTERP_ZW
;
659 alu
.inst
= EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INTERP_XY
;
661 if ((i
> 1) && (i
< 6)) {
662 alu
.dst
.sel
= ctx
->shader
->input
[input
].gpr
;
666 alu
.dst
.chan
= i
% 4;
668 alu
.src
[0].sel
= gpr
;
669 alu
.src
[0].chan
= (base_chan
- (i
% 2));
671 alu
.src
[1].sel
= V_SQ_ALU_SRC_PARAM_BASE
+ ctx
->shader
->input
[input
].lds_pos
;
673 alu
.bank_swizzle_force
= SQ_ALU_VEC_210
;
676 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
683 static int evergreen_interp_flat(struct r600_shader_ctx
*ctx
, int input
)
686 struct r600_bytecode_alu alu
;
688 for (i
= 0; i
< 4; i
++) {
689 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
691 alu
.inst
= EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INTERP_LOAD_P0
;
693 alu
.dst
.sel
= ctx
->shader
->input
[input
].gpr
;
698 alu
.src
[0].sel
= V_SQ_ALU_SRC_PARAM_BASE
+ ctx
->shader
->input
[input
].lds_pos
;
703 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
711 * Special export handling in shaders
713 * shader export ARRAY_BASE for EXPORT_POS:
716 * 62, 63 are clip distance vectors
718 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
719 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
720 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
721 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
722 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
723 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
724 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
725 * exclusive from render target index)
726 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
729 * shader export ARRAY_BASE for EXPORT_PIXEL:
731 * 61 computed Z vector
733 * The use of the values exported in the computed Z vector are controlled
734 * by DB_SHADER_CONTROL:
735 * Z_EXPORT_ENABLE - Z as a float in RED
736 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
737 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
738 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
739 * DB_SOURCE_FORMAT - export control restrictions
744 /* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
745 static int r600_spi_sid(struct r600_shader_io
* io
)
747 int index
, name
= io
->name
;
749 /* These params are handled differently, they don't need
750 * semantic indices, so we'll use 0 for them.
752 if (name
== TGSI_SEMANTIC_POSITION
||
753 name
== TGSI_SEMANTIC_PSIZE
||
754 name
== TGSI_SEMANTIC_FACE
)
757 if (name
== TGSI_SEMANTIC_GENERIC
) {
758 /* For generic params simply use sid from tgsi */
761 /* For non-generic params - pack name and sid into 8 bits */
762 index
= 0x80 | (name
<<3) | (io
->sid
);
765 /* Make sure that all really used indices have nonzero value, so
766 * we can just compare it to 0 later instead of comparing the name
767 * with different values to detect special cases. */
774 /* turn input into interpolate on EG */
775 static int evergreen_interp_input(struct r600_shader_ctx
*ctx
, int index
)
779 if (ctx
->shader
->input
[index
].spi_sid
) {
780 ctx
->shader
->input
[index
].lds_pos
= ctx
->shader
->nlds
++;
781 if (!ctx
->use_llvm
) {
782 if (ctx
->shader
->input
[index
].interpolate
> 0) {
783 r
= evergreen_interp_alu(ctx
, index
);
785 r
= evergreen_interp_flat(ctx
, index
);
792 static int select_twoside_color(struct r600_shader_ctx
*ctx
, int front
, int back
)
794 struct r600_bytecode_alu alu
;
796 int gpr_front
= ctx
->shader
->input
[front
].gpr
;
797 int gpr_back
= ctx
->shader
->input
[back
].gpr
;
799 for (i
= 0; i
< 4; i
++) {
800 memset(&alu
, 0, sizeof(alu
));
801 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
);
804 alu
.dst
.sel
= gpr_front
;
805 alu
.src
[0].sel
= ctx
->face_gpr
;
806 alu
.src
[1].sel
= gpr_front
;
807 alu
.src
[2].sel
= gpr_back
;
814 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
821 static int tgsi_declaration(struct r600_shader_ctx
*ctx
)
823 struct tgsi_full_declaration
*d
= &ctx
->parse
.FullToken
.FullDeclaration
;
827 switch (d
->Declaration
.File
) {
828 case TGSI_FILE_INPUT
:
829 i
= ctx
->shader
->ninput
++;
830 ctx
->shader
->input
[i
].name
= d
->Semantic
.Name
;
831 ctx
->shader
->input
[i
].sid
= d
->Semantic
.Index
;
832 ctx
->shader
->input
[i
].spi_sid
= r600_spi_sid(&ctx
->shader
->input
[i
]);
833 ctx
->shader
->input
[i
].interpolate
= d
->Interp
.Interpolate
;
834 ctx
->shader
->input
[i
].centroid
= d
->Interp
.Centroid
;
835 ctx
->shader
->input
[i
].gpr
= ctx
->file_offset
[TGSI_FILE_INPUT
] + d
->Range
.First
;
836 if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
837 switch (ctx
->shader
->input
[i
].name
) {
838 case TGSI_SEMANTIC_FACE
:
839 ctx
->face_gpr
= ctx
->shader
->input
[i
].gpr
;
841 case TGSI_SEMANTIC_COLOR
:
844 case TGSI_SEMANTIC_POSITION
:
845 ctx
->fragcoord_input
= i
;
848 if (ctx
->bc
->chip_class
>= EVERGREEN
) {
849 if ((r
= evergreen_interp_input(ctx
, i
)))
854 case TGSI_FILE_OUTPUT
:
855 i
= ctx
->shader
->noutput
++;
856 ctx
->shader
->output
[i
].name
= d
->Semantic
.Name
;
857 ctx
->shader
->output
[i
].sid
= d
->Semantic
.Index
;
858 ctx
->shader
->output
[i
].spi_sid
= r600_spi_sid(&ctx
->shader
->output
[i
]);
859 ctx
->shader
->output
[i
].gpr
= ctx
->file_offset
[TGSI_FILE_OUTPUT
] + d
->Range
.First
;
860 ctx
->shader
->output
[i
].interpolate
= d
->Interp
.Interpolate
;
861 ctx
->shader
->output
[i
].write_mask
= d
->Declaration
.UsageMask
;
862 if (ctx
->type
== TGSI_PROCESSOR_VERTEX
) {
863 switch (d
->Semantic
.Name
) {
864 case TGSI_SEMANTIC_CLIPDIST
:
865 ctx
->shader
->clip_dist_write
|= d
->Declaration
.UsageMask
<< (d
->Semantic
.Index
<< 2);
867 case TGSI_SEMANTIC_PSIZE
:
868 ctx
->shader
->vs_out_misc_write
= 1;
869 ctx
->shader
->vs_out_point_size
= 1;
871 case TGSI_SEMANTIC_CLIPVERTEX
:
872 ctx
->clip_vertex_write
= TRUE
;
876 } else if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
877 switch (d
->Semantic
.Name
) {
878 case TGSI_SEMANTIC_COLOR
:
879 ctx
->shader
->nr_ps_max_color_exports
++;
884 case TGSI_FILE_CONSTANT
:
885 case TGSI_FILE_TEMPORARY
:
886 case TGSI_FILE_SAMPLER
:
887 case TGSI_FILE_ADDRESS
:
890 case TGSI_FILE_SYSTEM_VALUE
:
891 if (d
->Semantic
.Name
== TGSI_SEMANTIC_INSTANCEID
) {
892 if (!ctx
->native_integers
) {
893 struct r600_bytecode_alu alu
;
894 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
896 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
);
905 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
909 } else if (d
->Semantic
.Name
== TGSI_SEMANTIC_VERTEXID
)
912 R600_ERR("unsupported file %d declaration\n", d
->Declaration
.File
);
918 static int r600_get_temp(struct r600_shader_ctx
*ctx
)
920 return ctx
->temp_reg
+ ctx
->max_driver_temp_used
++;
924 * for evergreen we need to scan the shader to find the number of GPRs we need to
925 * reserve for interpolation.
927 * we need to know if we are going to emit
928 * any centroid inputs
929 * if perspective and linear are required
931 static int evergreen_gpr_count(struct r600_shader_ctx
*ctx
)
936 ctx
->input_linear
= FALSE
;
937 ctx
->input_perspective
= FALSE
;
938 ctx
->input_centroid
= FALSE
;
939 ctx
->num_interp_gpr
= 1;
941 /* any centroid inputs */
942 for (i
= 0; i
< ctx
->info
.num_inputs
; i
++) {
943 /* skip position/face */
944 if (ctx
->info
.input_semantic_name
[i
] == TGSI_SEMANTIC_POSITION
||
945 ctx
->info
.input_semantic_name
[i
] == TGSI_SEMANTIC_FACE
)
947 if (ctx
->info
.input_interpolate
[i
] == TGSI_INTERPOLATE_LINEAR
)
948 ctx
->input_linear
= TRUE
;
949 if (ctx
->info
.input_interpolate
[i
] == TGSI_INTERPOLATE_PERSPECTIVE
)
950 ctx
->input_perspective
= TRUE
;
951 if (ctx
->info
.input_centroid
[i
])
952 ctx
->input_centroid
= TRUE
;
956 /* ignoring sample for now */
957 if (ctx
->input_perspective
)
959 if (ctx
->input_linear
)
961 if (ctx
->input_centroid
)
964 ctx
->num_interp_gpr
+= (num_baryc
+ 1) >> 1;
966 /* XXX PULL MODEL and LINE STIPPLE, FIXED PT POS */
967 return ctx
->num_interp_gpr
;
970 static void tgsi_src(struct r600_shader_ctx
*ctx
,
971 const struct tgsi_full_src_register
*tgsi_src
,
972 struct r600_shader_src
*r600_src
)
974 memset(r600_src
, 0, sizeof(*r600_src
));
975 r600_src
->swizzle
[0] = tgsi_src
->Register
.SwizzleX
;
976 r600_src
->swizzle
[1] = tgsi_src
->Register
.SwizzleY
;
977 r600_src
->swizzle
[2] = tgsi_src
->Register
.SwizzleZ
;
978 r600_src
->swizzle
[3] = tgsi_src
->Register
.SwizzleW
;
979 r600_src
->neg
= tgsi_src
->Register
.Negate
;
980 r600_src
->abs
= tgsi_src
->Register
.Absolute
;
982 if (tgsi_src
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
984 if ((tgsi_src
->Register
.SwizzleX
== tgsi_src
->Register
.SwizzleY
) &&
985 (tgsi_src
->Register
.SwizzleX
== tgsi_src
->Register
.SwizzleZ
) &&
986 (tgsi_src
->Register
.SwizzleX
== tgsi_src
->Register
.SwizzleW
)) {
988 index
= tgsi_src
->Register
.Index
* 4 + tgsi_src
->Register
.SwizzleX
;
989 r600_bytecode_special_constants(ctx
->literals
[index
], &r600_src
->sel
, &r600_src
->neg
);
990 if (r600_src
->sel
!= V_SQ_ALU_SRC_LITERAL
)
993 index
= tgsi_src
->Register
.Index
;
994 r600_src
->sel
= V_SQ_ALU_SRC_LITERAL
;
995 memcpy(r600_src
->value
, ctx
->literals
+ index
* 4, sizeof(r600_src
->value
));
996 } else if (tgsi_src
->Register
.File
== TGSI_FILE_SYSTEM_VALUE
) {
997 if (ctx
->info
.system_value_semantic_name
[tgsi_src
->Register
.Index
] == TGSI_SEMANTIC_INSTANCEID
) {
998 r600_src
->swizzle
[0] = 3;
999 r600_src
->swizzle
[1] = 3;
1000 r600_src
->swizzle
[2] = 3;
1001 r600_src
->swizzle
[3] = 3;
1003 } else if (ctx
->info
.system_value_semantic_name
[tgsi_src
->Register
.Index
] == TGSI_SEMANTIC_VERTEXID
) {
1004 r600_src
->swizzle
[0] = 0;
1005 r600_src
->swizzle
[1] = 0;
1006 r600_src
->swizzle
[2] = 0;
1007 r600_src
->swizzle
[3] = 0;
1011 if (tgsi_src
->Register
.Indirect
)
1012 r600_src
->rel
= V_SQ_REL_RELATIVE
;
1013 r600_src
->sel
= tgsi_src
->Register
.Index
;
1014 r600_src
->sel
+= ctx
->file_offset
[tgsi_src
->Register
.File
];
1018 static int tgsi_fetch_rel_const(struct r600_shader_ctx
*ctx
, unsigned int offset
, unsigned int dst_reg
)
1020 struct r600_bytecode_vtx vtx
;
1021 unsigned int ar_reg
;
1025 struct r600_bytecode_alu alu
;
1027 memset(&alu
, 0, sizeof(alu
));
1029 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
);
1030 alu
.src
[0].sel
= ctx
->bc
->ar_reg
;
1032 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
1033 alu
.src
[1].value
= offset
;
1035 alu
.dst
.sel
= dst_reg
;
1039 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
1044 ar_reg
= ctx
->bc
->ar_reg
;
1047 memset(&vtx
, 0, sizeof(vtx
));
1048 vtx
.fetch_type
= 2; /* VTX_FETCH_NO_INDEX_OFFSET */
1049 vtx
.src_gpr
= ar_reg
;
1050 vtx
.mega_fetch_count
= 16;
1051 vtx
.dst_gpr
= dst_reg
;
1052 vtx
.dst_sel_x
= 0; /* SEL_X */
1053 vtx
.dst_sel_y
= 1; /* SEL_Y */
1054 vtx
.dst_sel_z
= 2; /* SEL_Z */
1055 vtx
.dst_sel_w
= 3; /* SEL_W */
1056 vtx
.data_format
= FMT_32_32_32_32_FLOAT
;
1057 vtx
.num_format_all
= 2; /* NUM_FORMAT_SCALED */
1058 vtx
.format_comp_all
= 1; /* FORMAT_COMP_SIGNED */
1059 vtx
.srf_mode_all
= 1; /* SRF_MODE_NO_ZERO */
1060 vtx
.endian
= r600_endian_swap(32);
1062 if ((r
= r600_bytecode_add_vtx(ctx
->bc
, &vtx
)))
1068 static int tgsi_split_constant(struct r600_shader_ctx
*ctx
)
1070 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1071 struct r600_bytecode_alu alu
;
1072 int i
, j
, k
, nconst
, r
;
1074 for (i
= 0, nconst
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1075 if (inst
->Src
[i
].Register
.File
== TGSI_FILE_CONSTANT
) {
1078 tgsi_src(ctx
, &inst
->Src
[i
], &ctx
->src
[i
]);
1080 for (i
= 0, j
= nconst
- 1; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1081 if (inst
->Src
[i
].Register
.File
!= TGSI_FILE_CONSTANT
) {
1085 if (ctx
->src
[i
].rel
) {
1086 int treg
= r600_get_temp(ctx
);
1087 if ((r
= tgsi_fetch_rel_const(ctx
, ctx
->src
[i
].sel
- 512, treg
)))
1090 ctx
->src
[i
].sel
= treg
;
1091 ctx
->src
[i
].rel
= 0;
1094 int treg
= r600_get_temp(ctx
);
1095 for (k
= 0; k
< 4; k
++) {
1096 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1097 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
1098 alu
.src
[0].sel
= ctx
->src
[i
].sel
;
1099 alu
.src
[0].chan
= k
;
1100 alu
.src
[0].rel
= ctx
->src
[i
].rel
;
1106 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
1110 ctx
->src
[i
].sel
= treg
;
1118 /* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
1119 static int tgsi_split_literal_constant(struct r600_shader_ctx
*ctx
)
1121 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1122 struct r600_bytecode_alu alu
;
1123 int i
, j
, k
, nliteral
, r
;
1125 for (i
= 0, nliteral
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1126 if (ctx
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
1130 for (i
= 0, j
= nliteral
- 1; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1131 if (j
> 0 && ctx
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
1132 int treg
= r600_get_temp(ctx
);
1133 for (k
= 0; k
< 4; k
++) {
1134 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1135 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
1136 alu
.src
[0].sel
= ctx
->src
[i
].sel
;
1137 alu
.src
[0].chan
= k
;
1138 alu
.src
[0].value
= ctx
->src
[i
].value
[k
];
1144 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
1148 ctx
->src
[i
].sel
= treg
;
1155 static int process_twoside_color_inputs(struct r600_shader_ctx
*ctx
)
1157 int i
, r
, count
= ctx
->shader
->ninput
;
1159 for (i
= 0; i
< count
; i
++) {
1160 if (ctx
->shader
->input
[i
].name
== TGSI_SEMANTIC_COLOR
) {
1161 unsigned back_facing_reg
= ctx
->shader
->input
[i
].potential_back_facing_reg
;
1162 if (ctx
->bc
->chip_class
>= EVERGREEN
) {
1163 if ((r
= evergreen_interp_input(ctx
, back_facing_reg
)))
1167 if (!ctx
->use_llvm
) {
1168 r
= select_twoside_color(ctx
, i
, back_facing_reg
);
1177 static int r600_shader_from_tgsi(struct r600_screen
*rscreen
,
1178 struct r600_pipe_shader
*pipeshader
,
1179 struct r600_shader_key key
)
1181 struct r600_shader
*shader
= &pipeshader
->shader
;
1182 struct tgsi_token
*tokens
= pipeshader
->selector
->tokens
;
1183 struct pipe_stream_output_info so
= pipeshader
->selector
->so
;
1184 struct tgsi_full_immediate
*immediate
;
1185 struct tgsi_full_property
*property
;
1186 struct r600_shader_ctx ctx
;
1187 struct r600_bytecode_output output
[32];
1188 unsigned output_done
, noutput
;
1191 int next_pixel_base
= 0, next_pos_base
= 60, next_param_base
= 0;
1192 /* Declarations used by llvm code */
1193 bool use_llvm
= false;
1194 unsigned char * inst_bytes
= NULL
;
1195 unsigned inst_byte_count
= 0;
1197 #ifdef R600_USE_LLVM
1198 use_llvm
= debug_get_bool_option("R600_LLVM", TRUE
);
1200 ctx
.bc
= &shader
->bc
;
1201 ctx
.shader
= shader
;
1202 ctx
.native_integers
= true;
1204 r600_bytecode_init(ctx
.bc
, rscreen
->chip_class
, rscreen
->family
,
1205 rscreen
->msaa_texture_support
);
1206 ctx
.tokens
= tokens
;
1207 tgsi_scan_shader(tokens
, &ctx
.info
);
1208 tgsi_parse_init(&ctx
.parse
, tokens
);
1209 ctx
.type
= ctx
.parse
.FullHeader
.Processor
.Processor
;
1210 shader
->processor_type
= ctx
.type
;
1211 ctx
.bc
->type
= shader
->processor_type
;
1214 ctx
.fragcoord_input
= -1;
1215 ctx
.colors_used
= 0;
1216 ctx
.clip_vertex_write
= 0;
1218 shader
->nr_ps_color_exports
= 0;
1219 shader
->nr_ps_max_color_exports
= 0;
1221 shader
->two_side
= key
.color_two_side
;
1223 /* register allocations */
1224 /* Values [0,127] correspond to GPR[0..127].
1225 * Values [128,159] correspond to constant buffer bank 0
1226 * Values [160,191] correspond to constant buffer bank 1
1227 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
1228 * Values [256,287] correspond to constant buffer bank 2 (EG)
1229 * Values [288,319] correspond to constant buffer bank 3 (EG)
1230 * Other special values are shown in the list below.
1231 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
1232 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
1233 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
1234 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
1235 * 248 SQ_ALU_SRC_0: special constant 0.0.
1236 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
1237 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
1238 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
1239 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
1240 * 253 SQ_ALU_SRC_LITERAL: literal constant.
1241 * 254 SQ_ALU_SRC_PV: previous vector result.
1242 * 255 SQ_ALU_SRC_PS: previous scalar result.
1244 for (i
= 0; i
< TGSI_FILE_COUNT
; i
++) {
1245 ctx
.file_offset
[i
] = 0;
1247 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
) {
1248 ctx
.file_offset
[TGSI_FILE_INPUT
] = 1;
1249 if (ctx
.bc
->chip_class
>= EVERGREEN
) {
1250 r600_bytecode_add_cfinst(ctx
.bc
, EG_V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
);
1252 r600_bytecode_add_cfinst(ctx
.bc
, V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
);
1255 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
&& ctx
.bc
->chip_class
>= EVERGREEN
) {
1256 ctx
.file_offset
[TGSI_FILE_INPUT
] = evergreen_gpr_count(&ctx
);
1259 #ifdef R600_USE_LLVM
1260 if (use_llvm
&& ctx
.info
.indirect_files
) {
1261 fprintf(stderr
, "Warning: R600 LLVM backend does not support "
1262 "indirect adressing. Falling back to TGSI "
1267 ctx
.use_llvm
= use_llvm
;
1270 ctx
.file_offset
[TGSI_FILE_OUTPUT
] =
1271 ctx
.file_offset
[TGSI_FILE_INPUT
];
1273 ctx
.file_offset
[TGSI_FILE_OUTPUT
] =
1274 ctx
.file_offset
[TGSI_FILE_INPUT
] +
1275 ctx
.info
.file_max
[TGSI_FILE_INPUT
] + 1;
1277 ctx
.file_offset
[TGSI_FILE_TEMPORARY
] = ctx
.file_offset
[TGSI_FILE_OUTPUT
] +
1278 ctx
.info
.file_max
[TGSI_FILE_OUTPUT
] + 1;
1280 /* Outside the GPR range. This will be translated to one of the
1281 * kcache banks later. */
1282 ctx
.file_offset
[TGSI_FILE_CONSTANT
] = 512;
1284 ctx
.file_offset
[TGSI_FILE_IMMEDIATE
] = V_SQ_ALU_SRC_LITERAL
;
1285 ctx
.bc
->ar_reg
= ctx
.file_offset
[TGSI_FILE_TEMPORARY
] +
1286 ctx
.info
.file_max
[TGSI_FILE_TEMPORARY
] + 1;
1287 ctx
.temp_reg
= ctx
.bc
->ar_reg
+ 1;
1290 ctx
.literals
= NULL
;
1291 shader
->fs_write_all
= FALSE
;
1292 while (!tgsi_parse_end_of_tokens(&ctx
.parse
)) {
1293 tgsi_parse_token(&ctx
.parse
);
1294 switch (ctx
.parse
.FullToken
.Token
.Type
) {
1295 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1296 immediate
= &ctx
.parse
.FullToken
.FullImmediate
;
1297 ctx
.literals
= realloc(ctx
.literals
, (ctx
.nliterals
+ 1) * 16);
1298 if(ctx
.literals
== NULL
) {
1302 ctx
.literals
[ctx
.nliterals
* 4 + 0] = immediate
->u
[0].Uint
;
1303 ctx
.literals
[ctx
.nliterals
* 4 + 1] = immediate
->u
[1].Uint
;
1304 ctx
.literals
[ctx
.nliterals
* 4 + 2] = immediate
->u
[2].Uint
;
1305 ctx
.literals
[ctx
.nliterals
* 4 + 3] = immediate
->u
[3].Uint
;
1308 case TGSI_TOKEN_TYPE_DECLARATION
:
1309 r
= tgsi_declaration(&ctx
);
1313 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1315 case TGSI_TOKEN_TYPE_PROPERTY
:
1316 property
= &ctx
.parse
.FullToken
.FullProperty
;
1317 switch (property
->Property
.PropertyName
) {
1318 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
:
1319 if (property
->u
[0].Data
== 1)
1320 shader
->fs_write_all
= TRUE
;
1322 case TGSI_PROPERTY_VS_PROHIBIT_UCPS
:
1323 /* we don't need this one */
1328 R600_ERR("unsupported token type %d\n", ctx
.parse
.FullToken
.Token
.Type
);
1334 /* Process two side if needed */
1335 if (shader
->two_side
&& ctx
.colors_used
) {
1336 int i
, count
= ctx
.shader
->ninput
;
1337 unsigned next_lds_loc
= ctx
.shader
->nlds
;
1339 /* additional inputs will be allocated right after the existing inputs,
1340 * we won't need them after the color selection, so we don't need to
1341 * reserve these gprs for the rest of the shader code and to adjust
1342 * output offsets etc. */
1343 int gpr
= ctx
.file_offset
[TGSI_FILE_INPUT
] +
1344 ctx
.info
.file_max
[TGSI_FILE_INPUT
] + 1;
1346 if (ctx
.face_gpr
== -1) {
1347 i
= ctx
.shader
->ninput
++;
1348 ctx
.shader
->input
[i
].name
= TGSI_SEMANTIC_FACE
;
1349 ctx
.shader
->input
[i
].spi_sid
= 0;
1350 ctx
.shader
->input
[i
].gpr
= gpr
++;
1351 ctx
.face_gpr
= ctx
.shader
->input
[i
].gpr
;
1354 for (i
= 0; i
< count
; i
++) {
1355 if (ctx
.shader
->input
[i
].name
== TGSI_SEMANTIC_COLOR
) {
1356 int ni
= ctx
.shader
->ninput
++;
1357 memcpy(&ctx
.shader
->input
[ni
],&ctx
.shader
->input
[i
], sizeof(struct r600_shader_io
));
1358 ctx
.shader
->input
[ni
].name
= TGSI_SEMANTIC_BCOLOR
;
1359 ctx
.shader
->input
[ni
].spi_sid
= r600_spi_sid(&ctx
.shader
->input
[ni
]);
1360 ctx
.shader
->input
[ni
].gpr
= gpr
++;
1361 // TGSI to LLVM needs to know the lds position of inputs.
1362 // Non LLVM path computes it later (in process_twoside_color)
1363 ctx
.shader
->input
[ni
].lds_pos
= next_lds_loc
++;
1364 ctx
.shader
->input
[i
].potential_back_facing_reg
= ni
;
1369 /* LLVM backend setup */
1370 #ifdef R600_USE_LLVM
1372 struct radeon_llvm_context radeon_llvm_ctx
;
1375 memset(&radeon_llvm_ctx
, 0, sizeof(radeon_llvm_ctx
));
1376 radeon_llvm_ctx
.reserved_reg_count
= ctx
.file_offset
[TGSI_FILE_INPUT
];
1377 radeon_llvm_ctx
.type
= ctx
.type
;
1378 radeon_llvm_ctx
.two_side
= shader
->two_side
;
1379 radeon_llvm_ctx
.face_input
= ctx
.face_gpr
;
1380 radeon_llvm_ctx
.r600_inputs
= ctx
.shader
->input
;
1381 radeon_llvm_ctx
.r600_outputs
= ctx
.shader
->output
;
1382 radeon_llvm_ctx
.color_buffer_count
= MAX2(key
.nr_cbufs
, 1);
1383 radeon_llvm_ctx
.chip_class
= ctx
.bc
->chip_class
;
1384 radeon_llvm_ctx
.fs_color_all
= shader
->fs_write_all
&& (rscreen
->chip_class
>= EVERGREEN
);
1385 mod
= r600_tgsi_llvm(&radeon_llvm_ctx
, tokens
);
1386 if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE
)) {
1389 if (r600_llvm_compile(mod
, &inst_bytes
, &inst_byte_count
,
1390 rscreen
->family
, dump
)) {
1392 radeon_llvm_dispose(&radeon_llvm_ctx
);
1394 fprintf(stderr
, "R600 LLVM backend failed to compile "
1395 "shader. Falling back to TGSI\n");
1397 ctx
.file_offset
[TGSI_FILE_OUTPUT
] =
1398 ctx
.file_offset
[TGSI_FILE_INPUT
];
1400 radeon_llvm_dispose(&radeon_llvm_ctx
);
1403 /* End of LLVM backend setup */
1405 if (shader
->fs_write_all
&& rscreen
->chip_class
>= EVERGREEN
)
1406 shader
->nr_ps_max_color_exports
= 8;
1408 if (ctx
.fragcoord_input
>= 0 && !use_llvm
) {
1409 if (ctx
.bc
->chip_class
== CAYMAN
) {
1410 for (j
= 0 ; j
< 4; j
++) {
1411 struct r600_bytecode_alu alu
;
1412 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1413 alu
.inst
= BC_INST(ctx
.bc
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
1414 alu
.src
[0].sel
= shader
->input
[ctx
.fragcoord_input
].gpr
;
1415 alu
.src
[0].chan
= 3;
1417 alu
.dst
.sel
= shader
->input
[ctx
.fragcoord_input
].gpr
;
1419 alu
.dst
.write
= (j
== 3);
1421 if ((r
= r600_bytecode_add_alu(ctx
.bc
, &alu
)))
1425 struct r600_bytecode_alu alu
;
1426 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1427 alu
.inst
= BC_INST(ctx
.bc
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
1428 alu
.src
[0].sel
= shader
->input
[ctx
.fragcoord_input
].gpr
;
1429 alu
.src
[0].chan
= 3;
1431 alu
.dst
.sel
= shader
->input
[ctx
.fragcoord_input
].gpr
;
1435 if ((r
= r600_bytecode_add_alu(ctx
.bc
, &alu
)))
1440 if (shader
->two_side
&& ctx
.colors_used
) {
1441 if ((r
= process_twoside_color_inputs(&ctx
)))
1445 tgsi_parse_init(&ctx
.parse
, tokens
);
1446 while (!tgsi_parse_end_of_tokens(&ctx
.parse
)) {
1447 tgsi_parse_token(&ctx
.parse
);
1448 switch (ctx
.parse
.FullToken
.Token
.Type
) {
1449 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1453 r
= tgsi_is_supported(&ctx
);
1456 ctx
.max_driver_temp_used
= 0;
1457 /* reserve first tmp for everyone */
1458 r600_get_temp(&ctx
);
1460 opcode
= ctx
.parse
.FullToken
.FullInstruction
.Instruction
.Opcode
;
1461 if ((r
= tgsi_split_constant(&ctx
)))
1463 if ((r
= tgsi_split_literal_constant(&ctx
)))
1465 if (ctx
.bc
->chip_class
== CAYMAN
)
1466 ctx
.inst_info
= &cm_shader_tgsi_instruction
[opcode
];
1467 else if (ctx
.bc
->chip_class
>= EVERGREEN
)
1468 ctx
.inst_info
= &eg_shader_tgsi_instruction
[opcode
];
1470 ctx
.inst_info
= &r600_shader_tgsi_instruction
[opcode
];
1471 r
= ctx
.inst_info
->process(&ctx
);
1480 /* Reset the temporary register counter. */
1481 ctx
.max_driver_temp_used
= 0;
1483 /* Get instructions if we are using the LLVM backend. */
1485 r600_bytecode_from_byte_stream(&ctx
, inst_bytes
, inst_byte_count
);
1489 noutput
= shader
->noutput
;
1491 if (ctx
.clip_vertex_write
) {
1492 unsigned clipdist_temp
[2];
1494 clipdist_temp
[0] = r600_get_temp(&ctx
);
1495 clipdist_temp
[1] = r600_get_temp(&ctx
);
1497 /* need to convert a clipvertex write into clipdistance writes and not export
1498 the clip vertex anymore */
1500 memset(&shader
->output
[noutput
], 0, 2*sizeof(struct r600_shader_io
));
1501 shader
->output
[noutput
].name
= TGSI_SEMANTIC_CLIPDIST
;
1502 shader
->output
[noutput
].gpr
= clipdist_temp
[0];
1504 shader
->output
[noutput
].name
= TGSI_SEMANTIC_CLIPDIST
;
1505 shader
->output
[noutput
].gpr
= clipdist_temp
[1];
1508 /* reset spi_sid for clipvertex output to avoid confusing spi */
1509 shader
->output
[ctx
.cv_output
].spi_sid
= 0;
1511 shader
->clip_dist_write
= 0xFF;
1513 for (i
= 0; i
< 8; i
++) {
1517 for (j
= 0; j
< 4; j
++) {
1518 struct r600_bytecode_alu alu
;
1519 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1520 alu
.inst
= BC_INST(ctx
.bc
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
);
1521 alu
.src
[0].sel
= shader
->output
[ctx
.cv_output
].gpr
;
1522 alu
.src
[0].chan
= j
;
1524 alu
.src
[1].sel
= 512 + i
;
1525 alu
.src
[1].kc_bank
= R600_UCP_CONST_BUFFER
;
1526 alu
.src
[1].chan
= j
;
1528 alu
.dst
.sel
= clipdist_temp
[oreg
];
1530 alu
.dst
.write
= (j
== ochan
);
1533 r
= r600_bytecode_add_alu(ctx
.bc
, &alu
);
1540 /* Add stream outputs. */
1541 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
&& so
.num_outputs
) {
1542 unsigned so_gpr
[PIPE_MAX_SHADER_OUTPUTS
];
1544 /* Sanity checking. */
1545 if (so
.num_outputs
> PIPE_MAX_SHADER_OUTPUTS
) {
1546 R600_ERR("Too many stream outputs: %d\n", so
.num_outputs
);
1550 for (i
= 0; i
< so
.num_outputs
; i
++) {
1551 if (so
.output
[i
].output_buffer
>= 4) {
1552 R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
1553 so
.output
[i
].output_buffer
);
1559 /* Initialize locations where the outputs are stored. */
1560 for (i
= 0; i
< so
.num_outputs
; i
++) {
1561 so_gpr
[i
] = shader
->output
[so
.output
[i
].register_index
].gpr
;
1563 /* Lower outputs with dst_offset < start_component.
1565 * We can only output 4D vectors with a write mask, e.g. we can
1566 * only output the W component at offset 3, etc. If we want
1567 * to store Y, Z, or W at buffer offset 0, we need to use MOV
1568 * to move it to X and output X. */
1569 if (so
.output
[i
].dst_offset
< so
.output
[i
].start_component
) {
1570 unsigned tmp
= r600_get_temp(&ctx
);
1572 for (j
= 0; j
< so
.output
[i
].num_components
; j
++) {
1573 struct r600_bytecode_alu alu
;
1574 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1575 alu
.inst
= BC_INST(ctx
.bc
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
1576 alu
.src
[0].sel
= so_gpr
[i
];
1577 alu
.src
[0].chan
= so
.output
[i
].start_component
+ j
;
1582 if (j
== so
.output
[i
].num_components
- 1)
1584 r
= r600_bytecode_add_alu(ctx
.bc
, &alu
);
1588 so
.output
[i
].start_component
= 0;
1593 /* Write outputs to buffers. */
1594 for (i
= 0; i
< so
.num_outputs
; i
++) {
1595 struct r600_bytecode_output output
;
1597 memset(&output
, 0, sizeof(struct r600_bytecode_output
));
1598 output
.gpr
= so_gpr
[i
];
1599 output
.elem_size
= so
.output
[i
].num_components
;
1600 output
.array_base
= so
.output
[i
].dst_offset
- so
.output
[i
].start_component
;
1601 output
.type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE
;
1602 output
.burst_count
= 1;
1604 /* array_size is an upper limit for the burst_count
1605 * with MEM_STREAM instructions */
1606 output
.array_size
= 0xFFF;
1607 output
.comp_mask
= ((1 << so
.output
[i
].num_components
) - 1) << so
.output
[i
].start_component
;
1608 if (ctx
.bc
->chip_class
>= EVERGREEN
) {
1609 switch (so
.output
[i
].output_buffer
) {
1611 output
.inst
= EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM0_BUF0
;
1614 output
.inst
= EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM0_BUF1
;
1617 output
.inst
= EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM0_BUF2
;
1620 output
.inst
= EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM0_BUF3
;
1624 switch (so
.output
[i
].output_buffer
) {
1626 output
.inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM0
;
1629 output
.inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM1
;
1632 output
.inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM2
;
1635 output
.inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_MEM_STREAM3
;
1639 r
= r600_bytecode_add_output(ctx
.bc
, &output
);
1646 for (i
= 0, j
= 0; i
< noutput
; i
++, j
++) {
1647 memset(&output
[j
], 0, sizeof(struct r600_bytecode_output
));
1648 output
[j
].gpr
= shader
->output
[i
].gpr
;
1649 output
[j
].elem_size
= 3;
1650 output
[j
].swizzle_x
= 0;
1651 output
[j
].swizzle_y
= 1;
1652 output
[j
].swizzle_z
= 2;
1653 output
[j
].swizzle_w
= 3;
1654 output
[j
].burst_count
= 1;
1655 output
[j
].barrier
= 1;
1656 output
[j
].type
= -1;
1657 output
[j
].inst
= BC_INST(ctx
.bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
);
1659 case TGSI_PROCESSOR_VERTEX
:
1660 switch (shader
->output
[i
].name
) {
1661 case TGSI_SEMANTIC_POSITION
:
1662 output
[j
].array_base
= next_pos_base
++;
1663 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
1666 case TGSI_SEMANTIC_PSIZE
:
1667 output
[j
].array_base
= next_pos_base
++;
1668 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
1670 case TGSI_SEMANTIC_CLIPVERTEX
:
1673 case TGSI_SEMANTIC_CLIPDIST
:
1674 output
[j
].array_base
= next_pos_base
++;
1675 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
1676 /* spi_sid is 0 for clipdistance outputs that were generated
1677 * for clipvertex - we don't need to pass them to PS */
1678 if (shader
->output
[i
].spi_sid
) {
1680 /* duplicate it as PARAM to pass to the pixel shader */
1681 memcpy(&output
[j
], &output
[j
-1], sizeof(struct r600_bytecode_output
));
1682 output
[j
].array_base
= next_param_base
++;
1683 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
1686 case TGSI_SEMANTIC_FOG
:
1687 output
[j
].swizzle_y
= 4; /* 0 */
1688 output
[j
].swizzle_z
= 4; /* 0 */
1689 output
[j
].swizzle_w
= 5; /* 1 */
1693 case TGSI_PROCESSOR_FRAGMENT
:
1694 if (shader
->output
[i
].name
== TGSI_SEMANTIC_COLOR
) {
1695 /* never export more colors than the number of CBs */
1696 if (next_pixel_base
&& next_pixel_base
>= key
.nr_cbufs
) {
1701 output
[j
].swizzle_w
= key
.alpha_to_one
? 5 : 3;
1702 output
[j
].array_base
= next_pixel_base
++;
1703 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
1704 shader
->nr_ps_color_exports
++;
1705 if (shader
->fs_write_all
&& (rscreen
->chip_class
>= EVERGREEN
)) {
1706 for (k
= 1; k
< key
.nr_cbufs
; k
++) {
1708 memset(&output
[j
], 0, sizeof(struct r600_bytecode_output
));
1709 output
[j
].gpr
= shader
->output
[i
].gpr
;
1710 output
[j
].elem_size
= 3;
1711 output
[j
].swizzle_x
= 0;
1712 output
[j
].swizzle_y
= 1;
1713 output
[j
].swizzle_z
= 2;
1714 output
[j
].swizzle_w
= key
.alpha_to_one
? 5 : 3;
1715 output
[j
].burst_count
= 1;
1716 output
[j
].barrier
= 1;
1717 output
[j
].array_base
= next_pixel_base
++;
1718 output
[j
].inst
= BC_INST(ctx
.bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
);
1719 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
1720 shader
->nr_ps_color_exports
++;
1723 } else if (shader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
) {
1724 output
[j
].array_base
= 61;
1725 output
[j
].swizzle_x
= 2;
1726 output
[j
].swizzle_y
= 7;
1727 output
[j
].swizzle_z
= output
[j
].swizzle_w
= 7;
1728 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
1729 } else if (shader
->output
[i
].name
== TGSI_SEMANTIC_STENCIL
) {
1730 output
[j
].array_base
= 61;
1731 output
[j
].swizzle_x
= 7;
1732 output
[j
].swizzle_y
= 1;
1733 output
[j
].swizzle_z
= output
[j
].swizzle_w
= 7;
1734 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
1736 R600_ERR("unsupported fragment output name %d\n", shader
->output
[i
].name
);
1742 R600_ERR("unsupported processor type %d\n", ctx
.type
);
1747 if (output
[j
].type
==-1) {
1748 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
1749 output
[j
].array_base
= next_param_base
++;
1753 /* add fake param output for vertex shader if no param is exported */
1754 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
&& next_param_base
== 0) {
1755 memset(&output
[j
], 0, sizeof(struct r600_bytecode_output
));
1757 output
[j
].elem_size
= 3;
1758 output
[j
].swizzle_x
= 7;
1759 output
[j
].swizzle_y
= 7;
1760 output
[j
].swizzle_z
= 7;
1761 output
[j
].swizzle_w
= 7;
1762 output
[j
].burst_count
= 1;
1763 output
[j
].barrier
= 1;
1764 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
1765 output
[j
].array_base
= 0;
1766 output
[j
].inst
= BC_INST(ctx
.bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
);
1770 /* add fake pixel export */
1771 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
&& next_pixel_base
== 0) {
1772 memset(&output
[j
], 0, sizeof(struct r600_bytecode_output
));
1774 output
[j
].elem_size
= 3;
1775 output
[j
].swizzle_x
= 7;
1776 output
[j
].swizzle_y
= 7;
1777 output
[j
].swizzle_z
= 7;
1778 output
[j
].swizzle_w
= 7;
1779 output
[j
].burst_count
= 1;
1780 output
[j
].barrier
= 1;
1781 output
[j
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
1782 output
[j
].array_base
= 0;
1783 output
[j
].inst
= BC_INST(ctx
.bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
);
1789 /* set export done on last export of each type */
1790 for (i
= noutput
- 1, output_done
= 0; i
>= 0; i
--) {
1791 if (ctx
.bc
->chip_class
< CAYMAN
) {
1792 if (i
== (noutput
- 1)) {
1793 output
[i
].end_of_program
= 1;
1796 if (!(output_done
& (1 << output
[i
].type
))) {
1797 output_done
|= (1 << output
[i
].type
);
1798 output
[i
].inst
= BC_INST(ctx
.bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
);
1801 /* add output to bytecode */
1802 if (!use_llvm
|| ctx
.type
!= TGSI_PROCESSOR_FRAGMENT
) {
1803 for (i
= 0; i
< noutput
; i
++) {
1804 r
= r600_bytecode_add_output(ctx
.bc
, &output
[i
]);
1809 /* add program end */
1810 if (ctx
.bc
->chip_class
== CAYMAN
)
1811 cm_bytecode_add_cf_end(ctx
.bc
);
1813 /* check GPR limit - we have 124 = 128 - 4
1814 * (4 are reserved as alu clause temporary registers) */
1815 if (ctx
.bc
->ngpr
> 124) {
1816 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx
.bc
->ngpr
);
1822 tgsi_parse_free(&ctx
.parse
);
1826 tgsi_parse_free(&ctx
.parse
);
1830 static int tgsi_unsupported(struct r600_shader_ctx
*ctx
)
1832 R600_ERR("%s tgsi opcode unsupported\n",
1833 tgsi_get_opcode_name(ctx
->inst_info
->tgsi_opcode
));
/* TGSI END emits no bytecode; program termination is handled when the
 * final exports are written out. */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}
1842 static void r600_bytecode_src(struct r600_bytecode_alu_src
*bc_src
,
1843 const struct r600_shader_src
*shader_src
,
1846 bc_src
->sel
= shader_src
->sel
;
1847 bc_src
->chan
= shader_src
->swizzle
[chan
];
1848 bc_src
->neg
= shader_src
->neg
;
1849 bc_src
->abs
= shader_src
->abs
;
1850 bc_src
->rel
= shader_src
->rel
;
1851 bc_src
->value
= shader_src
->value
[bc_src
->chan
];
1854 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src
*bc_src
)
1860 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src
*bc_src
)
1862 bc_src
->neg
= !bc_src
->neg
;
1865 static void tgsi_dst(struct r600_shader_ctx
*ctx
,
1866 const struct tgsi_full_dst_register
*tgsi_dst
,
1868 struct r600_bytecode_alu_dst
*r600_dst
)
1870 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1872 r600_dst
->sel
= tgsi_dst
->Register
.Index
;
1873 r600_dst
->sel
+= ctx
->file_offset
[tgsi_dst
->Register
.File
];
1874 r600_dst
->chan
= swizzle
;
1875 r600_dst
->write
= 1;
1876 if (tgsi_dst
->Register
.Indirect
)
1877 r600_dst
->rel
= V_SQ_REL_RELATIVE
;
1878 if (inst
->Instruction
.Saturate
) {
1879 r600_dst
->clamp
= 1;
/* Return the index of the highest channel set in a 4-bit write mask
 * (0 when the mask is empty). Used to decide which ALU op in a group
 * gets the 'last' flag. */
static int tgsi_last_instruction(unsigned writemask)
{
	int i, lasti = 0;

	for (i = 0; i < 4; i++) {
		if (writemask & (1 << i)) {
			lasti = i;
		}
	}
	return lasti;
}
1895 static int tgsi_op2_s(struct r600_shader_ctx
*ctx
, int swap
, int trans_only
)
1897 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1898 struct r600_bytecode_alu alu
;
1900 int lasti
= tgsi_last_instruction(inst
->Dst
[0].Register
.WriteMask
);
1902 for (i
= 0; i
< lasti
+ 1; i
++) {
1903 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
1906 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1907 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1909 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1911 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1912 r600_bytecode_src(&alu
.src
[j
], &ctx
->src
[j
], i
);
1915 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
1916 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
1918 /* handle some special cases */
1919 switch (ctx
->inst_info
->tgsi_opcode
) {
1920 case TGSI_OPCODE_SUB
:
1921 r600_bytecode_src_toggle_neg(&alu
.src
[1]);
1923 case TGSI_OPCODE_ABS
:
1924 r600_bytecode_src_set_abs(&alu
.src
[0]);
1929 if (i
== lasti
|| trans_only
) {
1932 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
/* Standard two-operand op: natural source order, vector units allowed. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0, 0);
}
/* Two-operand op with the hardware's operand order reversed. */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 1, 0);
}
/* Two-operand op restricted to the transcendental (t) slot. */
static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0, 1);
}
1954 static int tgsi_ineg(struct r600_shader_ctx
*ctx
)
1956 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1957 struct r600_bytecode_alu alu
;
1959 int lasti
= tgsi_last_instruction(inst
->Dst
[0].Register
.WriteMask
);
1961 for (i
= 0; i
< lasti
+ 1; i
++) {
1963 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
1965 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1966 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1968 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
1970 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
1972 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1977 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
1985 static int cayman_emit_float_instr(struct r600_shader_ctx
*ctx
)
1987 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1989 struct r600_bytecode_alu alu
;
1990 int last_slot
= (inst
->Dst
[0].Register
.WriteMask
& 0x8) ? 4 : 3;
1992 for (i
= 0 ; i
< last_slot
; i
++) {
1993 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
1994 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1995 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1996 r600_bytecode_src(&alu
.src
[j
], &ctx
->src
[j
], 0);
1998 /* RSQ should take the absolute value of src */
1999 if (ctx
->inst_info
->tgsi_opcode
== TGSI_OPCODE_RSQ
) {
2000 r600_bytecode_src_set_abs(&alu
.src
[j
]);
2003 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2004 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
2006 if (i
== last_slot
- 1)
2008 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2015 static int cayman_mul_int_instr(struct r600_shader_ctx
*ctx
)
2017 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2019 struct r600_bytecode_alu alu
;
2020 int last_slot
= (inst
->Dst
[0].Register
.WriteMask
& 0x8) ? 4 : 3;
2021 for (k
= 0; k
< last_slot
; k
++) {
2022 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << k
)))
2025 for (i
= 0 ; i
< 4; i
++) {
2026 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2027 alu
.inst
= ctx
->inst_info
->r600_opcode
;
2028 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
2029 r600_bytecode_src(&alu
.src
[j
], &ctx
->src
[j
], k
);
2031 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2032 alu
.dst
.write
= (i
== k
);
2035 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2044 * r600 - trunc to -PI..PI range
2045 * r700 - normalize by dividing by 2PI
2048 static int tgsi_setup_trig(struct r600_shader_ctx
*ctx
)
2050 static float half_inv_pi
= 1.0 /(3.1415926535 * 2);
2051 static float double_pi
= 3.1415926535 * 2;
2052 static float neg_pi
= -3.1415926535;
2055 struct r600_bytecode_alu alu
;
2057 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2058 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
2062 alu
.dst
.sel
= ctx
->temp_reg
;
2065 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
2067 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
2068 alu
.src
[1].chan
= 0;
2069 alu
.src
[1].value
= *(uint32_t *)&half_inv_pi
;
2070 alu
.src
[2].sel
= V_SQ_ALU_SRC_0_5
;
2071 alu
.src
[2].chan
= 0;
2073 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2077 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2078 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
);
2081 alu
.dst
.sel
= ctx
->temp_reg
;
2084 alu
.src
[0].sel
= ctx
->temp_reg
;
2085 alu
.src
[0].chan
= 0;
2087 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2091 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2092 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
2096 alu
.dst
.sel
= ctx
->temp_reg
;
2099 alu
.src
[0].sel
= ctx
->temp_reg
;
2100 alu
.src
[0].chan
= 0;
2102 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
2103 alu
.src
[1].chan
= 0;
2104 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
2105 alu
.src
[2].chan
= 0;
2107 if (ctx
->bc
->chip_class
== R600
) {
2108 alu
.src
[1].value
= *(uint32_t *)&double_pi
;
2109 alu
.src
[2].value
= *(uint32_t *)&neg_pi
;
2111 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
2112 alu
.src
[2].sel
= V_SQ_ALU_SRC_0_5
;
2117 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2123 static int cayman_trig(struct r600_shader_ctx
*ctx
)
2125 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2126 struct r600_bytecode_alu alu
;
2127 int last_slot
= (inst
->Dst
[0].Register
.WriteMask
& 0x8) ? 4 : 3;
2130 r
= tgsi_setup_trig(ctx
);
2135 for (i
= 0; i
< last_slot
; i
++) {
2136 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2137 alu
.inst
= ctx
->inst_info
->r600_opcode
;
2140 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2141 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
2143 alu
.src
[0].sel
= ctx
->temp_reg
;
2144 alu
.src
[0].chan
= 0;
2145 if (i
== last_slot
- 1)
2147 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2154 static int tgsi_trig(struct r600_shader_ctx
*ctx
)
2156 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2157 struct r600_bytecode_alu alu
;
2159 int lasti
= tgsi_last_instruction(inst
->Dst
[0].Register
.WriteMask
);
2161 r
= tgsi_setup_trig(ctx
);
2165 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2166 alu
.inst
= ctx
->inst_info
->r600_opcode
;
2168 alu
.dst
.sel
= ctx
->temp_reg
;
2171 alu
.src
[0].sel
= ctx
->temp_reg
;
2172 alu
.src
[0].chan
= 0;
2174 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2178 /* replicate result */
2179 for (i
= 0; i
< lasti
+ 1; i
++) {
2180 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
2183 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2184 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
2186 alu
.src
[0].sel
= ctx
->temp_reg
;
2187 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2190 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2197 static int tgsi_scs(struct r600_shader_ctx
*ctx
)
2199 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2200 struct r600_bytecode_alu alu
;
2203 /* We'll only need the trig stuff if we are going to write to the
2204 * X or Y components of the destination vector.
2206 if (likely(inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XY
)) {
2207 r
= tgsi_setup_trig(ctx
);
2213 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
2214 if (ctx
->bc
->chip_class
== CAYMAN
) {
2215 for (i
= 0 ; i
< 3; i
++) {
2216 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2217 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
);
2218 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2224 alu
.src
[0].sel
= ctx
->temp_reg
;
2225 alu
.src
[0].chan
= 0;
2228 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2233 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2234 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
);
2235 tgsi_dst(ctx
, &inst
->Dst
[0], 0, &alu
.dst
);
2237 alu
.src
[0].sel
= ctx
->temp_reg
;
2238 alu
.src
[0].chan
= 0;
2240 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2247 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
2248 if (ctx
->bc
->chip_class
== CAYMAN
) {
2249 for (i
= 0 ; i
< 3; i
++) {
2250 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2251 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
);
2252 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2257 alu
.src
[0].sel
= ctx
->temp_reg
;
2258 alu
.src
[0].chan
= 0;
2261 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2266 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2267 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
);
2268 tgsi_dst(ctx
, &inst
->Dst
[0], 1, &alu
.dst
);
2270 alu
.src
[0].sel
= ctx
->temp_reg
;
2271 alu
.src
[0].chan
= 0;
2273 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2280 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
2281 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2283 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
2285 tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
2287 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
2288 alu
.src
[0].chan
= 0;
2292 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2298 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
2299 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2301 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
2303 tgsi_dst(ctx
, &inst
->Dst
[0], 3, &alu
.dst
);
2305 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
2306 alu
.src
[0].chan
= 0;
2310 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2318 static int tgsi_kill(struct r600_shader_ctx
*ctx
)
2320 struct r600_bytecode_alu alu
;
2323 for (i
= 0; i
< 4; i
++) {
2324 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2325 alu
.inst
= ctx
->inst_info
->r600_opcode
;
2329 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
2331 if (ctx
->inst_info
->tgsi_opcode
== TGSI_OPCODE_KILP
) {
2332 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
2335 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
2340 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2345 /* kill must be last in ALU */
2346 ctx
->bc
->force_add_cf
= 1;
2347 ctx
->shader
->uses_kill
= TRUE
;
2351 static int tgsi_lit(struct r600_shader_ctx
*ctx
)
2353 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2354 struct r600_bytecode_alu alu
;
2357 /* tmp.x = max(src.y, 0.0) */
2358 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2359 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
);
2360 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 1);
2361 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
; /*0.0*/
2362 alu
.src
[1].chan
= 1;
2364 alu
.dst
.sel
= ctx
->temp_reg
;
2369 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2373 if (inst
->Dst
[0].Register
.WriteMask
& (1 << 2))
2379 if (ctx
->bc
->chip_class
== CAYMAN
) {
2380 for (i
= 0; i
< 3; i
++) {
2381 /* tmp.z = log(tmp.x) */
2382 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2383 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
);
2384 alu
.src
[0].sel
= ctx
->temp_reg
;
2385 alu
.src
[0].chan
= 0;
2386 alu
.dst
.sel
= ctx
->temp_reg
;
2394 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2399 /* tmp.z = log(tmp.x) */
2400 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2401 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
);
2402 alu
.src
[0].sel
= ctx
->temp_reg
;
2403 alu
.src
[0].chan
= 0;
2404 alu
.dst
.sel
= ctx
->temp_reg
;
2408 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2413 chan
= alu
.dst
.chan
;
2416 /* tmp.x = amd MUL_LIT(tmp.z, src.w, src.x ) */
2417 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2418 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
);
2419 alu
.src
[0].sel
= sel
;
2420 alu
.src
[0].chan
= chan
;
2421 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], 3);
2422 r600_bytecode_src(&alu
.src
[2], &ctx
->src
[0], 0);
2423 alu
.dst
.sel
= ctx
->temp_reg
;
2428 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2432 if (ctx
->bc
->chip_class
== CAYMAN
) {
2433 for (i
= 0; i
< 3; i
++) {
2434 /* dst.z = exp(tmp.x) */
2435 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2436 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
2437 alu
.src
[0].sel
= ctx
->temp_reg
;
2438 alu
.src
[0].chan
= 0;
2439 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2445 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2450 /* dst.z = exp(tmp.x) */
2451 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2452 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
2453 alu
.src
[0].sel
= ctx
->temp_reg
;
2454 alu
.src
[0].chan
= 0;
2455 tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
2457 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2464 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2465 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
2466 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
; /*1.0*/
2467 alu
.src
[0].chan
= 0;
2468 tgsi_dst(ctx
, &inst
->Dst
[0], 0, &alu
.dst
);
2469 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 0) & 1;
2470 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2474 /* dst.y = max(src.x, 0.0) */
2475 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2476 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
);
2477 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
2478 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
; /*0.0*/
2479 alu
.src
[1].chan
= 0;
2480 tgsi_dst(ctx
, &inst
->Dst
[0], 1, &alu
.dst
);
2481 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 1) & 1;
2482 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2487 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2488 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
2489 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
2490 alu
.src
[0].chan
= 0;
2491 tgsi_dst(ctx
, &inst
->Dst
[0], 3, &alu
.dst
);
2492 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 3) & 1;
2494 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2501 static int tgsi_rsq(struct r600_shader_ctx
*ctx
)
2503 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2504 struct r600_bytecode_alu alu
;
2507 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2510 * For state trackers other than OpenGL, we'll want to use
2511 * _RECIPSQRT_IEEE instead.
2513 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
);
2515 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
2516 r600_bytecode_src(&alu
.src
[i
], &ctx
->src
[i
], 0);
2517 r600_bytecode_src_set_abs(&alu
.src
[i
]);
2519 alu
.dst
.sel
= ctx
->temp_reg
;
2522 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2525 /* replicate result */
2526 return tgsi_helper_tempx_replicate(ctx
);
2529 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx
*ctx
)
2531 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2532 struct r600_bytecode_alu alu
;
2535 for (i
= 0; i
< 4; i
++) {
2536 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2537 alu
.src
[0].sel
= ctx
->temp_reg
;
2538 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
2540 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2541 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
2544 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2551 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx
*ctx
)
2553 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2554 struct r600_bytecode_alu alu
;
2557 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2558 alu
.inst
= ctx
->inst_info
->r600_opcode
;
2559 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
2560 r600_bytecode_src(&alu
.src
[i
], &ctx
->src
[i
], 0);
2562 alu
.dst
.sel
= ctx
->temp_reg
;
2565 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2568 /* replicate result */
2569 return tgsi_helper_tempx_replicate(ctx
);
2572 static int cayman_pow(struct r600_shader_ctx
*ctx
)
2574 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2576 struct r600_bytecode_alu alu
;
2577 int last_slot
= (inst
->Dst
[0].Register
.WriteMask
& 0x8) ? 4 : 3;
2579 for (i
= 0; i
< 3; i
++) {
2580 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2581 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
2582 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
2583 alu
.dst
.sel
= ctx
->temp_reg
;
2588 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2594 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2595 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
2596 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], 0);
2597 alu
.src
[1].sel
= ctx
->temp_reg
;
2598 alu
.dst
.sel
= ctx
->temp_reg
;
2601 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2605 for (i
= 0; i
< last_slot
; i
++) {
2606 /* POW(a,b) = EXP2(b * LOG2(a))*/
2607 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2608 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
2609 alu
.src
[0].sel
= ctx
->temp_reg
;
2611 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
2612 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
2613 if (i
== last_slot
- 1)
2615 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2622 static int tgsi_pow(struct r600_shader_ctx
*ctx
)
2624 struct r600_bytecode_alu alu
;
2628 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2629 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
2630 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
2631 alu
.dst
.sel
= ctx
->temp_reg
;
2634 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2638 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2639 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
2640 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], 0);
2641 alu
.src
[1].sel
= ctx
->temp_reg
;
2642 alu
.dst
.sel
= ctx
->temp_reg
;
2645 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2648 /* POW(a,b) = EXP2(b * LOG2(a))*/
2649 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2650 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
2651 alu
.src
[0].sel
= ctx
->temp_reg
;
2652 alu
.dst
.sel
= ctx
->temp_reg
;
2655 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2658 return tgsi_helper_tempx_replicate(ctx
);
2661 static int tgsi_divmod(struct r600_shader_ctx
*ctx
, int mod
, int signed_op
)
2663 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
2664 struct r600_bytecode_alu alu
;
2666 unsigned write_mask
= inst
->Dst
[0].Register
.WriteMask
;
2667 int tmp0
= ctx
->temp_reg
;
2668 int tmp1
= r600_get_temp(ctx
);
2669 int tmp2
= r600_get_temp(ctx
);
2670 int tmp3
= r600_get_temp(ctx
);
2673 * we need to represent src1 as src2*q + r, where q - quotient, r - remainder
2675 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
2676 * 2. tmp0.z = lo (tmp0.x * src2)
2677 * 3. tmp0.w = -tmp0.z
2678 * 4. tmp0.y = hi (tmp0.x * src2)
2679 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
2680 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
2681 * 7. tmp1.x = tmp0.x - tmp0.w
2682 * 8. tmp1.y = tmp0.x + tmp0.w
2683 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
2684 * 10. tmp0.z = hi(tmp0.x * src1) = q
2685 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
2687 * 12. tmp0.w = src1 - tmp0.y = r
2688 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
2689 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
2693 * 15. tmp1.z = tmp0.z + 1 = q + 1
2694 * 16. tmp1.w = tmp0.z - 1 = q - 1
2698 * 15. tmp1.z = tmp0.w - src2 = r - src2
2699 * 16. tmp1.w = tmp0.w + src2 = r + src2
2703 * 17. tmp1.x = tmp1.x & tmp1.y
2705 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
2706 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
2708 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
2709 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
2713 * Same as unsigned, using abs values of the operands,
2714 * and fixing the sign of the result in the end.
2717 for (i
= 0; i
< 4; i
++) {
2718 if (!(write_mask
& (1<<i
)))
2723 /* tmp2.x = -src0 */
2724 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2725 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
2731 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
2733 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
2736 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2739 /* tmp2.y = -src1 */
2740 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2741 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
2747 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
2749 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
2752 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2755 /* tmp2.z sign bit is set if src0 and src2 signs are different */
2756 /* it will be a sign of the quotient */
2759 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2760 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT
);
2766 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
2767 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
2770 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2774 /* tmp2.x = |src0| */
2775 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2776 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT
);
2783 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
2784 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
2785 alu
.src
[2].sel
= tmp2
;
2786 alu
.src
[2].chan
= 0;
2789 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2792 /* tmp2.y = |src1| */
2793 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2794 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT
);
2801 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
2802 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
2803 alu
.src
[2].sel
= tmp2
;
2804 alu
.src
[2].chan
= 1;
2807 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2812 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
2813 if (ctx
->bc
->chip_class
== CAYMAN
) {
2814 /* tmp3.x = u2f(src2) */
2815 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2816 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
);
2823 alu
.src
[0].sel
= tmp2
;
2824 alu
.src
[0].chan
= 1;
2826 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
2830 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2833 /* tmp0.x = recip(tmp3.x) */
2834 for (j
= 0 ; j
< 3; j
++) {
2835 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2836 alu
.inst
= EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
;
2840 alu
.dst
.write
= (j
== 0);
2842 alu
.src
[0].sel
= tmp3
;
2843 alu
.src
[0].chan
= 0;
2847 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2851 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2852 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
2854 alu
.src
[0].sel
= tmp0
;
2855 alu
.src
[0].chan
= 0;
2857 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
2858 alu
.src
[1].value
= 0x4f800000;
2863 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
2867 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2868 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT
);
2874 alu
.src
[0].sel
= tmp3
;
2875 alu
.src
[0].chan
= 0;
2878 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2882 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2883 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
);
2890 alu
.src
[0].sel
= tmp2
;
2891 alu
.src
[0].chan
= 1;
2893 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
2897 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2901 /* 2. tmp0.z = lo (tmp0.x * src2) */
2902 if (ctx
->bc
->chip_class
== CAYMAN
) {
2903 for (j
= 0 ; j
< 4; j
++) {
2904 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2905 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
);
2909 alu
.dst
.write
= (j
== 2);
2911 alu
.src
[0].sel
= tmp0
;
2912 alu
.src
[0].chan
= 0;
2914 alu
.src
[1].sel
= tmp2
;
2915 alu
.src
[1].chan
= 1;
2917 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
2920 alu
.last
= (j
== 3);
2921 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2925 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2926 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
);
2932 alu
.src
[0].sel
= tmp0
;
2933 alu
.src
[0].chan
= 0;
2935 alu
.src
[1].sel
= tmp2
;
2936 alu
.src
[1].chan
= 1;
2938 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
2942 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2946 /* 3. tmp0.w = -tmp0.z */
2947 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2948 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
2954 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
2955 alu
.src
[1].sel
= tmp0
;
2956 alu
.src
[1].chan
= 2;
2959 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2962 /* 4. tmp0.y = hi (tmp0.x * src2) */
2963 if (ctx
->bc
->chip_class
== CAYMAN
) {
2964 for (j
= 0 ; j
< 4; j
++) {
2965 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2966 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
2970 alu
.dst
.write
= (j
== 1);
2972 alu
.src
[0].sel
= tmp0
;
2973 alu
.src
[0].chan
= 0;
2976 alu
.src
[1].sel
= tmp2
;
2977 alu
.src
[1].chan
= 1;
2979 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
2981 alu
.last
= (j
== 3);
2982 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
2986 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
2987 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
2993 alu
.src
[0].sel
= tmp0
;
2994 alu
.src
[0].chan
= 0;
2997 alu
.src
[1].sel
= tmp2
;
2998 alu
.src
[1].chan
= 1;
3000 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
3004 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3008 /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src)) */
3009 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3010 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDE_INT
);
3017 alu
.src
[0].sel
= tmp0
;
3018 alu
.src
[0].chan
= 1;
3019 alu
.src
[1].sel
= tmp0
;
3020 alu
.src
[1].chan
= 3;
3021 alu
.src
[2].sel
= tmp0
;
3022 alu
.src
[2].chan
= 2;
3025 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3028 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
3029 if (ctx
->bc
->chip_class
== CAYMAN
) {
3030 for (j
= 0 ; j
< 4; j
++) {
3031 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3032 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
3036 alu
.dst
.write
= (j
== 3);
3038 alu
.src
[0].sel
= tmp0
;
3039 alu
.src
[0].chan
= 2;
3041 alu
.src
[1].sel
= tmp0
;
3042 alu
.src
[1].chan
= 0;
3044 alu
.last
= (j
== 3);
3045 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3049 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3050 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
3056 alu
.src
[0].sel
= tmp0
;
3057 alu
.src
[0].chan
= 2;
3059 alu
.src
[1].sel
= tmp0
;
3060 alu
.src
[1].chan
= 0;
3063 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3067 /* 7. tmp1.x = tmp0.x - tmp0.w */
3068 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3069 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
3075 alu
.src
[0].sel
= tmp0
;
3076 alu
.src
[0].chan
= 0;
3077 alu
.src
[1].sel
= tmp0
;
3078 alu
.src
[1].chan
= 3;
3081 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3084 /* 8. tmp1.y = tmp0.x + tmp0.w */
3085 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3086 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
);
3092 alu
.src
[0].sel
= tmp0
;
3093 alu
.src
[0].chan
= 0;
3094 alu
.src
[1].sel
= tmp0
;
3095 alu
.src
[1].chan
= 3;
3098 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3101 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
3102 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3103 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDE_INT
);
3110 alu
.src
[0].sel
= tmp0
;
3111 alu
.src
[0].chan
= 1;
3112 alu
.src
[1].sel
= tmp1
;
3113 alu
.src
[1].chan
= 1;
3114 alu
.src
[2].sel
= tmp1
;
3115 alu
.src
[2].chan
= 0;
3118 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3121 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
3122 if (ctx
->bc
->chip_class
== CAYMAN
) {
3123 for (j
= 0 ; j
< 4; j
++) {
3124 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3125 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
3129 alu
.dst
.write
= (j
== 2);
3131 alu
.src
[0].sel
= tmp0
;
3132 alu
.src
[0].chan
= 0;
3135 alu
.src
[1].sel
= tmp2
;
3136 alu
.src
[1].chan
= 0;
3138 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
3141 alu
.last
= (j
== 3);
3142 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3146 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3147 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
3153 alu
.src
[0].sel
= tmp0
;
3154 alu
.src
[0].chan
= 0;
3157 alu
.src
[1].sel
= tmp2
;
3158 alu
.src
[1].chan
= 0;
3160 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
3164 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3168 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
3169 if (ctx
->bc
->chip_class
== CAYMAN
) {
3170 for (j
= 0 ; j
< 4; j
++) {
3171 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3172 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
);
3176 alu
.dst
.write
= (j
== 1);
3179 alu
.src
[0].sel
= tmp2
;
3180 alu
.src
[0].chan
= 1;
3182 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
3185 alu
.src
[1].sel
= tmp0
;
3186 alu
.src
[1].chan
= 2;
3188 alu
.last
= (j
== 3);
3189 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3193 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3194 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
);
3201 alu
.src
[0].sel
= tmp2
;
3202 alu
.src
[0].chan
= 1;
3204 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
3207 alu
.src
[1].sel
= tmp0
;
3208 alu
.src
[1].chan
= 2;
3211 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3215 /* 12. tmp0.w = src1 - tmp0.y = r */
3216 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3217 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
3224 alu
.src
[0].sel
= tmp2
;
3225 alu
.src
[0].chan
= 0;
3227 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3230 alu
.src
[1].sel
= tmp0
;
3231 alu
.src
[1].chan
= 1;
3234 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3237 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
3238 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3239 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT
);
3245 alu
.src
[0].sel
= tmp0
;
3246 alu
.src
[0].chan
= 3;
3248 alu
.src
[1].sel
= tmp2
;
3249 alu
.src
[1].chan
= 1;
3251 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
3255 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3258 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
3259 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3260 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT
);
3267 alu
.src
[0].sel
= tmp2
;
3268 alu
.src
[0].chan
= 0;
3270 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3273 alu
.src
[1].sel
= tmp0
;
3274 alu
.src
[1].chan
= 1;
3277 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3280 if (mod
) { /* UMOD */
3282 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
3283 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3284 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
3290 alu
.src
[0].sel
= tmp0
;
3291 alu
.src
[0].chan
= 3;
3294 alu
.src
[1].sel
= tmp2
;
3295 alu
.src
[1].chan
= 1;
3297 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
3301 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3304 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
3305 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3306 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
);
3312 alu
.src
[0].sel
= tmp0
;
3313 alu
.src
[0].chan
= 3;
3315 alu
.src
[1].sel
= tmp2
;
3316 alu
.src
[1].chan
= 1;
3318 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
3322 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3327 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
3328 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3329 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
);
3335 alu
.src
[0].sel
= tmp0
;
3336 alu
.src
[0].chan
= 2;
3337 alu
.src
[1].sel
= V_SQ_ALU_SRC_1_INT
;
3340 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3343 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
3344 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3345 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
);
3351 alu
.src
[0].sel
= tmp0
;
3352 alu
.src
[0].chan
= 2;
3353 alu
.src
[1].sel
= V_SQ_ALU_SRC_M_1_INT
;
3356 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3361 /* 17. tmp1.x = tmp1.x & tmp1.y */
3362 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3363 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT
);
3369 alu
.src
[0].sel
= tmp1
;
3370 alu
.src
[0].chan
= 0;
3371 alu
.src
[1].sel
= tmp1
;
3372 alu
.src
[1].chan
= 1;
3375 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3378 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
3379 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
3380 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3381 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDE_INT
);
3388 alu
.src
[0].sel
= tmp1
;
3389 alu
.src
[0].chan
= 0;
3390 alu
.src
[1].sel
= tmp0
;
3391 alu
.src
[1].chan
= mod
? 3 : 2;
3392 alu
.src
[2].sel
= tmp1
;
3393 alu
.src
[2].chan
= 2;
3396 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3399 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
3400 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3401 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDE_INT
);
3409 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3412 alu
.src
[0].sel
= tmp1
;
3413 alu
.src
[0].chan
= 1;
3414 alu
.src
[1].sel
= tmp1
;
3415 alu
.src
[1].chan
= 3;
3416 alu
.src
[2].sel
= tmp0
;
3417 alu
.src
[2].chan
= 2;
3420 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3425 /* fix the sign of the result */
3429 /* tmp0.x = -tmp0.z */
3430 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3431 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
3437 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
3438 alu
.src
[1].sel
= tmp0
;
3439 alu
.src
[1].chan
= 2;
3442 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3445 /* sign of the remainder is the same as the sign of src0 */
3446 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
3447 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3448 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT
);
3451 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3453 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3454 alu
.src
[1].sel
= tmp0
;
3455 alu
.src
[1].chan
= 2;
3456 alu
.src
[2].sel
= tmp0
;
3457 alu
.src
[2].chan
= 0;
3460 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3465 /* tmp0.x = -tmp0.z */
3466 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3467 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
3473 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
3474 alu
.src
[1].sel
= tmp0
;
3475 alu
.src
[1].chan
= 2;
3478 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
3481 /* fix the quotient sign (same as the sign of src0*src1) */
3482 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
3483 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3484 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT
);
3487 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3489 alu
.src
[0].sel
= tmp2
;
3490 alu
.src
[0].chan
= 2;
3491 alu
.src
[1].sel
= tmp0
;
3492 alu
.src
[1].chan
= 2;
3493 alu
.src
[2].sel
= tmp0
;
3494 alu
.src
[2].chan
= 0;
3497 if ((r
= r600_bytecode_add_alu(ctx
->bc
, &alu
)))
/* UDIV: unsigned quotient (mod = 0, signed_op = 0). */
static int tgsi_udiv(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 0, 0);
}
/* UMOD: unsigned remainder (mod = 1, signed_op = 0). */
static int tgsi_umod(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 1, 0);
}
/* IDIV: signed quotient (mod = 0, signed_op = 1). */
static int tgsi_idiv(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 0, 1);
}
/* IMOD: signed remainder (mod = 1, signed_op = 1). */
static int tgsi_imod(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 1, 1);
}
3526 static int tgsi_f2i(struct r600_shader_ctx
*ctx
)
3528 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3529 struct r600_bytecode_alu alu
;
3531 unsigned write_mask
= inst
->Dst
[0].Register
.WriteMask
;
3532 int last_inst
= tgsi_last_instruction(write_mask
);
3534 for (i
= 0; i
< 4; i
++) {
3535 if (!(write_mask
& (1<<i
)))
3538 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3539 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
);
3541 alu
.dst
.sel
= ctx
->temp_reg
;
3545 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3548 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3553 for (i
= 0; i
< 4; i
++) {
3554 if (!(write_mask
& (1<<i
)))
3557 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3558 alu
.inst
= ctx
->inst_info
->r600_opcode
;
3560 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3562 alu
.src
[0].sel
= ctx
->temp_reg
;
3563 alu
.src
[0].chan
= i
;
3565 if (i
== last_inst
|| alu
.inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT
)
3567 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3575 static int tgsi_iabs(struct r600_shader_ctx
*ctx
)
3577 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3578 struct r600_bytecode_alu alu
;
3580 unsigned write_mask
= inst
->Dst
[0].Register
.WriteMask
;
3581 int last_inst
= tgsi_last_instruction(write_mask
);
3584 for (i
= 0; i
< 4; i
++) {
3585 if (!(write_mask
& (1<<i
)))
3588 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3589 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
);
3591 alu
.dst
.sel
= ctx
->temp_reg
;
3595 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
3596 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
3600 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3605 /* dst = (src >= 0 ? src : tmp) */
3606 for (i
= 0; i
< 4; i
++) {
3607 if (!(write_mask
& (1<<i
)))
3610 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3611 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT
);
3615 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3617 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3618 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
3619 alu
.src
[2].sel
= ctx
->temp_reg
;
3620 alu
.src
[2].chan
= i
;
3624 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3631 static int tgsi_issg(struct r600_shader_ctx
*ctx
)
3633 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3634 struct r600_bytecode_alu alu
;
3636 unsigned write_mask
= inst
->Dst
[0].Register
.WriteMask
;
3637 int last_inst
= tgsi_last_instruction(write_mask
);
3639 /* tmp = (src >= 0 ? src : -1) */
3640 for (i
= 0; i
< 4; i
++) {
3641 if (!(write_mask
& (1<<i
)))
3644 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3645 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT
);
3648 alu
.dst
.sel
= ctx
->temp_reg
;
3652 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3653 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
3654 alu
.src
[2].sel
= V_SQ_ALU_SRC_M_1_INT
;
3658 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3663 /* dst = (tmp > 0 ? 1 : tmp) */
3664 for (i
= 0; i
< 4; i
++) {
3665 if (!(write_mask
& (1<<i
)))
3668 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3669 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT_INT
);
3673 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3675 alu
.src
[0].sel
= ctx
->temp_reg
;
3676 alu
.src
[0].chan
= i
;
3678 alu
.src
[1].sel
= V_SQ_ALU_SRC_1_INT
;
3680 alu
.src
[2].sel
= ctx
->temp_reg
;
3681 alu
.src
[2].chan
= i
;
3685 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3694 static int tgsi_ssg(struct r600_shader_ctx
*ctx
)
3696 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3697 struct r600_bytecode_alu alu
;
3700 /* tmp = (src > 0 ? 1 : src) */
3701 for (i
= 0; i
< 4; i
++) {
3702 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3703 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
);
3706 alu
.dst
.sel
= ctx
->temp_reg
;
3709 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
3710 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
3711 r600_bytecode_src(&alu
.src
[2], &ctx
->src
[0], i
);
3715 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3720 /* dst = (-tmp > 0 ? -1 : tmp) */
3721 for (i
= 0; i
< 4; i
++) {
3722 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3723 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
);
3725 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3727 alu
.src
[0].sel
= ctx
->temp_reg
;
3728 alu
.src
[0].chan
= i
;
3731 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
3734 alu
.src
[2].sel
= ctx
->temp_reg
;
3735 alu
.src
[2].chan
= i
;
3739 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3746 static int tgsi_helper_copy(struct r600_shader_ctx
*ctx
, struct tgsi_full_instruction
*inst
)
3748 struct r600_bytecode_alu alu
;
3751 for (i
= 0; i
< 4; i
++) {
3752 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3753 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
))) {
3754 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
);
3757 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
3758 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3759 alu
.src
[0].sel
= ctx
->temp_reg
;
3760 alu
.src
[0].chan
= i
;
3765 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3772 static int tgsi_op3(struct r600_shader_ctx
*ctx
)
3774 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3775 struct r600_bytecode_alu alu
;
3777 int lasti
= tgsi_last_instruction(inst
->Dst
[0].Register
.WriteMask
);
3779 for (i
= 0; i
< lasti
+ 1; i
++) {
3780 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
3783 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3784 alu
.inst
= ctx
->inst_info
->r600_opcode
;
3785 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
3786 r600_bytecode_src(&alu
.src
[j
], &ctx
->src
[j
], i
);
3789 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3796 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3803 static int tgsi_dp(struct r600_shader_ctx
*ctx
)
3805 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3806 struct r600_bytecode_alu alu
;
3809 for (i
= 0; i
< 4; i
++) {
3810 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3811 alu
.inst
= ctx
->inst_info
->r600_opcode
;
3812 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
3813 r600_bytecode_src(&alu
.src
[j
], &ctx
->src
[j
], i
);
3816 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
3818 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
3819 /* handle some special cases */
3820 switch (ctx
->inst_info
->tgsi_opcode
) {
3821 case TGSI_OPCODE_DP2
:
3823 alu
.src
[0].sel
= alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
3824 alu
.src
[0].chan
= alu
.src
[1].chan
= 0;
3827 case TGSI_OPCODE_DP3
:
3829 alu
.src
[0].sel
= alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
3830 alu
.src
[0].chan
= alu
.src
[1].chan
= 0;
3833 case TGSI_OPCODE_DPH
:
3835 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
3836 alu
.src
[0].chan
= 0;
3846 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3853 static inline boolean
tgsi_tex_src_requires_loading(struct r600_shader_ctx
*ctx
,
3856 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3857 return (inst
->Src
[index
].Register
.File
!= TGSI_FILE_TEMPORARY
&&
3858 inst
->Src
[index
].Register
.File
!= TGSI_FILE_INPUT
&&
3859 inst
->Src
[index
].Register
.File
!= TGSI_FILE_OUTPUT
) ||
3860 ctx
->src
[index
].neg
|| ctx
->src
[index
].abs
;
3863 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx
*ctx
,
3866 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3867 return ctx
->file_offset
[inst
->Src
[index
].Register
.File
] + inst
->Src
[index
].Register
.Index
;
3870 static int tgsi_tex(struct r600_shader_ctx
*ctx
)
3872 static float one_point_five
= 1.5f
;
3873 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
3874 struct r600_bytecode_tex tex
;
3875 struct r600_bytecode_alu alu
;
3879 bool read_compressed_msaa
= ctx
->bc
->msaa_texture_mode
== MSAA_TEXTURE_COMPRESSED
&&
3880 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
&&
3881 (inst
->Texture
.Texture
== TGSI_TEXTURE_2D_MSAA
||
3882 inst
->Texture
.Texture
== TGSI_TEXTURE_2D_ARRAY_MSAA
);
3883 /* Texture fetch instructions can only use gprs as source.
3884 * Also they cannot negate the source or take the absolute value */
3885 const boolean src_requires_loading
= (inst
->Instruction
.Opcode
!= TGSI_OPCODE_TXQ_LZ
&&
3886 tgsi_tex_src_requires_loading(ctx
, 0)) ||
3887 read_compressed_msaa
;
3888 boolean src_loaded
= FALSE
;
3889 unsigned sampler_src_reg
= inst
->Instruction
.Opcode
== TGSI_OPCODE_TXQ_LZ
? 0 : 1;
3890 int8_t offset_x
= 0, offset_y
= 0, offset_z
= 0;
3891 boolean has_txq_cube_array_z
= false;
3893 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXQ
&&
3894 ((inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
||
3895 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
)))
3896 if (inst
->Dst
[0].Register
.WriteMask
& 4) {
3897 ctx
->shader
->has_txq_cube_array_z_comp
= true;
3898 has_txq_cube_array_z
= true;
3901 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TEX2
||
3902 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
||
3903 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
)
3904 sampler_src_reg
= 2;
3906 src_gpr
= tgsi_tex_get_src_gpr(ctx
, 0);
3908 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
) {
3909 /* get offset values */
3910 if (inst
->Texture
.NumOffsets
) {
3911 assert(inst
->Texture
.NumOffsets
== 1);
3913 offset_x
= ctx
->literals
[inst
->TexOffsets
[0].Index
+ inst
->TexOffsets
[0].SwizzleX
] << 1;
3914 offset_y
= ctx
->literals
[inst
->TexOffsets
[0].Index
+ inst
->TexOffsets
[0].SwizzleY
] << 1;
3915 offset_z
= ctx
->literals
[inst
->TexOffsets
[0].Index
+ inst
->TexOffsets
[0].SwizzleZ
] << 1;
3917 } else if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXD
) {
3918 /* TGSI moves the sampler to src reg 3 for TXD */
3919 sampler_src_reg
= 3;
3921 for (i
= 1; i
< 3; i
++) {
3922 /* set gradients h/v */
3923 memset(&tex
, 0, sizeof(struct r600_bytecode_tex
));
3924 tex
.inst
= (i
== 1) ? SQ_TEX_INST_SET_GRADIENTS_H
:
3925 SQ_TEX_INST_SET_GRADIENTS_V
;
3926 tex
.sampler_id
= tgsi_tex_get_src_gpr(ctx
, sampler_src_reg
);
3927 tex
.resource_id
= tex
.sampler_id
+ R600_MAX_CONST_BUFFERS
;
3929 if (tgsi_tex_src_requires_loading(ctx
, i
)) {
3930 tex
.src_gpr
= r600_get_temp(ctx
);
3936 for (j
= 0; j
< 4; j
++) {
3937 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3938 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
3939 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[i
], j
);
3940 alu
.dst
.sel
= tex
.src_gpr
;
3945 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3951 tex
.src_gpr
= tgsi_tex_get_src_gpr(ctx
, i
);
3952 tex
.src_sel_x
= ctx
->src
[i
].swizzle
[0];
3953 tex
.src_sel_y
= ctx
->src
[i
].swizzle
[1];
3954 tex
.src_sel_z
= ctx
->src
[i
].swizzle
[2];
3955 tex
.src_sel_w
= ctx
->src
[i
].swizzle
[3];
3956 tex
.src_rel
= ctx
->src
[i
].rel
;
3958 tex
.dst_gpr
= ctx
->temp_reg
; /* just to avoid confusing the asm scheduler */
3959 tex
.dst_sel_x
= tex
.dst_sel_y
= tex
.dst_sel_z
= tex
.dst_sel_w
= 7;
3960 if (inst
->Texture
.Texture
!= TGSI_TEXTURE_RECT
) {
3961 tex
.coord_type_x
= 1;
3962 tex
.coord_type_y
= 1;
3963 tex
.coord_type_z
= 1;
3964 tex
.coord_type_w
= 1;
3966 r
= r600_bytecode_add_tex(ctx
->bc
, &tex
);
3970 } else if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
3972 /* Add perspective divide */
3973 if (ctx
->bc
->chip_class
== CAYMAN
) {
3975 for (i
= 0; i
< 3; i
++) {
3976 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3977 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
3978 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 3);
3980 alu
.dst
.sel
= ctx
->temp_reg
;
3986 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
3993 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
3994 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
3995 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 3);
3997 alu
.dst
.sel
= ctx
->temp_reg
;
3998 alu
.dst
.chan
= out_chan
;
4001 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4006 for (i
= 0; i
< 3; i
++) {
4007 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4008 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
4009 alu
.src
[0].sel
= ctx
->temp_reg
;
4010 alu
.src
[0].chan
= out_chan
;
4011 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
4012 alu
.dst
.sel
= ctx
->temp_reg
;
4015 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4019 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4020 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4021 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
4022 alu
.src
[0].chan
= 0;
4023 alu
.dst
.sel
= ctx
->temp_reg
;
4027 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4031 src_gpr
= ctx
->temp_reg
;
4034 if ((inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE
||
4035 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
||
4036 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
4037 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) &&
4038 inst
->Instruction
.Opcode
!= TGSI_OPCODE_TXQ
&&
4039 inst
->Instruction
.Opcode
!= TGSI_OPCODE_TXQ_LZ
) {
4041 static const unsigned src0_swizzle
[] = {2, 2, 0, 1};
4042 static const unsigned src1_swizzle
[] = {1, 0, 2, 2};
4044 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
4045 for (i
= 0; i
< 4; i
++) {
4046 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4047 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
);
4048 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], src0_swizzle
[i
]);
4049 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], src1_swizzle
[i
]);
4050 alu
.dst
.sel
= ctx
->temp_reg
;
4055 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4060 /* tmp1.z = RCP_e(|tmp1.z|) */
4061 if (ctx
->bc
->chip_class
== CAYMAN
) {
4062 for (i
= 0; i
< 3; i
++) {
4063 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4064 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
4065 alu
.src
[0].sel
= ctx
->temp_reg
;
4066 alu
.src
[0].chan
= 2;
4068 alu
.dst
.sel
= ctx
->temp_reg
;
4074 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4079 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4080 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
4081 alu
.src
[0].sel
= ctx
->temp_reg
;
4082 alu
.src
[0].chan
= 2;
4084 alu
.dst
.sel
= ctx
->temp_reg
;
4088 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4093 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
4094 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
4095 * muladd has no writemask, have to use another temp
4097 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4098 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
4101 alu
.src
[0].sel
= ctx
->temp_reg
;
4102 alu
.src
[0].chan
= 0;
4103 alu
.src
[1].sel
= ctx
->temp_reg
;
4104 alu
.src
[1].chan
= 2;
4106 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
4107 alu
.src
[2].chan
= 0;
4108 alu
.src
[2].value
= *(uint32_t *)&one_point_five
;
4110 alu
.dst
.sel
= ctx
->temp_reg
;
4114 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4118 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4119 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
4122 alu
.src
[0].sel
= ctx
->temp_reg
;
4123 alu
.src
[0].chan
= 1;
4124 alu
.src
[1].sel
= ctx
->temp_reg
;
4125 alu
.src
[1].chan
= 2;
4127 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
4128 alu
.src
[2].chan
= 0;
4129 alu
.src
[2].value
= *(uint32_t *)&one_point_five
;
4131 alu
.dst
.sel
= ctx
->temp_reg
;
4136 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4139 /* write initial compare value into Z component
4140 - W src 0 for shadow cube
4141 - X src 1 for shadow cube array */
4142 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
4143 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
4144 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4145 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4146 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
)
4147 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], 0);
4149 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 3);
4150 alu
.dst
.sel
= ctx
->temp_reg
;
4154 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4159 if (inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
||
4160 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
4161 if (ctx
->bc
->chip_class
>= EVERGREEN
) {
4162 int mytmp
= r600_get_temp(ctx
);
4163 static const float eight
= 8.0f
;
4164 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4165 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4166 alu
.src
[0].sel
= ctx
->temp_reg
;
4167 alu
.src
[0].chan
= 3;
4168 alu
.dst
.sel
= mytmp
;
4172 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4176 /* have to multiply original layer by 8 and add to face id (temp.w) in Z */
4177 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4178 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
4180 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 3);
4181 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
4182 alu
.src
[1].chan
= 0;
4183 alu
.src
[1].value
= *(uint32_t *)&eight
;
4184 alu
.src
[2].sel
= mytmp
;
4185 alu
.src
[2].chan
= 0;
4186 alu
.dst
.sel
= ctx
->temp_reg
;
4190 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4193 } else if (ctx
->bc
->chip_class
< EVERGREEN
) {
4194 memset(&tex
, 0, sizeof(struct r600_bytecode_tex
));
4195 tex
.inst
= SQ_TEX_INST_SET_CUBEMAP_INDEX
;
4196 tex
.sampler_id
= tgsi_tex_get_src_gpr(ctx
, sampler_src_reg
);
4197 tex
.resource_id
= tex
.sampler_id
+ R600_MAX_CONST_BUFFERS
;
4198 tex
.src_gpr
= r600_get_temp(ctx
);
4203 tex
.dst_sel_x
= tex
.dst_sel_y
= tex
.dst_sel_z
= tex
.dst_sel_w
= 7;
4204 tex
.coord_type_x
= 1;
4205 tex
.coord_type_y
= 1;
4206 tex
.coord_type_z
= 1;
4207 tex
.coord_type_w
= 1;
4208 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4209 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4210 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 3);
4211 alu
.dst
.sel
= tex
.src_gpr
;
4215 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4219 r
= r600_bytecode_add_tex(ctx
->bc
, &tex
);
4226 /* for cube forms of lod and bias we need to route things */
4227 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
||
4228 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
||
4229 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
||
4230 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
) {
4231 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4232 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4233 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
||
4234 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
)
4235 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], 0);
4237 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 3);
4238 alu
.dst
.sel
= ctx
->temp_reg
;
4242 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4248 src_gpr
= ctx
->temp_reg
;
4251 if (src_requires_loading
&& !src_loaded
) {
4252 for (i
= 0; i
< 4; i
++) {
4253 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4254 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4255 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
4256 alu
.dst
.sel
= ctx
->temp_reg
;
4261 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4266 src_gpr
= ctx
->temp_reg
;
4269 /* Obtain the sample index for reading a compressed MSAA color texture.
4270 * To read the FMASK, we use the ldfptr instruction, which tells us
4271 * where the samples are stored.
4272 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
4273 * which is the identity mapping. Each nibble says which physical sample
4274 * should be fetched to get that sample.
4276 * Assume src.z contains the sample index. It should be modified like this:
4277 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
4278 * Then fetch the texel with src.
4280 if (read_compressed_msaa
) {
4281 unsigned sample_chan
= inst
->Texture
.Texture
== TGSI_TEXTURE_2D_MSAA
? 3 : 4;
4282 unsigned temp
= r600_get_temp(ctx
);
4285 /* temp.w = ldfptr() */
4286 memset(&tex
, 0, sizeof(struct r600_bytecode_tex
));
4287 tex
.inst
= SQ_TEX_INST_LD
;
4288 tex
.inst_mod
= 1; /* to indicate this is ldfptr */
4289 tex
.sampler_id
= tgsi_tex_get_src_gpr(ctx
, sampler_src_reg
);
4290 tex
.resource_id
= tex
.sampler_id
+ R600_MAX_CONST_BUFFERS
;
4291 tex
.src_gpr
= src_gpr
;
4293 tex
.dst_sel_x
= 7; /* mask out these components */
4296 tex
.dst_sel_w
= 0; /* store X */
4301 tex
.offset_x
= offset_x
;
4302 tex
.offset_y
= offset_y
;
4303 tex
.offset_z
= offset_z
;
4304 r
= r600_bytecode_add_tex(ctx
->bc
, &tex
);
4308 /* temp.x = sample_index*4 */
4309 if (ctx
->bc
->chip_class
== CAYMAN
) {
4310 for (i
= 0 ; i
< 4; i
++) {
4311 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4312 alu
.inst
= ctx
->inst_info
->r600_opcode
;
4313 alu
.src
[0].sel
= src_gpr
;
4314 alu
.src
[0].chan
= sample_chan
;
4315 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
4316 alu
.src
[1].value
= 4;
4319 alu
.dst
.write
= i
== 0;
4322 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4327 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4328 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
);
4329 alu
.src
[0].sel
= src_gpr
;
4330 alu
.src
[0].chan
= sample_chan
;
4331 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
4332 alu
.src
[1].value
= 4;
4337 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4342 /* sample_index = temp.w >> temp.x */
4343 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4344 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
);
4345 alu
.src
[0].sel
= temp
;
4346 alu
.src
[0].chan
= 3;
4347 alu
.src
[1].sel
= temp
;
4348 alu
.src
[1].chan
= 0;
4349 alu
.dst
.sel
= src_gpr
;
4350 alu
.dst
.chan
= sample_chan
;
4353 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4357 /* sample_index & 0xF */
4358 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4359 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT
);
4360 alu
.src
[0].sel
= src_gpr
;
4361 alu
.src
[0].chan
= sample_chan
;
4362 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
4363 alu
.src
[1].value
= 0xF;
4364 alu
.dst
.sel
= src_gpr
;
4365 alu
.dst
.chan
= sample_chan
;
4368 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4372 /* visualize the FMASK */
4373 for (i
= 0; i
< 4; i
++) {
4374 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4375 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
);
4376 alu
.src
[0].sel
= src_gpr
;
4377 alu
.src
[0].chan
= sample_chan
;
4378 alu
.dst
.sel
= ctx
->file_offset
[inst
->Dst
[0].Register
.File
] + inst
->Dst
[0].Register
.Index
;
4382 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4390 /* does this shader want a num layers from TXQ for a cube array? */
4391 if (has_txq_cube_array_z
) {
4392 int id
= tgsi_tex_get_src_gpr(ctx
, sampler_src_reg
);
4394 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4395 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4397 alu
.src
[0].sel
= 512 + (id
/ 4);
4398 alu
.src
[0].kc_bank
= R600_TXQ_CONST_BUFFER
;
4399 alu
.src
[0].chan
= id
% 4;
4400 tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
4402 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4405 /* disable writemask from texture instruction */
4406 inst
->Dst
[0].Register
.WriteMask
&= ~4;
4409 opcode
= ctx
->inst_info
->r600_opcode
;
4410 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D
||
4411 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D
||
4412 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWRECT
||
4413 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
4414 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D_ARRAY
||
4415 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D_ARRAY
||
4416 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
4418 case SQ_TEX_INST_SAMPLE
:
4419 opcode
= SQ_TEX_INST_SAMPLE_C
;
4421 case SQ_TEX_INST_SAMPLE_L
:
4422 opcode
= SQ_TEX_INST_SAMPLE_C_L
;
4424 case SQ_TEX_INST_SAMPLE_LB
:
4425 opcode
= SQ_TEX_INST_SAMPLE_C_LB
;
4427 case SQ_TEX_INST_SAMPLE_G
:
4428 opcode
= SQ_TEX_INST_SAMPLE_C_G
;
4433 memset(&tex
, 0, sizeof(struct r600_bytecode_tex
));
4436 tex
.sampler_id
= tgsi_tex_get_src_gpr(ctx
, sampler_src_reg
);
4437 tex
.resource_id
= tex
.sampler_id
+ R600_MAX_CONST_BUFFERS
;
4438 tex
.src_gpr
= src_gpr
;
4439 tex
.dst_gpr
= ctx
->file_offset
[inst
->Dst
[0].Register
.File
] + inst
->Dst
[0].Register
.Index
;
4440 tex
.dst_sel_x
= (inst
->Dst
[0].Register
.WriteMask
& 1) ? 0 : 7;
4441 tex
.dst_sel_y
= (inst
->Dst
[0].Register
.WriteMask
& 2) ? 1 : 7;
4442 tex
.dst_sel_z
= (inst
->Dst
[0].Register
.WriteMask
& 4) ? 2 : 7;
4443 tex
.dst_sel_w
= (inst
->Dst
[0].Register
.WriteMask
& 8) ? 3 : 7;
4445 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXQ_LZ
) {
4450 } else if (src_loaded
) {
4456 tex
.src_sel_x
= ctx
->src
[0].swizzle
[0];
4457 tex
.src_sel_y
= ctx
->src
[0].swizzle
[1];
4458 tex
.src_sel_z
= ctx
->src
[0].swizzle
[2];
4459 tex
.src_sel_w
= ctx
->src
[0].swizzle
[3];
4460 tex
.src_rel
= ctx
->src
[0].rel
;
4463 if (inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE
||
4464 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
4465 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
||
4466 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
4470 tex
.src_sel_w
= 2; /* route Z compare or Lod value into W */
4473 if (inst
->Texture
.Texture
!= TGSI_TEXTURE_RECT
&&
4474 inst
->Texture
.Texture
!= TGSI_TEXTURE_SHADOWRECT
) {
4475 tex
.coord_type_x
= 1;
4476 tex
.coord_type_y
= 1;
4478 tex
.coord_type_z
= 1;
4479 tex
.coord_type_w
= 1;
4481 tex
.offset_x
= offset_x
;
4482 tex
.offset_y
= offset_y
;
4483 tex
.offset_z
= offset_z
;
4485 /* Put the depth for comparison in W.
4486 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
4487 * Some instructions expect the depth in Z. */
4488 if ((inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D
||
4489 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D
||
4490 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWRECT
||
4491 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D_ARRAY
) &&
4492 opcode
!= SQ_TEX_INST_SAMPLE_C_L
&&
4493 opcode
!= SQ_TEX_INST_SAMPLE_C_LB
) {
4494 tex
.src_sel_w
= tex
.src_sel_z
;
4497 if (inst
->Texture
.Texture
== TGSI_TEXTURE_1D_ARRAY
||
4498 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D_ARRAY
) {
4499 if (opcode
== SQ_TEX_INST_SAMPLE_C_L
||
4500 opcode
== SQ_TEX_INST_SAMPLE_C_LB
) {
4501 /* the array index is read from Y */
4502 tex
.coord_type_y
= 0;
4504 /* the array index is read from Z */
4505 tex
.coord_type_z
= 0;
4506 tex
.src_sel_z
= tex
.src_sel_y
;
4508 } else if (inst
->Texture
.Texture
== TGSI_TEXTURE_2D_ARRAY
||
4509 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D_ARRAY
||
4510 ((inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
||
4511 inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) &&
4512 (ctx
->bc
->chip_class
>= EVERGREEN
)))
4513 /* the array index is read from Z */
4514 tex
.coord_type_z
= 0;
4516 r
= r600_bytecode_add_tex(ctx
->bc
, &tex
);
4520 /* add shadow ambient support - gallium doesn't do it yet */
4524 static int tgsi_lrp(struct r600_shader_ctx
*ctx
)
4526 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
4527 struct r600_bytecode_alu alu
;
4528 int lasti
= tgsi_last_instruction(inst
->Dst
[0].Register
.WriteMask
);
4532 /* optimize if it's just an equal balance */
4533 if (ctx
->src
[0].sel
== V_SQ_ALU_SRC_0_5
) {
4534 for (i
= 0; i
< lasti
+ 1; i
++) {
4535 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
4538 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4539 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
);
4540 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[1], i
);
4541 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[2], i
);
4543 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
4548 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4556 for (i
= 0; i
< lasti
+ 1; i
++) {
4557 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
4560 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4561 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
);
4562 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
4563 alu
.src
[0].chan
= 0;
4564 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[0], i
);
4565 r600_bytecode_src_toggle_neg(&alu
.src
[1]);
4566 alu
.dst
.sel
= ctx
->temp_reg
;
4572 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4577 /* (1 - src0) * src2 */
4578 for (i
= 0; i
< lasti
+ 1; i
++) {
4579 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
4582 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4583 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
4584 alu
.src
[0].sel
= ctx
->temp_reg
;
4585 alu
.src
[0].chan
= i
;
4586 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[2], i
);
4587 alu
.dst
.sel
= ctx
->temp_reg
;
4593 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4598 /* src0 * src1 + (1 - src0) * src2 */
4599 for (i
= 0; i
< lasti
+ 1; i
++) {
4600 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
4603 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4604 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
4606 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
4607 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], i
);
4608 alu
.src
[2].sel
= ctx
->temp_reg
;
4609 alu
.src
[2].chan
= i
;
4611 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
4616 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4623 static int tgsi_cmp(struct r600_shader_ctx
*ctx
)
4625 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
4626 struct r600_bytecode_alu alu
;
4628 int lasti
= tgsi_last_instruction(inst
->Dst
[0].Register
.WriteMask
);
4630 for (i
= 0; i
< lasti
+ 1; i
++) {
4631 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
4634 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4635 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE
);
4636 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], i
);
4637 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[2], i
);
4638 r600_bytecode_src(&alu
.src
[2], &ctx
->src
[1], i
);
4639 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
4645 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4652 static int tgsi_xpd(struct r600_shader_ctx
*ctx
)
4654 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
4655 static const unsigned int src0_swizzle
[] = {2, 0, 1};
4656 static const unsigned int src1_swizzle
[] = {1, 2, 0};
4657 struct r600_bytecode_alu alu
;
4658 uint32_t use_temp
= 0;
4661 if (inst
->Dst
[0].Register
.WriteMask
!= 0xf)
4664 for (i
= 0; i
< 4; i
++) {
4665 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4666 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
4668 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], src0_swizzle
[i
]);
4669 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], src1_swizzle
[i
]);
4671 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
4672 alu
.src
[0].chan
= i
;
4673 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
4674 alu
.src
[1].chan
= i
;
4677 alu
.dst
.sel
= ctx
->temp_reg
;
4683 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4688 for (i
= 0; i
< 4; i
++) {
4689 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4690 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
);
4693 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], src1_swizzle
[i
]);
4694 r600_bytecode_src(&alu
.src
[1], &ctx
->src
[1], src0_swizzle
[i
]);
4696 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
4697 alu
.src
[0].chan
= i
;
4698 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
4699 alu
.src
[1].chan
= i
;
4702 alu
.src
[2].sel
= ctx
->temp_reg
;
4704 alu
.src
[2].chan
= i
;
4707 alu
.dst
.sel
= ctx
->temp_reg
;
4709 tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
4715 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4720 return tgsi_helper_copy(ctx
, inst
);
4724 static int tgsi_exp(struct r600_shader_ctx
*ctx
)
4726 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
4727 struct r600_bytecode_alu alu
;
4731 /* result.x = 2^floor(src); */
4732 if (inst
->Dst
[0].Register
.WriteMask
& 1) {
4733 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4735 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
);
4736 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4738 alu
.dst
.sel
= ctx
->temp_reg
;
4742 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4746 if (ctx
->bc
->chip_class
== CAYMAN
) {
4747 for (i
= 0; i
< 3; i
++) {
4748 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
4749 alu
.src
[0].sel
= ctx
->temp_reg
;
4750 alu
.src
[0].chan
= 0;
4752 alu
.dst
.sel
= ctx
->temp_reg
;
4754 alu
.dst
.write
= i
== 0;
4756 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4761 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
4762 alu
.src
[0].sel
= ctx
->temp_reg
;
4763 alu
.src
[0].chan
= 0;
4765 alu
.dst
.sel
= ctx
->temp_reg
;
4769 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4775 /* result.y = tmp - floor(tmp); */
4776 if ((inst
->Dst
[0].Register
.WriteMask
>> 1) & 1) {
4777 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4779 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
);
4780 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4782 alu
.dst
.sel
= ctx
->temp_reg
;
4784 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
4793 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4798 /* result.z = RoughApprox2ToX(tmp);*/
4799 if ((inst
->Dst
[0].Register
.WriteMask
>> 2) & 0x1) {
4800 if (ctx
->bc
->chip_class
== CAYMAN
) {
4801 for (i
= 0; i
< 3; i
++) {
4802 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4803 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
4804 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4806 alu
.dst
.sel
= ctx
->temp_reg
;
4813 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4818 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4819 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
4820 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4822 alu
.dst
.sel
= ctx
->temp_reg
;
4828 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4834 /* result.w = 1.0;*/
4835 if ((inst
->Dst
[0].Register
.WriteMask
>> 3) & 0x1) {
4836 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4838 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
4839 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
4840 alu
.src
[0].chan
= 0;
4842 alu
.dst
.sel
= ctx
->temp_reg
;
4846 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4850 return tgsi_helper_copy(ctx
, inst
);
4853 static int tgsi_log(struct r600_shader_ctx
*ctx
)
4855 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
4856 struct r600_bytecode_alu alu
;
4860 /* result.x = floor(log2(|src|)); */
4861 if (inst
->Dst
[0].Register
.WriteMask
& 1) {
4862 if (ctx
->bc
->chip_class
== CAYMAN
) {
4863 for (i
= 0; i
< 3; i
++) {
4864 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4866 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
4867 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4868 r600_bytecode_src_set_abs(&alu
.src
[0]);
4870 alu
.dst
.sel
= ctx
->temp_reg
;
4876 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4882 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4884 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
4885 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4886 r600_bytecode_src_set_abs(&alu
.src
[0]);
4888 alu
.dst
.sel
= ctx
->temp_reg
;
4892 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4897 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
);
4898 alu
.src
[0].sel
= ctx
->temp_reg
;
4899 alu
.src
[0].chan
= 0;
4901 alu
.dst
.sel
= ctx
->temp_reg
;
4906 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4911 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
4912 if ((inst
->Dst
[0].Register
.WriteMask
>> 1) & 1) {
4914 if (ctx
->bc
->chip_class
== CAYMAN
) {
4915 for (i
= 0; i
< 3; i
++) {
4916 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4918 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
4919 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4920 r600_bytecode_src_set_abs(&alu
.src
[0]);
4922 alu
.dst
.sel
= ctx
->temp_reg
;
4929 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4934 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4936 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
4937 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
4938 r600_bytecode_src_set_abs(&alu
.src
[0]);
4940 alu
.dst
.sel
= ctx
->temp_reg
;
4945 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4950 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4952 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
);
4953 alu
.src
[0].sel
= ctx
->temp_reg
;
4954 alu
.src
[0].chan
= 1;
4956 alu
.dst
.sel
= ctx
->temp_reg
;
4961 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4965 if (ctx
->bc
->chip_class
== CAYMAN
) {
4966 for (i
= 0; i
< 3; i
++) {
4967 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4968 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
4969 alu
.src
[0].sel
= ctx
->temp_reg
;
4970 alu
.src
[0].chan
= 1;
4972 alu
.dst
.sel
= ctx
->temp_reg
;
4979 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4984 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
4985 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
);
4986 alu
.src
[0].sel
= ctx
->temp_reg
;
4987 alu
.src
[0].chan
= 1;
4989 alu
.dst
.sel
= ctx
->temp_reg
;
4994 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
4999 if (ctx
->bc
->chip_class
== CAYMAN
) {
5000 for (i
= 0; i
< 3; i
++) {
5001 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
5002 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
5003 alu
.src
[0].sel
= ctx
->temp_reg
;
5004 alu
.src
[0].chan
= 1;
5006 alu
.dst
.sel
= ctx
->temp_reg
;
5013 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
5018 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
5019 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
);
5020 alu
.src
[0].sel
= ctx
->temp_reg
;
5021 alu
.src
[0].chan
= 1;
5023 alu
.dst
.sel
= ctx
->temp_reg
;
5028 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
5033 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
5035 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
);
5037 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
5038 r600_bytecode_src_set_abs(&alu
.src
[0]);
5040 alu
.src
[1].sel
= ctx
->temp_reg
;
5041 alu
.src
[1].chan
= 1;
5043 alu
.dst
.sel
= ctx
->temp_reg
;
5048 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
5053 /* result.z = log2(|src|);*/
5054 if ((inst
->Dst
[0].Register
.WriteMask
>> 2) & 1) {
5055 if (ctx
->bc
->chip_class
== CAYMAN
) {
5056 for (i
= 0; i
< 3; i
++) {
5057 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
5059 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
5060 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
5061 r600_bytecode_src_set_abs(&alu
.src
[0]);
5063 alu
.dst
.sel
= ctx
->temp_reg
;
5070 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
5075 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
5077 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
);
5078 r600_bytecode_src(&alu
.src
[0], &ctx
->src
[0], 0);
5079 r600_bytecode_src_set_abs(&alu
.src
[0]);
5081 alu
.dst
.sel
= ctx
->temp_reg
;
5086 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
5092 /* result.w = 1.0; */
5093 if ((inst
->Dst
[0].Register
.WriteMask
>> 3) & 1) {
5094 memset(&alu
, 0, sizeof(struct r600_bytecode_alu
));
5096 alu
.inst
= CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
);
5097 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
5098 alu
.src
[0].chan
= 0;
5100 alu
.dst
.sel
= ctx
->temp_reg
;
5105 r
= r600_bytecode_add_alu(ctx
->bc
, &alu
);
5110 return tgsi_helper_copy(ctx
, inst
);
/*
 * Load the address register (AR) for TGSI ARL/ARR/UARL on Evergreen.
 * Selects the float->int conversion ALU opcode per TGSI opcode, writes the
 * result into ctx->bc->ar_reg, and clears bc->ar_loaded.
 * NOTE(review): this extract has dropped interleaved lines (break statements,
 * the `int r;` declaration, error checks, braces); surviving tokens are kept
 * as-is — reconcile against upstream r600_shader.c before compiling.
 */
static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));

	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_ARL:
		/* ARL: convert with floor semantics */
		alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR;
		/* [break lost in extract] */
	case TGSI_OPCODE_ARR:
		/* ARR: plain float->int conversion */
		alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT;
		/* [break lost in extract] */
	case TGSI_OPCODE_UARL:
		/* UARL: source is already an integer — just move it */
		alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
		/* [break/default lost in extract] */

	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);

	alu.dst.sel = ctx->bc->ar_reg;

	r = r600_bytecode_add_alu(ctx->bc, &alu);
	/* [error check lost in extract] */

	/* AR value must be (re)loaded before the next indexed access */
	ctx->bc->ar_loaded = 0;
/*
 * Load the address register for TGSI ARL/ARR/UARL on pre-Evergreen (r600).
 * r600 has no FLT_TO_INT_FLOOR, so ARL is emitted as FLOOR followed by
 * FLT_TO_INT through ar_reg; ARR and UARL are single instructions.
 * Clears bc->ar_loaded afterwards.
 * NOTE(review): this extract has dropped interleaved lines (the `int r;`
 * declaration, `return r;` bodies, breaks, default case, braces); surviving
 * tokens are kept as-is — reconcile against upstream before compiling.
 */
static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;

	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_ARL:
		/* step 1: floor the float source into ar_reg */
		memset(&alu, 0, sizeof(alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR;
		r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
		alu.dst.sel = ctx->bc->ar_reg;
		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			/* [return lost in extract] */

		/* step 2: convert the floored value in place */
		memset(&alu, 0, sizeof(alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT;
		alu.src[0].sel = ctx->bc->ar_reg;
		alu.dst.sel = ctx->bc->ar_reg;
		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			/* [return/break lost in extract] */
	case TGSI_OPCODE_ARR:
		/* ARR: single float->int conversion */
		memset(&alu, 0, sizeof(alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT;
		r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
		alu.dst.sel = ctx->bc->ar_reg;
		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			/* [return/break lost in extract] */
	case TGSI_OPCODE_UARL:
		/* UARL: integer source — plain move into ar_reg */
		memset(&alu, 0, sizeof(alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
		r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
		alu.dst.sel = ctx->bc->ar_reg;
		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			/* [return/break/default lost in extract] */

	/* force a reload before the next indexed access */
	ctx->bc->ar_loaded = 0;
/*
 * Emit TGSI DST (distance vector): per channel
 *   dst.x = 1, dst.y = src0.y*src1.y, dst.z = src0.z, dst.w = src1.w,
 * implemented as a MUL in every channel with constant-1 substituted for
 * src0 on channels 0/3 and for src1 on channels 0/2.
 * NOTE(review): else-branches, error check, closing braces and the final
 * return were dropped by the extraction; surviving tokens kept as-is.
 */
static int tgsi_opdst(struct r600_shader_ctx *ctx)
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

		if (i == 0 || i == 3) {
			/* x and w ignore src0: multiply by 1.0 */
			alu.src[0].sel = V_SQ_ALU_SRC_1;
			/* [else branch lost in extract] */
			r600_bytecode_src(&alu.src[0], &ctx->src[0], i);

		if (i == 0 || i == 2) {
			/* x and z ignore src1: multiply by 1.0 */
			alu.src[1].sel = V_SQ_ALU_SRC_1;
			/* [else branch lost in extract] */
			r600_bytecode_src(&alu.src[1], &ctx->src[1], i);

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		/* [error check / return lost in extract] */
/*
 * Emit a predicate-setting ALU op (src0 <op> 0) used to open conditional
 * control flow: sets execute_mask/update_pred and adds the instruction in
 * an ALU_PUSH_BEFORE clause so the active mask is pushed first.
 * NOTE(review): the `int r;` declaration, assignment of `opcode` to the alu,
 * and the final return were dropped by the extraction.
 */
static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
	struct r600_bytecode_alu alu;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	/* this op both masks execution and updates the predicate */
	alu.execute_mask = 1;
	alu.update_pred = 1;

	alu.dst.sel = ctx->temp_reg;

	/* compare src0 against the constant-zero operand */
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
	alu.src[1].sel = V_SQ_ALU_SRC_0;
	alu.src[1].chan = 0;

	r = r600_bytecode_add_alu_type(ctx->bc, &alu, CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE));
/*
 * Pop `pops` levels off the hardware control-flow stack. When possible the
 * pop is folded into the last ALU clause (ALU -> ALU_POP_AFTER, or
 * ALU_POP_AFTER -> ALU_POP2_AFTER); otherwise an explicit CF POP
 * instruction is emitted with its pop_count and cf_addr set.
 * Note the parameter intentionally shadows the function name in upstream.
 * NOTE(review): declarations of `alu_pop`, several branch bodies, braces and
 * the return were dropped by the extraction; surviving tokens kept as-is.
 */
static int pops(struct r600_shader_ctx *ctx, int pops)
	unsigned force_pop = ctx->bc->force_add_cf;

	if (ctx->bc->cf_last) {
		/* `alu_pop` is derived from the last CF inst (lines lost in extract) */
		if (ctx->bc->cf_last->inst == CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU))
			/* [body lost in extract] */
		else if (ctx->bc->cf_last->inst == CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER))
			/* [body lost in extract] */

			/* fold one pop into the trailing ALU clause */
			ctx->bc->cf_last->inst = CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER);
			ctx->bc->force_add_cf = 1;
		} else if (alu_pop == 2) {
			/* fold a second pop: upgrade to POP2_AFTER */
			ctx->bc->cf_last->inst = CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER);
			ctx->bc->force_add_cf = 1;

		/* fallback: explicit POP CF instruction */
		r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_POP));
		ctx->bc->cf_last->pop_count = pops;
		/* POP resumes at the CF slot right after itself */
		ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
/*
 * Decrease the tracked per-call-level stack usage when a control-flow
 * construct closes. The decrement size depends on `reason` (the switch
 * itself was lost in this extract): one entry in the common cases, four
 * for the FC_LOOP case.
 */
static inline void callstack_decrease_current(struct r600_shader_ctx *ctx, unsigned reason)
		ctx->bc->callstack[ctx->bc->call_sp].current--;
		/* loops reserve four entries */
		ctx->bc->callstack[ctx->bc->call_sp].current -= 4;
		/* TODO: for 16 vp asic should -= 2; */
		ctx->bc->callstack[ctx->bc->call_sp].current--;
/*
 * Track the worst-case hardware call-stack depth. With `check_max_only`
 * set, only raise `max` if current + diff would exceed it (diff is set in
 * lines lost from this extract); otherwise bump `current` by a
 * reason-dependent amount and fold it into `max`.
 * NOTE(review): the switch on `reason`, `diff` computation, braces and
 * returns were dropped by the extraction; surviving tokens kept as-is.
 */
static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only)
	if (check_max_only) {
		/* speculative check: would pushing `diff` entries exceed max? */
		if ((ctx->bc->callstack[ctx->bc->call_sp].current + diff) >
		    ctx->bc->callstack[ctx->bc->call_sp].max) {
			ctx->bc->callstack[ctx->bc->call_sp].max =
				ctx->bc->callstack[ctx->bc->call_sp].current + diff;

	/* actual push accounting (per-reason cases; switch lost in extract) */
		ctx->bc->callstack[ctx->bc->call_sp].current++;
		/* loops reserve four entries */
		ctx->bc->callstack[ctx->bc->call_sp].current += 4;
		ctx->bc->callstack[ctx->bc->call_sp].current++;

	if ((ctx->bc->callstack[ctx->bc->call_sp].current) >
	    ctx->bc->callstack[ctx->bc->call_sp].max) {
		ctx->bc->callstack[ctx->bc->call_sp].max =
			ctx->bc->callstack[ctx->bc->call_sp].current;
5356 static void fc_set_mid(struct r600_shader_ctx
*ctx
, int fc_sp
)
5358 struct r600_cf_stack_entry
*sp
= &ctx
->bc
->fc_stack
[fc_sp
];
5360 sp
->mid
= realloc((void *)sp
->mid
,
5361 sizeof(struct r600_bytecode_cf
*) * (sp
->num_mid
+ 1));
5362 sp
->mid
[sp
->num_mid
] = ctx
->bc
->cf_last
;
/*
 * Open a new flow-control stack frame of the given type (FC_IF/FC_LOOP),
 * remembering the CF instruction that starts the construct.
 * NOTE(review): the fc_sp increment and assertions were dropped by the
 * extraction; surviving tokens kept as-is.
 */
static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
	ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
	ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
/*
 * Close the top flow-control stack frame.
 * NOTE(review): only the frame lookup survives in this extract; the
 * cleanup of sp->mid and the fc_sp decrement were dropped.
 */
static void fc_poplevel(struct r600_shader_ctx *ctx)
	struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
/*
 * Emit a CF RETURN instruction.
 * NOTE(review): the return statement was dropped by the extraction.
 */
static int emit_return(struct r600_shader_ctx *ctx)
	r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN));
/*
 * Emit a CF JUMP with pop_count set; the jump target is computed from
 * `offset` in lines lost from this extract (the original carried an
 * "XXX work out offset" marker).
 */
static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
	r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_JUMP));
	ctx->bc->cf_last->pop_count = pops;
	/* XXX work out offset */
/*
 * Set the "return requested inside loop" flag to `flag_value`
 * (a V_SQ_ALU_SRC_* constant at the call sites below).
 * NOTE(review): the function body was entirely dropped by the extraction.
 */
static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
/*
 * Test the loop-return flag (used by emit_return_on_flag).
 * NOTE(review): the function body was entirely dropped by the extraction.
 */
static void emit_testflag(struct r600_shader_ctx *ctx)
/*
 * If the loop-return flag is set: jump past the construct, clear the flag
 * (set it to constant 0), and pop ifidx+1 stack levels.
 * NOTE(review): the flag test emission preceding these calls was dropped
 * by the extraction.
 */
static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
	emit_jump_to_offset(ctx, 1, 4);
	emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
	pops(ctx, ifidx + 1);
/*
 * Emit the instruction-table break/continue CF op with one level popped,
 * and register it as a "mid" entry of the enclosing loop frame so
 * tgsi_endloop can patch its target address.
 * NOTE(review): some surrounding lines were dropped by the extraction.
 */
static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fc_sp);
/*
 * TGSI IF: emit PRED_SETNE_INT against zero (pushes the active mask), a
 * JUMP to be patched by ELSE/ENDIF, open an FC_IF frame, and account for
 * the stack push.
 * NOTE(review): the return statement was dropped by the extraction.
 */
static int tgsi_if(struct r600_shader_ctx *ctx)
	emit_logic_pred(ctx, CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT));

	r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_JUMP));

	fc_pushlevel(ctx, FC_IF);

	callstack_check_depth(ctx, FC_PUSH_VPM, 0);
/*
 * TGSI ELSE: emit an ELSE CF op (pop one level), register it as the mid
 * entry of the current FC_IF frame, and patch the opening JUMP's target
 * to this ELSE's id.
 * NOTE(review): the return statement was dropped by the extraction.
 */
static int tgsi_else(struct r600_shader_ctx *ctx)
	r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_ELSE));
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, ctx->bc->fc_sp);
	/* make the IF's JUMP land on this ELSE */
	ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
/*
 * TGSI ENDIF: validate pairing, then patch jump targets — with no ELSE,
 * the IF's JUMP skips past the ENDIF and pops one level; with an ELSE,
 * the ELSE (mid[0]) jumps past the ENDIF. Finally undo the stack-depth
 * accounting for the IF's push.
 * NOTE(review): pops()/fc_poplevel() calls, else keyword, braces and the
 * return were dropped by the extraction; surviving tokens kept as-is.
 */
static int tgsi_endif(struct r600_shader_ctx *ctx)
	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
		R600_ERR("if/endif unbalanced in shader\n");
		/* [error return lost in extract] */

	if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
		/* no ELSE: IF's JUMP goes past the ENDIF and pops the mask */
		ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
		ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
		/* [else branch header lost in extract] */
		/* ELSE present: its jump goes past the ENDIF */
		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;

	callstack_decrease_current(ctx, FC_PUSH_VPM);
/*
 * TGSI BGNLOOP: emit LOOP_START_DX10, open an FC_LOOP frame, and account
 * for the loop's stack usage.
 * NOTE(review): the return statement was dropped by the extraction.
 */
static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
	/* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
	 * limited to 4096 iterations, like the other LOOP_* instructions. */
	r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_DX10));

	fc_pushlevel(ctx, FC_LOOP);

	/* check stack depth */
	callstack_check_depth(ctx, FC_LOOP, 0);
/*
 * TGSI ENDLOOP: emit LOOP_END, validate pairing, then fix up addresses:
 * LOOP_END targets the CF after LOOP_START, LOOP_START targets the CF
 * after LOOP_END, and every recorded BRK/CONT targets the LOOP_END.
 * Finally undo the loop's stack-depth accounting.
 * NOTE(review): the `int i;` declaration, fc_poplevel() call, braces and
 * the return were dropped by the extraction; surviving tokens kept as-is.
 */
static int tgsi_endloop(struct r600_shader_ctx *ctx)
	r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END));

	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
		R600_ERR("loop/endloop in shader code are not paired.\n");
		/* [error return lost in extract] */

	/* fixup loop pointers - from r600isa
	   LOOP END points to CF after LOOP START,
	   LOOP START point to CF after LOOP END
	   BRK/CONT point to LOOP END CF
	 */
	ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
	ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;

	for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
	/* XXX add LOOPRET support */

	callstack_decrease_current(ctx, FC_LOOP);
/*
 * TGSI BRK/CONT: scan the flow-control stack downward for the innermost
 * FC_LOOP frame, emit the table-selected break/continue CF op, register
 * it as a mid entry of that loop for later address patching, and verify
 * the stack depth.
 * NOTE(review): the `int fscp;` declaration, loop break, the not-found
 * check around the error message, and the return were dropped by the
 * extraction; surviving tokens kept as-is.
 */
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
			/* [break lost in extract] */

		R600_ERR("Break not inside loop/endloop pair\n");
		/* [error return lost in extract] */

	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);

	fc_set_mid(ctx, fscp);

	callstack_check_depth(ctx, FC_PUSH_VPM, 1);
/*
 * TGSI UMAD (unsigned multiply-add): two passes over the written
 * channels — first MULLO_UINT of src0*src1 into temp_reg, then ADD_INT of
 * temp_reg + src2 into the real destination.
 * NOTE(review): `int i, j, r;` declarations, continue statements, alu.last
 * settings, error checks, closing braces and the return were dropped by
 * the extraction; surviving tokens kept as-is.
 */
static int tgsi_umad(struct r600_shader_ctx *ctx)
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);

	/* pass 1: temp = src0 * src1 (unsigned low multiply) */
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			/* [continue lost in extract] */

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.dst.sel = ctx->temp_reg;

		alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT);
		for (j = 0; j < 2; j++) {
			r600_bytecode_src(&alu.src[j], &ctx->src[j], i);

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		/* [error check lost in extract] */

	/* pass 2: dst = temp + src2 */
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			/* [continue lost in extract] */

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

		alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT);

		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;

		r600_bytecode_src(&alu.src[1], &ctx->src[2], i);

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		/* [error check / return lost in extract] */
/*
 * TGSI-opcode dispatch table for pre-Evergreen (r600/r700) chips, indexed
 * by TGSI opcode. Each row pairs a TGSI opcode with a hardware opcode
 * (ALU V_SQ_*, TEX SQ_TEX_INST_*, or CF V_SQ_CF_*) and the emit callback.
 * Rows whose callback is tgsi_unsupported are unimplemented opcodes;
 * bare-number rows (20, 22, 23, ...) are gaps in the TGSI opcode space.
 * NOTE(review): row-field meaning (opcode, is-op3 flag, hw opcode,
 * callback) is assumed from usage — confirm against
 * struct r600_shader_tgsi_instruction. Some rows and the table's closing
 * "};" were dropped by the extraction.
 */
static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	{TGSI_OPCODE_ARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_r600_arl},
	{TGSI_OPCODE_MOV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_LIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},

	/* RCP uses the clamped reciprocal for GL semantics.
	 * For state trackers other than OpenGL, we'll want to use
	 * _RECIP_IEEE instead.
	 */
	{TGSI_OPCODE_RCP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED, tgsi_trans_srcx_replicate},

	{TGSI_OPCODE_RSQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_rsq},
	{TGSI_OPCODE_EXP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_exp},
	{TGSI_OPCODE_LOG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_log},
	{TGSI_OPCODE_MUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
	{TGSI_OPCODE_ADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_DP3, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DP4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_opdst},
	{TGSI_OPCODE_MIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
	{TGSI_OPCODE_MAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
	/* SLT/SLE swap operands to reuse the GT/GE comparisons */
	{TGSI_OPCODE_SLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2_swap},
	{TGSI_OPCODE_SGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2},
	/* MAD is the only three-source (OP3) row in this table */
	{TGSI_OPCODE_MAD, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
	{TGSI_OPCODE_SUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_LRP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
	{TGSI_OPCODE_CND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2A, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FRC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT, tgsi_op2},
	{TGSI_OPCODE_CLAMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FLR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR, tgsi_op2},
	{TGSI_OPCODE_ROUND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RNDNE, tgsi_op2},
	{TGSI_OPCODE_EX2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_LG2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_POW, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_pow},
	{TGSI_OPCODE_XPD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_xpd},
	{32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ABS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_RCC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DPH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_COS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS, tgsi_trig},
	{TGSI_OPCODE_DDX, 0, SQ_TEX_INST_GET_GRADIENTS_H, tgsi_tex},
	{TGSI_OPCODE_DDY, 0, SQ_TEX_INST_GET_GRADIENTS_V, tgsi_tex},
	{TGSI_OPCODE_KILP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill},  /* predicated kill */
	{TGSI_OPCODE_PK2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE, tgsi_op2},
	{TGSI_OPCODE_SFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SGT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2},
	{TGSI_OPCODE_SIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN, tgsi_trig},
	{TGSI_OPCODE_SLE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2_swap},
	{TGSI_OPCODE_SNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE, tgsi_op2},
	{TGSI_OPCODE_STR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXD, 0, SQ_TEX_INST_SAMPLE_G, tgsi_tex},
	{TGSI_OPCODE_TXP, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_UP2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_X2D, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_r600_arl},
	{TGSI_OPCODE_BRA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CAL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RET, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SSG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_ssg},
	{TGSI_OPCODE_CMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_cmp},
	{TGSI_OPCODE_SCS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_scs},
	{TGSI_OPCODE_TXB, 0, SQ_TEX_INST_SAMPLE_LB, tgsi_tex},
	{TGSI_OPCODE_NRM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_TXL, 0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_BRK, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK, tgsi_loop_brk_cont},
	{TGSI_OPCODE_IF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_if},
	{75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ELSE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_else},
	{TGSI_OPCODE_ENDIF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endif},
	{79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PUSHA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POPA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CEIL, tgsi_op2},
	{TGSI_OPCODE_I2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT, tgsi_op2_trans},
	{TGSI_OPCODE_NOT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT, tgsi_op2},
	{TGSI_OPCODE_TRUNC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_op2},
	{TGSI_OPCODE_SHL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT, tgsi_op2_trans},
	{88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_AND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT, tgsi_op2},
	{TGSI_OPCODE_OR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT, tgsi_op2},
	{TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_imod},
	{TGSI_OPCODE_XOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT, tgsi_op2},
	{TGSI_OPCODE_SAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXF, 0, SQ_TEX_INST_LD, tgsi_tex},
	{TGSI_OPCODE_TXQ, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO, tgsi_tex},
	{TGSI_OPCODE_CONT, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE, tgsi_loop_brk_cont},
	{TGSI_OPCODE_EMIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDPRIM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_bgnloop},
	{TGSI_OPCODE_BGNSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
	{TGSI_OPCODE_ENDSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXQ_LZ, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO, tgsi_tex},
	{104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CALLNZ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IFC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BREAKC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill},  /* conditional kill */
	{TGSI_OPCODE_END, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end},  /* aka HALT */
	{118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2I, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT, tgsi_op2_trans},
	{TGSI_OPCODE_IDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_idiv},
	{TGSI_OPCODE_IMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT, tgsi_op2},
	{TGSI_OPCODE_IMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT, tgsi_op2},
	{TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT, tgsi_ineg},
	{TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT, tgsi_op2},
	{TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT, tgsi_op2_trans},
	{TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
	{TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_op2_trans},
	{TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT, tgsi_op2_trans},
	{TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT, tgsi_op2},
	{TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_udiv},
	{TGSI_OPCODE_UMAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umad},
	{TGSI_OPCODE_UMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT, tgsi_op2},
	{TGSI_OPCODE_UMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT, tgsi_op2},
	{TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umod},
	{TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT, tgsi_op2_trans},
	{TGSI_OPCODE_USEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT, tgsi_op2},
	{TGSI_OPCODE_USGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT, tgsi_op2},
	{TGSI_OPCODE_USHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT, tgsi_op2_trans},
	{TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT, tgsi_op2_swap},
	{TGSI_OPCODE_USNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT, tgsi_op2_swap},
	{TGSI_OPCODE_SWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CASE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DEFAULT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_UARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT, tgsi_r600_arl},
	{TGSI_OPCODE_UCMP, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_IABS, 0, 0, tgsi_iabs},
	{TGSI_OPCODE_ISSG, 0, 0, tgsi_issg},
	{TGSI_OPCODE_LOAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_STORE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MFENCE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LFENCE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SFENCE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BARRIER, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMXCHG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMCAS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMAND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMXOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMIMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMIMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX2, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXB2, 0, SQ_TEX_INST_SAMPLE_LB, tgsi_tex},
	{TGSI_OPCODE_TXL2, 0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_LAST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* [closing "};" lost in extract] */
5791 static struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction
[] = {
5792 {TGSI_OPCODE_ARL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_eg_arl
},
5793 {TGSI_OPCODE_MOV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
5794 {TGSI_OPCODE_LIT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lit
},
5795 {TGSI_OPCODE_RCP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
, tgsi_trans_srcx_replicate
},
5796 {TGSI_OPCODE_RSQ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
, tgsi_rsq
},
5797 {TGSI_OPCODE_EXP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_exp
},
5798 {TGSI_OPCODE_LOG
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_log
},
5799 {TGSI_OPCODE_MUL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
, tgsi_op2
},
5800 {TGSI_OPCODE_ADD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
5801 {TGSI_OPCODE_DP3
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
5802 {TGSI_OPCODE_DP4
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
5803 {TGSI_OPCODE_DST
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_opdst
},
5804 {TGSI_OPCODE_MIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
, tgsi_op2
},
5805 {TGSI_OPCODE_MAX
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
, tgsi_op2
},
5806 {TGSI_OPCODE_SLT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2_swap
},
5807 {TGSI_OPCODE_SGE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2
},
5808 {TGSI_OPCODE_MAD
, 1, EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
, tgsi_op3
},
5809 {TGSI_OPCODE_SUB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
5810 {TGSI_OPCODE_LRP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lrp
},
5811 {TGSI_OPCODE_CND
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5813 {20, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5814 {TGSI_OPCODE_DP2A
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5816 {22, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5817 {23, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5818 {TGSI_OPCODE_FRC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
, tgsi_op2
},
5819 {TGSI_OPCODE_CLAMP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5820 {TGSI_OPCODE_FLR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
, tgsi_op2
},
5821 {TGSI_OPCODE_ROUND
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RNDNE
, tgsi_op2
},
5822 {TGSI_OPCODE_EX2
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
, tgsi_trans_srcx_replicate
},
5823 {TGSI_OPCODE_LG2
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
, tgsi_trans_srcx_replicate
},
5824 {TGSI_OPCODE_POW
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_pow
},
5825 {TGSI_OPCODE_XPD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_xpd
},
5827 {32, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5828 {TGSI_OPCODE_ABS
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
5829 {TGSI_OPCODE_RCC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5830 {TGSI_OPCODE_DPH
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
5831 {TGSI_OPCODE_COS
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
, tgsi_trig
},
5832 {TGSI_OPCODE_DDX
, 0, SQ_TEX_INST_GET_GRADIENTS_H
, tgsi_tex
},
5833 {TGSI_OPCODE_DDY
, 0, SQ_TEX_INST_GET_GRADIENTS_V
, tgsi_tex
},
5834 {TGSI_OPCODE_KILP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* predicated kill */
5835 {TGSI_OPCODE_PK2H
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5836 {TGSI_OPCODE_PK2US
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5837 {TGSI_OPCODE_PK4B
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5838 {TGSI_OPCODE_PK4UB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5839 {TGSI_OPCODE_RFL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5840 {TGSI_OPCODE_SEQ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
, tgsi_op2
},
5841 {TGSI_OPCODE_SFL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5842 {TGSI_OPCODE_SGT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2
},
5843 {TGSI_OPCODE_SIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
, tgsi_trig
},
5844 {TGSI_OPCODE_SLE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2_swap
},
5845 {TGSI_OPCODE_SNE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
, tgsi_op2
},
5846 {TGSI_OPCODE_STR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5847 {TGSI_OPCODE_TEX
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
5848 {TGSI_OPCODE_TXD
, 0, SQ_TEX_INST_SAMPLE_G
, tgsi_tex
},
5849 {TGSI_OPCODE_TXP
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
5850 {TGSI_OPCODE_UP2H
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5851 {TGSI_OPCODE_UP2US
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5852 {TGSI_OPCODE_UP4B
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5853 {TGSI_OPCODE_UP4UB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5854 {TGSI_OPCODE_X2D
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5855 {TGSI_OPCODE_ARA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5856 {TGSI_OPCODE_ARR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_eg_arl
},
5857 {TGSI_OPCODE_BRA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5858 {TGSI_OPCODE_CAL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5859 {TGSI_OPCODE_RET
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5860 {TGSI_OPCODE_SSG
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_ssg
},
5861 {TGSI_OPCODE_CMP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_cmp
},
5862 {TGSI_OPCODE_SCS
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_scs
},
5863 {TGSI_OPCODE_TXB
, 0, SQ_TEX_INST_SAMPLE_LB
, tgsi_tex
},
5864 {TGSI_OPCODE_NRM
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5865 {TGSI_OPCODE_DIV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5866 {TGSI_OPCODE_DP2
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
5867 {TGSI_OPCODE_TXL
, 0, SQ_TEX_INST_SAMPLE_L
, tgsi_tex
},
5868 {TGSI_OPCODE_BRK
, 0, EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
, tgsi_loop_brk_cont
},
5869 {TGSI_OPCODE_IF
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_if
},
5871 {75, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5872 {76, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5873 {TGSI_OPCODE_ELSE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_else
},
5874 {TGSI_OPCODE_ENDIF
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endif
},
5876 {79, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5877 {80, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5878 {TGSI_OPCODE_PUSHA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5879 {TGSI_OPCODE_POPA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5880 {TGSI_OPCODE_CEIL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CEIL
, tgsi_op2
},
5881 {TGSI_OPCODE_I2F
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
, tgsi_op2_trans
},
5882 {TGSI_OPCODE_NOT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT
, tgsi_op2
},
5883 {TGSI_OPCODE_TRUNC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
, tgsi_op2
},
5884 {TGSI_OPCODE_SHL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
, tgsi_op2
},
5886 {88, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5887 {TGSI_OPCODE_AND
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT
, tgsi_op2
},
5888 {TGSI_OPCODE_OR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT
, tgsi_op2
},
5889 {TGSI_OPCODE_MOD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_imod
},
5890 {TGSI_OPCODE_XOR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT
, tgsi_op2
},
5891 {TGSI_OPCODE_SAD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5892 {TGSI_OPCODE_TXF
, 0, SQ_TEX_INST_LD
, tgsi_tex
},
5893 {TGSI_OPCODE_TXQ
, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO
, tgsi_tex
},
5894 {TGSI_OPCODE_CONT
, 0, EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
, tgsi_loop_brk_cont
},
5895 {TGSI_OPCODE_EMIT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5896 {TGSI_OPCODE_ENDPRIM
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5897 {TGSI_OPCODE_BGNLOOP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_bgnloop
},
5898 {TGSI_OPCODE_BGNSUB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5899 {TGSI_OPCODE_ENDLOOP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endloop
},
5900 {TGSI_OPCODE_ENDSUB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5901 {TGSI_OPCODE_TXQ_LZ
, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO
, tgsi_tex
},
5903 {104, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5904 {105, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5905 {106, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5906 {TGSI_OPCODE_NOP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5908 {108, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5909 {109, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5910 {110, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5911 {111, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5912 {TGSI_OPCODE_NRM4
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5913 {TGSI_OPCODE_CALLNZ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5914 {TGSI_OPCODE_IFC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5915 {TGSI_OPCODE_BREAKC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5916 {TGSI_OPCODE_KIL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* conditional kill */
5917 {TGSI_OPCODE_END
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_end
}, /* aka HALT */
5919 {118, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5920 {TGSI_OPCODE_F2I
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
, tgsi_f2i
},
5921 {TGSI_OPCODE_IDIV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_idiv
},
5922 {TGSI_OPCODE_IMAX
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT
, tgsi_op2
},
5923 {TGSI_OPCODE_IMIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT
, tgsi_op2
},
5924 {TGSI_OPCODE_INEG
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
, tgsi_ineg
},
5925 {TGSI_OPCODE_ISGE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT
, tgsi_op2
},
5926 {TGSI_OPCODE_ISHR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
, tgsi_op2
},
5927 {TGSI_OPCODE_ISLT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT
, tgsi_op2_swap
},
5928 {TGSI_OPCODE_F2U
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT
, tgsi_f2i
},
5929 {TGSI_OPCODE_U2F
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
, tgsi_op2_trans
},
5930 {TGSI_OPCODE_UADD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
, tgsi_op2
},
5931 {TGSI_OPCODE_UDIV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_udiv
},
5932 {TGSI_OPCODE_UMAD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_umad
},
5933 {TGSI_OPCODE_UMAX
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT
, tgsi_op2
},
5934 {TGSI_OPCODE_UMIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT
, tgsi_op2
},
5935 {TGSI_OPCODE_UMOD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_umod
},
5936 {TGSI_OPCODE_UMUL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
, tgsi_op2_trans
},
5937 {TGSI_OPCODE_USEQ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT
, tgsi_op2
},
5938 {TGSI_OPCODE_USGE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT
, tgsi_op2
},
5939 {TGSI_OPCODE_USHR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
, tgsi_op2
},
5940 {TGSI_OPCODE_USLT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT
, tgsi_op2_swap
},
5941 {TGSI_OPCODE_USNE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT
, tgsi_op2
},
5942 {TGSI_OPCODE_SWITCH
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5943 {TGSI_OPCODE_CASE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5944 {TGSI_OPCODE_DEFAULT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5945 {TGSI_OPCODE_ENDSWITCH
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5946 {TGSI_OPCODE_SAMPLE
, 0, 0, tgsi_unsupported
},
5947 {TGSI_OPCODE_SAMPLE_I
, 0, 0, tgsi_unsupported
},
5948 {TGSI_OPCODE_SAMPLE_I_MS
, 0, 0, tgsi_unsupported
},
5949 {TGSI_OPCODE_SAMPLE_B
, 0, 0, tgsi_unsupported
},
5950 {TGSI_OPCODE_SAMPLE_C
, 0, 0, tgsi_unsupported
},
5951 {TGSI_OPCODE_SAMPLE_C_LZ
, 0, 0, tgsi_unsupported
},
5952 {TGSI_OPCODE_SAMPLE_D
, 0, 0, tgsi_unsupported
},
5953 {TGSI_OPCODE_SAMPLE_L
, 0, 0, tgsi_unsupported
},
5954 {TGSI_OPCODE_GATHER4
, 0, 0, tgsi_unsupported
},
5955 {TGSI_OPCODE_SVIEWINFO
, 0, 0, tgsi_unsupported
},
5956 {TGSI_OPCODE_SAMPLE_POS
, 0, 0, tgsi_unsupported
},
5957 {TGSI_OPCODE_SAMPLE_INFO
, 0, 0, tgsi_unsupported
},
5958 {TGSI_OPCODE_UARL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
, tgsi_eg_arl
},
5959 {TGSI_OPCODE_UCMP
, 0, 0, tgsi_unsupported
},
5960 {TGSI_OPCODE_IABS
, 0, 0, tgsi_iabs
},
5961 {TGSI_OPCODE_ISSG
, 0, 0, tgsi_issg
},
5962 {TGSI_OPCODE_LOAD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5963 {TGSI_OPCODE_STORE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5964 {TGSI_OPCODE_MFENCE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5965 {TGSI_OPCODE_LFENCE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5966 {TGSI_OPCODE_SFENCE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5967 {TGSI_OPCODE_BARRIER
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5968 {TGSI_OPCODE_ATOMUADD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5969 {TGSI_OPCODE_ATOMXCHG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5970 {TGSI_OPCODE_ATOMCAS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5971 {TGSI_OPCODE_ATOMAND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5972 {TGSI_OPCODE_ATOMOR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5973 {TGSI_OPCODE_ATOMXOR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5974 {TGSI_OPCODE_ATOMUMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5975 {TGSI_OPCODE_ATOMUMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5976 {TGSI_OPCODE_ATOMIMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5977 {TGSI_OPCODE_ATOMIMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5978 {TGSI_OPCODE_TEX2
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
5979 {TGSI_OPCODE_TXB2
, 0, SQ_TEX_INST_SAMPLE_LB
, tgsi_tex
},
5980 {TGSI_OPCODE_TXL2
, 0, SQ_TEX_INST_SAMPLE_L
, tgsi_tex
},
5981 {TGSI_OPCODE_LAST
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
5984 static struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction
[] = {
5985 {TGSI_OPCODE_ARL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_eg_arl
},
5986 {TGSI_OPCODE_MOV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
5987 {TGSI_OPCODE_LIT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lit
},
5988 {TGSI_OPCODE_RCP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
, cayman_emit_float_instr
},
5989 {TGSI_OPCODE_RSQ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
, cayman_emit_float_instr
},
5990 {TGSI_OPCODE_EXP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_exp
},
5991 {TGSI_OPCODE_LOG
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_log
},
5992 {TGSI_OPCODE_MUL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
, tgsi_op2
},
5993 {TGSI_OPCODE_ADD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
5994 {TGSI_OPCODE_DP3
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
5995 {TGSI_OPCODE_DP4
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
5996 {TGSI_OPCODE_DST
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_opdst
},
5997 {TGSI_OPCODE_MIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
, tgsi_op2
},
5998 {TGSI_OPCODE_MAX
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
, tgsi_op2
},
5999 {TGSI_OPCODE_SLT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2_swap
},
6000 {TGSI_OPCODE_SGE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2
},
6001 {TGSI_OPCODE_MAD
, 1, EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
, tgsi_op3
},
6002 {TGSI_OPCODE_SUB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
6003 {TGSI_OPCODE_LRP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lrp
},
6004 {TGSI_OPCODE_CND
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6006 {20, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6007 {TGSI_OPCODE_DP2A
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6009 {22, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6010 {23, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6011 {TGSI_OPCODE_FRC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
, tgsi_op2
},
6012 {TGSI_OPCODE_CLAMP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6013 {TGSI_OPCODE_FLR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
, tgsi_op2
},
6014 {TGSI_OPCODE_ROUND
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RNDNE
, tgsi_op2
},
6015 {TGSI_OPCODE_EX2
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
, cayman_emit_float_instr
},
6016 {TGSI_OPCODE_LG2
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
, cayman_emit_float_instr
},
6017 {TGSI_OPCODE_POW
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, cayman_pow
},
6018 {TGSI_OPCODE_XPD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_xpd
},
6020 {32, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6021 {TGSI_OPCODE_ABS
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
6022 {TGSI_OPCODE_RCC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6023 {TGSI_OPCODE_DPH
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
6024 {TGSI_OPCODE_COS
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
, cayman_trig
},
6025 {TGSI_OPCODE_DDX
, 0, SQ_TEX_INST_GET_GRADIENTS_H
, tgsi_tex
},
6026 {TGSI_OPCODE_DDY
, 0, SQ_TEX_INST_GET_GRADIENTS_V
, tgsi_tex
},
6027 {TGSI_OPCODE_KILP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* predicated kill */
6028 {TGSI_OPCODE_PK2H
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6029 {TGSI_OPCODE_PK2US
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6030 {TGSI_OPCODE_PK4B
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6031 {TGSI_OPCODE_PK4UB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6032 {TGSI_OPCODE_RFL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6033 {TGSI_OPCODE_SEQ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
, tgsi_op2
},
6034 {TGSI_OPCODE_SFL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6035 {TGSI_OPCODE_SGT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2
},
6036 {TGSI_OPCODE_SIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
, cayman_trig
},
6037 {TGSI_OPCODE_SLE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2_swap
},
6038 {TGSI_OPCODE_SNE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
, tgsi_op2
},
6039 {TGSI_OPCODE_STR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6040 {TGSI_OPCODE_TEX
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
6041 {TGSI_OPCODE_TXD
, 0, SQ_TEX_INST_SAMPLE_G
, tgsi_tex
},
6042 {TGSI_OPCODE_TXP
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
6043 {TGSI_OPCODE_UP2H
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6044 {TGSI_OPCODE_UP2US
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6045 {TGSI_OPCODE_UP4B
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6046 {TGSI_OPCODE_UP4UB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6047 {TGSI_OPCODE_X2D
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6048 {TGSI_OPCODE_ARA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6049 {TGSI_OPCODE_ARR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_eg_arl
},
6050 {TGSI_OPCODE_BRA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6051 {TGSI_OPCODE_CAL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6052 {TGSI_OPCODE_RET
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6053 {TGSI_OPCODE_SSG
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_ssg
},
6054 {TGSI_OPCODE_CMP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_cmp
},
6055 {TGSI_OPCODE_SCS
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_scs
},
6056 {TGSI_OPCODE_TXB
, 0, SQ_TEX_INST_SAMPLE_LB
, tgsi_tex
},
6057 {TGSI_OPCODE_NRM
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6058 {TGSI_OPCODE_DIV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6059 {TGSI_OPCODE_DP2
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
6060 {TGSI_OPCODE_TXL
, 0, SQ_TEX_INST_SAMPLE_L
, tgsi_tex
},
6061 {TGSI_OPCODE_BRK
, 0, EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
, tgsi_loop_brk_cont
},
6062 {TGSI_OPCODE_IF
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_if
},
6064 {75, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6065 {76, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6066 {TGSI_OPCODE_ELSE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_else
},
6067 {TGSI_OPCODE_ENDIF
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endif
},
6069 {79, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6070 {80, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6071 {TGSI_OPCODE_PUSHA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6072 {TGSI_OPCODE_POPA
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6073 {TGSI_OPCODE_CEIL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CEIL
, tgsi_op2
},
6074 {TGSI_OPCODE_I2F
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
, tgsi_op2
},
6075 {TGSI_OPCODE_NOT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT
, tgsi_op2
},
6076 {TGSI_OPCODE_TRUNC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
, tgsi_op2
},
6077 {TGSI_OPCODE_SHL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
, tgsi_op2
},
6079 {88, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6080 {TGSI_OPCODE_AND
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT
, tgsi_op2
},
6081 {TGSI_OPCODE_OR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT
, tgsi_op2
},
6082 {TGSI_OPCODE_MOD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_imod
},
6083 {TGSI_OPCODE_XOR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT
, tgsi_op2
},
6084 {TGSI_OPCODE_SAD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6085 {TGSI_OPCODE_TXF
, 0, SQ_TEX_INST_LD
, tgsi_tex
},
6086 {TGSI_OPCODE_TXQ
, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO
, tgsi_tex
},
6087 {TGSI_OPCODE_CONT
, 0, EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
, tgsi_loop_brk_cont
},
6088 {TGSI_OPCODE_EMIT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6089 {TGSI_OPCODE_ENDPRIM
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6090 {TGSI_OPCODE_BGNLOOP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_bgnloop
},
6091 {TGSI_OPCODE_BGNSUB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6092 {TGSI_OPCODE_ENDLOOP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endloop
},
6093 {TGSI_OPCODE_ENDSUB
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6094 {TGSI_OPCODE_TXQ_LZ
, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO
, tgsi_tex
},
6096 {104, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6097 {105, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6098 {106, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6099 {TGSI_OPCODE_NOP
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6101 {108, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6102 {109, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6103 {110, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6104 {111, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6105 {TGSI_OPCODE_NRM4
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6106 {TGSI_OPCODE_CALLNZ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6107 {TGSI_OPCODE_IFC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6108 {TGSI_OPCODE_BREAKC
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6109 {TGSI_OPCODE_KIL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* conditional kill */
6110 {TGSI_OPCODE_END
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_end
}, /* aka HALT */
6112 {118, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6113 {TGSI_OPCODE_F2I
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
, tgsi_op2
},
6114 {TGSI_OPCODE_IDIV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_idiv
},
6115 {TGSI_OPCODE_IMAX
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT
, tgsi_op2
},
6116 {TGSI_OPCODE_IMIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT
, tgsi_op2
},
6117 {TGSI_OPCODE_INEG
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT
, tgsi_ineg
},
6118 {TGSI_OPCODE_ISGE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT
, tgsi_op2
},
6119 {TGSI_OPCODE_ISHR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
, tgsi_op2
},
6120 {TGSI_OPCODE_ISLT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT
, tgsi_op2_swap
},
6121 {TGSI_OPCODE_F2U
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT
, tgsi_op2
},
6122 {TGSI_OPCODE_U2F
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
, tgsi_op2
},
6123 {TGSI_OPCODE_UADD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
, tgsi_op2
},
6124 {TGSI_OPCODE_UDIV
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_udiv
},
6125 {TGSI_OPCODE_UMAD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_umad
},
6126 {TGSI_OPCODE_UMAX
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT
, tgsi_op2
},
6127 {TGSI_OPCODE_UMIN
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT
, tgsi_op2
},
6128 {TGSI_OPCODE_UMOD
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_umod
},
6129 {TGSI_OPCODE_UMUL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
, cayman_mul_int_instr
},
6130 {TGSI_OPCODE_USEQ
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT
, tgsi_op2
},
6131 {TGSI_OPCODE_USGE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT
, tgsi_op2
},
6132 {TGSI_OPCODE_USHR
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
, tgsi_op2
},
6133 {TGSI_OPCODE_USLT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT
, tgsi_op2_swap
},
6134 {TGSI_OPCODE_USNE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT
, tgsi_op2
},
6135 {TGSI_OPCODE_SWITCH
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6136 {TGSI_OPCODE_CASE
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6137 {TGSI_OPCODE_DEFAULT
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6138 {TGSI_OPCODE_ENDSWITCH
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6139 {TGSI_OPCODE_SAMPLE
, 0, 0, tgsi_unsupported
},
6140 {TGSI_OPCODE_SAMPLE_I
, 0, 0, tgsi_unsupported
},
6141 {TGSI_OPCODE_SAMPLE_I_MS
, 0, 0, tgsi_unsupported
},
6142 {TGSI_OPCODE_SAMPLE_B
, 0, 0, tgsi_unsupported
},
6143 {TGSI_OPCODE_SAMPLE_C
, 0, 0, tgsi_unsupported
},
6144 {TGSI_OPCODE_SAMPLE_C_LZ
, 0, 0, tgsi_unsupported
},
6145 {TGSI_OPCODE_SAMPLE_D
, 0, 0, tgsi_unsupported
},
6146 {TGSI_OPCODE_SAMPLE_L
, 0, 0, tgsi_unsupported
},
6147 {TGSI_OPCODE_GATHER4
, 0, 0, tgsi_unsupported
},
6148 {TGSI_OPCODE_SVIEWINFO
, 0, 0, tgsi_unsupported
},
6149 {TGSI_OPCODE_SAMPLE_POS
, 0, 0, tgsi_unsupported
},
6150 {TGSI_OPCODE_SAMPLE_INFO
, 0, 0, tgsi_unsupported
},
6151 {TGSI_OPCODE_UARL
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
, tgsi_eg_arl
},
6152 {TGSI_OPCODE_UCMP
, 0, 0, tgsi_unsupported
},
6153 {TGSI_OPCODE_IABS
, 0, 0, tgsi_iabs
},
6154 {TGSI_OPCODE_ISSG
, 0, 0, tgsi_issg
},
6155 {TGSI_OPCODE_LOAD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6156 {TGSI_OPCODE_STORE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6157 {TGSI_OPCODE_MFENCE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6158 {TGSI_OPCODE_LFENCE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6159 {TGSI_OPCODE_SFENCE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6160 {TGSI_OPCODE_BARRIER
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6161 {TGSI_OPCODE_ATOMUADD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6162 {TGSI_OPCODE_ATOMXCHG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6163 {TGSI_OPCODE_ATOMCAS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6164 {TGSI_OPCODE_ATOMAND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6165 {TGSI_OPCODE_ATOMOR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6166 {TGSI_OPCODE_ATOMXOR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6167 {TGSI_OPCODE_ATOMUMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6168 {TGSI_OPCODE_ATOMUMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6169 {TGSI_OPCODE_ATOMIMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6170 {TGSI_OPCODE_ATOMIMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
6171 {TGSI_OPCODE_TEX2
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
6172 {TGSI_OPCODE_TXB2
, 0, SQ_TEX_INST_SAMPLE_LB
, tgsi_tex
},
6173 {TGSI_OPCODE_TXL2
, 0, SQ_TEX_INST_SAMPLE_L
, tgsi_tex
},
6174 {TGSI_OPCODE_LAST
, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},