/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "pipe/p_shader_tokens.h"
24 #include "tgsi/tgsi_parse.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_dump.h"
27 #include "util/u_format.h"
28 #include "r600_screen.h"
29 #include "r600_context.h"
30 #include "r600_shader.h"
38 struct r600_shader_tgsi_instruction
;
40 struct r600_shader_ctx
{
41 struct tgsi_shader_info info
;
42 struct tgsi_parse_context parse
;
43 const struct tgsi_token
*tokens
;
45 unsigned file_offset
[TGSI_FILE_COUNT
];
47 struct r600_shader_tgsi_instruction
*inst_info
;
49 struct r600_shader
*shader
;
/* One entry per TGSI opcode: maps a TGSI opcode to its r600 ALU opcode and
 * the translation callback.  Fields tgsi_opcode/is_op3/r600_opcode are
 * reconstructed from their uses (ctx->inst_info->tgsi_opcode,
 * ctx->inst_info->r600_opcode elsewhere in this file).
 */
struct r600_shader_tgsi_instruction {
	unsigned	tgsi_opcode;	/* TGSI_OPCODE_* this entry handles */
	unsigned	is_op3;		/* non-zero if the r600 opcode is a 3-source op */
	unsigned	r600_opcode;	/* V_SQ_ALU_WORD1_OP2/OP3_* encoding */
	int (*process)(struct r600_shader_ctx *ctx);	/* emits bytecode for the instruction */
};

/* Opcode-indexed dispatch table, defined at the end of this file. */
static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
/* Forward declaration: full TGSI -> r600 bytecode translation, defined below. */
static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
65 static int r600_shader_update(struct pipe_context
*ctx
, struct r600_shader
*shader
)
67 struct r600_context
*rctx
= r600_context(ctx
);
68 const struct util_format_description
*desc
;
69 enum pipe_format resource_format
[160];
70 unsigned i
, nresources
= 0;
71 struct r600_bc
*bc
= &shader
->bc
;
72 struct r600_bc_cf
*cf
;
73 struct r600_bc_vtx
*vtx
;
75 if (shader
->processor_type
!= TGSI_PROCESSOR_VERTEX
)
77 for (i
= 0; i
< rctx
->vertex_elements
->count
; i
++) {
78 resource_format
[nresources
++] = rctx
->vertex_elements
->elements
[i
].src_format
;
80 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
82 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
83 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
84 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
85 desc
= util_format_description(resource_format
[vtx
->buffer_id
]);
87 R600_ERR("unknown format %d\n", resource_format
[vtx
->buffer_id
]);
90 vtx
->dst_sel_x
= desc
->swizzle
[0];
91 vtx
->dst_sel_y
= desc
->swizzle
[1];
92 vtx
->dst_sel_z
= desc
->swizzle
[2];
93 vtx
->dst_sel_w
= desc
->swizzle
[3];
100 return r600_bc_build(&shader
->bc
);
103 int r600_pipe_shader_create(struct pipe_context
*ctx
,
104 struct r600_context_state
*rpshader
,
105 const struct tgsi_token
*tokens
)
107 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
110 //fprintf(stderr, "--------------------------------------------------------------\n");
111 //tgsi_dump(tokens, 0);
112 if (rpshader
== NULL
)
114 rpshader
->shader
.family
= radeon_get_family(rscreen
->rw
);
115 r
= r600_shader_from_tgsi(tokens
, &rpshader
->shader
);
117 R600_ERR("translation from TGSI failed !\n");
120 r
= r600_bc_build(&rpshader
->shader
.bc
);
122 R600_ERR("building bytecode failed !\n");
125 //fprintf(stderr, "______________________________________________________________\n");
129 static int r600_pipe_shader_vs(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
131 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
132 struct r600_shader
*rshader
= &rpshader
->shader
;
133 struct radeon_state
*state
;
136 rpshader
->rstate
= radeon_state_decref(rpshader
->rstate
);
137 state
= radeon_state_shader(rscreen
->rw
, R600_STATE_SHADER
, 0, R600_SHADER_VS
);
140 for (i
= 0; i
< 10; i
++) {
141 state
->states
[R600_VS_SHADER__SPI_VS_OUT_ID_0
+ i
] = 0;
143 /* so far never got proper semantic id from tgsi */
144 for (i
= 0; i
< 32; i
++) {
145 tmp
= i
<< ((i
& 3) * 8);
146 state
->states
[R600_VS_SHADER__SPI_VS_OUT_ID_0
+ i
/ 4] |= tmp
;
148 state
->states
[R600_VS_SHADER__SPI_VS_OUT_CONFIG
] = S_0286C4_VS_EXPORT_COUNT(rshader
->noutput
- 2);
149 state
->states
[R600_VS_SHADER__SQ_PGM_RESOURCES_VS
] = S_028868_NUM_GPRS(rshader
->bc
.ngpr
) |
150 S_028868_STACK_SIZE(rshader
->bc
.nstack
);
151 rpshader
->rstate
= state
;
152 rpshader
->rstate
->bo
[0] = radeon_bo_incref(rscreen
->rw
, rpshader
->bo
);
153 rpshader
->rstate
->bo
[1] = radeon_bo_incref(rscreen
->rw
, rpshader
->bo
);
154 rpshader
->rstate
->nbo
= 2;
155 rpshader
->rstate
->placement
[0] = RADEON_GEM_DOMAIN_GTT
;
156 rpshader
->rstate
->placement
[2] = RADEON_GEM_DOMAIN_GTT
;
157 return radeon_state_pm4(state
);
160 static int r600_pipe_shader_ps(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
162 const struct pipe_rasterizer_state
*rasterizer
;
163 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
164 struct r600_shader
*rshader
= &rpshader
->shader
;
165 struct r600_context
*rctx
= r600_context(ctx
);
166 struct radeon_state
*state
;
167 unsigned i
, tmp
, exports_ps
, num_cout
;
169 rasterizer
= &rctx
->rasterizer
->state
.rasterizer
;
170 rpshader
->rstate
= radeon_state_decref(rpshader
->rstate
);
171 state
= radeon_state_shader(rscreen
->rw
, R600_STATE_SHADER
, 0, R600_SHADER_PS
);
174 for (i
= 0; i
< rshader
->ninput
; i
++) {
175 tmp
= S_028644_SEMANTIC(i
);
176 tmp
|= S_028644_SEL_CENTROID(1);
177 if (rshader
->input
[i
].name
== TGSI_SEMANTIC_COLOR
||
178 rshader
->input
[i
].name
== TGSI_SEMANTIC_BCOLOR
) {
179 tmp
|= S_028644_FLAT_SHADE(rshader
->flat_shade
);
181 if (rasterizer
->sprite_coord_enable
& (1 << i
)) {
182 tmp
|= S_028644_PT_SPRITE_TEX(1);
184 state
->states
[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0
+ i
] = tmp
;
189 for (i
= 0; i
< rshader
->noutput
; i
++) {
190 if (rshader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
)
192 else if (rshader
->output
[i
].name
== TGSI_SEMANTIC_COLOR
) {
193 exports_ps
|= (1 << (num_cout
+1));
198 /* always at least export 1 component per pixel */
201 state
->states
[R600_PS_SHADER__SPI_PS_IN_CONTROL_0
] = S_0286CC_NUM_INTERP(rshader
->ninput
) |
202 S_0286CC_PERSP_GRADIENT_ENA(1);
203 state
->states
[R600_PS_SHADER__SPI_PS_IN_CONTROL_1
] = 0x00000000;
204 state
->states
[R600_PS_SHADER__SQ_PGM_RESOURCES_PS
] = S_028868_NUM_GPRS(rshader
->bc
.ngpr
) |
205 S_028868_STACK_SIZE(rshader
->bc
.nstack
);
206 state
->states
[R600_PS_SHADER__SQ_PGM_EXPORTS_PS
] = exports_ps
;
207 rpshader
->rstate
= state
;
208 rpshader
->rstate
->bo
[0] = radeon_bo_incref(rscreen
->rw
, rpshader
->bo
);
209 rpshader
->rstate
->nbo
= 1;
210 rpshader
->rstate
->placement
[0] = RADEON_GEM_DOMAIN_GTT
;
211 return radeon_state_pm4(state
);
214 static int r600_pipe_shader(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
216 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
217 struct r600_context
*rctx
= r600_context(ctx
);
218 struct r600_shader
*rshader
= &rpshader
->shader
;
221 /* copy new shader */
222 radeon_bo_decref(rscreen
->rw
, rpshader
->bo
);
224 rpshader
->bo
= radeon_bo(rscreen
->rw
, 0, rshader
->bc
.ndw
* 4,
226 if (rpshader
->bo
== NULL
) {
229 radeon_bo_map(rscreen
->rw
, rpshader
->bo
);
230 memcpy(rpshader
->bo
->data
, rshader
->bc
.bytecode
, rshader
->bc
.ndw
* 4);
231 radeon_bo_unmap(rscreen
->rw
, rpshader
->bo
);
233 rshader
->flat_shade
= rctx
->flat_shade
;
234 switch (rshader
->processor_type
) {
235 case TGSI_PROCESSOR_VERTEX
:
236 r
= r600_pipe_shader_vs(ctx
, rpshader
);
238 case TGSI_PROCESSOR_FRAGMENT
:
239 r
= r600_pipe_shader_ps(ctx
, rpshader
);
248 int r600_pipe_shader_update(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
250 struct r600_context
*rctx
= r600_context(ctx
);
253 if (rpshader
== NULL
)
255 /* there should be enough input */
256 if (rctx
->vertex_elements
->count
< rpshader
->shader
.bc
.nresource
) {
257 R600_ERR("%d resources provided, expecting %d\n",
258 rctx
->vertex_elements
->count
, rpshader
->shader
.bc
.nresource
);
261 r
= r600_shader_update(ctx
, &rpshader
->shader
);
264 return r600_pipe_shader(ctx
, rpshader
);
267 static int tgsi_is_supported(struct r600_shader_ctx
*ctx
)
269 struct tgsi_full_instruction
*i
= &ctx
->parse
.FullToken
.FullInstruction
;
272 if (i
->Instruction
.NumDstRegs
> 1) {
273 R600_ERR("too many dst (%d)\n", i
->Instruction
.NumDstRegs
);
276 if (i
->Instruction
.Predicate
) {
277 R600_ERR("predicate unsupported\n");
281 if (i
->Instruction
.Label
) {
282 R600_ERR("label unsupported\n");
286 for (j
= 0; j
< i
->Instruction
.NumSrcRegs
; j
++) {
287 if (i
->Src
[j
].Register
.Dimension
||
288 i
->Src
[j
].Register
.Absolute
) {
289 R600_ERR("unsupported src %d (dimension %d|absolute %d)\n", j
,
290 i
->Src
[j
].Register
.Dimension
,
291 i
->Src
[j
].Register
.Absolute
);
295 for (j
= 0; j
< i
->Instruction
.NumDstRegs
; j
++) {
296 if (i
->Dst
[j
].Register
.Dimension
) {
297 R600_ERR("unsupported dst (dimension)\n");
304 static int tgsi_declaration(struct r600_shader_ctx
*ctx
)
306 struct tgsi_full_declaration
*d
= &ctx
->parse
.FullToken
.FullDeclaration
;
307 struct r600_bc_vtx vtx
;
311 switch (d
->Declaration
.File
) {
312 case TGSI_FILE_INPUT
:
313 i
= ctx
->shader
->ninput
++;
314 ctx
->shader
->input
[i
].name
= d
->Semantic
.Name
;
315 ctx
->shader
->input
[i
].sid
= d
->Semantic
.Index
;
316 ctx
->shader
->input
[i
].interpolate
= d
->Declaration
.Interpolate
;
317 ctx
->shader
->input
[i
].gpr
= ctx
->file_offset
[TGSI_FILE_INPUT
] + i
;
318 if (ctx
->type
== TGSI_PROCESSOR_VERTEX
) {
319 /* turn input into fetch */
320 memset(&vtx
, 0, sizeof(struct r600_bc_vtx
));
324 /* register containing the index into the buffer */
327 vtx
.mega_fetch_count
= 0x1F;
328 vtx
.dst_gpr
= ctx
->shader
->input
[i
].gpr
;
333 r
= r600_bc_add_vtx(ctx
->bc
, &vtx
);
338 case TGSI_FILE_OUTPUT
:
339 i
= ctx
->shader
->noutput
++;
340 ctx
->shader
->output
[i
].name
= d
->Semantic
.Name
;
341 ctx
->shader
->output
[i
].sid
= d
->Semantic
.Index
;
342 ctx
->shader
->output
[i
].gpr
= ctx
->file_offset
[TGSI_FILE_OUTPUT
] + i
;
343 ctx
->shader
->output
[i
].interpolate
= d
->Declaration
.Interpolate
;
345 case TGSI_FILE_CONSTANT
:
346 case TGSI_FILE_TEMPORARY
:
347 case TGSI_FILE_SAMPLER
:
348 case TGSI_FILE_ADDRESS
:
351 R600_ERR("unsupported file %d declaration\n", d
->Declaration
.File
);
357 int r600_shader_from_tgsi(const struct tgsi_token
*tokens
, struct r600_shader
*shader
)
359 struct tgsi_full_immediate
*immediate
;
360 struct r600_shader_ctx ctx
;
361 struct r600_bc_output output
[32];
362 unsigned output_done
, noutput
;
366 ctx
.bc
= &shader
->bc
;
368 r
= r600_bc_init(ctx
.bc
, shader
->family
);
372 tgsi_scan_shader(tokens
, &ctx
.info
);
373 tgsi_parse_init(&ctx
.parse
, tokens
);
374 ctx
.type
= ctx
.parse
.FullHeader
.Processor
.Processor
;
375 shader
->processor_type
= ctx
.type
;
377 /* register allocations */
378 /* Values [0,127] correspond to GPR[0..127].
379 * Values [128,159] correspond to constant buffer bank 0
380 * Values [160,191] correspond to constant buffer bank 1
381 * Values [256,511] correspond to cfile constants c[0..255].
382 * Other special values are shown in the list below.
383 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
384 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
385 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
386 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
387 * 248 SQ_ALU_SRC_0: special constant 0.0.
388 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
389 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
390 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
391 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
392 * 253 SQ_ALU_SRC_LITERAL: literal constant.
393 * 254 SQ_ALU_SRC_PV: previous vector result.
394 * 255 SQ_ALU_SRC_PS: previous scalar result.
396 for (i
= 0; i
< TGSI_FILE_COUNT
; i
++) {
397 ctx
.file_offset
[i
] = 0;
399 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
) {
400 ctx
.file_offset
[TGSI_FILE_INPUT
] = 1;
402 ctx
.file_offset
[TGSI_FILE_OUTPUT
] = ctx
.file_offset
[TGSI_FILE_INPUT
] +
403 ctx
.info
.file_count
[TGSI_FILE_INPUT
];
404 ctx
.file_offset
[TGSI_FILE_TEMPORARY
] = ctx
.file_offset
[TGSI_FILE_OUTPUT
] +
405 ctx
.info
.file_count
[TGSI_FILE_OUTPUT
];
406 ctx
.file_offset
[TGSI_FILE_CONSTANT
] = 256;
407 ctx
.file_offset
[TGSI_FILE_IMMEDIATE
] = 253;
408 ctx
.temp_reg
= ctx
.file_offset
[TGSI_FILE_TEMPORARY
] +
409 ctx
.info
.file_count
[TGSI_FILE_TEMPORARY
];
414 while (!tgsi_parse_end_of_tokens(&ctx
.parse
)) {
415 tgsi_parse_token(&ctx
.parse
);
416 switch (ctx
.parse
.FullToken
.Token
.Type
) {
417 case TGSI_TOKEN_TYPE_IMMEDIATE
:
418 immediate
= &ctx
.parse
.FullToken
.FullImmediate
;
419 ctx
.literals
= realloc(ctx
.literals
, (ctx
.nliterals
+ 1) * 16);
420 if(ctx
.literals
== NULL
) {
424 ctx
.literals
[ctx
.nliterals
* 4 + 0] = immediate
->u
[0].Uint
;
425 ctx
.literals
[ctx
.nliterals
* 4 + 1] = immediate
->u
[1].Uint
;
426 ctx
.literals
[ctx
.nliterals
* 4 + 2] = immediate
->u
[2].Uint
;
427 ctx
.literals
[ctx
.nliterals
* 4 + 3] = immediate
->u
[3].Uint
;
430 case TGSI_TOKEN_TYPE_DECLARATION
:
431 r
= tgsi_declaration(&ctx
);
435 case TGSI_TOKEN_TYPE_INSTRUCTION
:
436 r
= tgsi_is_supported(&ctx
);
439 opcode
= ctx
.parse
.FullToken
.FullInstruction
.Instruction
.Opcode
;
440 ctx
.inst_info
= &r600_shader_tgsi_instruction
[opcode
];
441 r
= ctx
.inst_info
->process(&ctx
);
444 r
= r600_bc_add_literal(ctx
.bc
, ctx
.value
);
449 R600_ERR("unsupported token type %d\n", ctx
.parse
.FullToken
.Token
.Type
);
455 noutput
= shader
->noutput
;
456 for (i
= 0, pos0
= 0; i
< noutput
; i
++) {
457 memset(&output
[i
], 0, sizeof(struct r600_bc_output
));
458 output
[i
].gpr
= shader
->output
[i
].gpr
;
459 output
[i
].elem_size
= 3;
460 output
[i
].swizzle_x
= 0;
461 output
[i
].swizzle_y
= 1;
462 output
[i
].swizzle_z
= 2;
463 output
[i
].swizzle_w
= 3;
464 output
[i
].barrier
= 1;
465 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
466 output
[i
].array_base
= i
- pos0
;
467 output
[i
].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
;
469 case TGSI_PROCESSOR_VERTEX
:
470 if (shader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
) {
471 output
[i
].array_base
= 60;
472 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
473 /* position doesn't count in array_base */
476 if (shader
->output
[i
].name
== TGSI_SEMANTIC_PSIZE
) {
477 output
[i
].array_base
= 61;
478 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
479 /* position doesn't count in array_base */
483 case TGSI_PROCESSOR_FRAGMENT
:
484 if (shader
->output
[i
].name
== TGSI_SEMANTIC_COLOR
) {
485 output
[i
].array_base
= shader
->output
[i
].sid
;
486 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
487 } else if (shader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
) {
488 output
[i
].array_base
= 61;
489 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
491 R600_ERR("unsupported fragment output name %d\n", shader
->output
[i
].name
);
497 R600_ERR("unsupported processor type %d\n", ctx
.type
);
502 /* add fake param output for vertex shader if no param is exported */
503 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
) {
504 for (i
= 0, pos0
= 0; i
< noutput
; i
++) {
505 if (output
[i
].type
== V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
) {
511 memset(&output
[i
], 0, sizeof(struct r600_bc_output
));
513 output
[i
].elem_size
= 3;
514 output
[i
].swizzle_x
= 0;
515 output
[i
].swizzle_y
= 1;
516 output
[i
].swizzle_z
= 2;
517 output
[i
].swizzle_w
= 3;
518 output
[i
].barrier
= 1;
519 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
520 output
[i
].array_base
= 0;
521 output
[i
].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
;
525 /* add fake pixel export */
526 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
&& !noutput
) {
527 memset(&output
[0], 0, sizeof(struct r600_bc_output
));
529 output
[0].elem_size
= 3;
530 output
[0].swizzle_x
= 7;
531 output
[0].swizzle_y
= 7;
532 output
[0].swizzle_z
= 7;
533 output
[0].swizzle_w
= 7;
534 output
[0].barrier
= 1;
535 output
[0].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
536 output
[0].array_base
= 0;
537 output
[0].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
;
540 /* set export done on last export of each type */
541 for (i
= noutput
- 1, output_done
= 0; i
>= 0; i
--) {
542 if (i
== (noutput
- 1)) {
543 output
[i
].end_of_program
= 1;
545 if (!(output_done
& (1 << output
[i
].type
))) {
546 output_done
|= (1 << output
[i
].type
);
547 output
[i
].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
;
550 /* add output to bytecode */
551 for (i
= 0; i
< noutput
; i
++) {
552 r
= r600_bc_add_output(ctx
.bc
, &output
[i
]);
557 tgsi_parse_free(&ctx
.parse
);
561 tgsi_parse_free(&ctx
.parse
);
565 static int tgsi_unsupported(struct r600_shader_ctx
*ctx
)
567 R600_ERR("%d tgsi opcode unsupported\n", ctx
->inst_info
->tgsi_opcode
);
/* TGSI END opcode: nothing to emit — the export instructions terminate the
 * program.
 */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}
576 static int tgsi_src(struct r600_shader_ctx
*ctx
,
577 const struct tgsi_full_src_register
*tgsi_src
,
578 struct r600_bc_alu_src
*r600_src
)
581 memset(r600_src
, 0, sizeof(struct r600_bc_alu_src
));
582 r600_src
->sel
= tgsi_src
->Register
.Index
;
583 if (tgsi_src
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
585 index
= tgsi_src
->Register
.Index
;
586 ctx
->value
[0] = ctx
->literals
[index
* 4 + 0];
587 ctx
->value
[1] = ctx
->literals
[index
* 4 + 1];
588 ctx
->value
[2] = ctx
->literals
[index
* 4 + 2];
589 ctx
->value
[3] = ctx
->literals
[index
* 4 + 3];
591 if (tgsi_src
->Register
.Indirect
)
592 r600_src
->rel
= V_SQ_REL_RELATIVE
;
593 r600_src
->neg
= tgsi_src
->Register
.Negate
;
594 r600_src
->sel
+= ctx
->file_offset
[tgsi_src
->Register
.File
];
598 static int tgsi_dst(struct r600_shader_ctx
*ctx
,
599 const struct tgsi_full_dst_register
*tgsi_dst
,
601 struct r600_bc_alu_dst
*r600_dst
)
603 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
605 r600_dst
->sel
= tgsi_dst
->Register
.Index
;
606 r600_dst
->sel
+= ctx
->file_offset
[tgsi_dst
->Register
.File
];
607 r600_dst
->chan
= swizzle
;
609 if (tgsi_dst
->Register
.Indirect
)
610 r600_dst
->rel
= V_SQ_REL_RELATIVE
;
611 if (inst
->Instruction
.Saturate
) {
617 static unsigned tgsi_chan(const struct tgsi_full_src_register
*tgsi_src
, unsigned swizzle
)
621 return tgsi_src
->Register
.SwizzleX
;
623 return tgsi_src
->Register
.SwizzleY
;
625 return tgsi_src
->Register
.SwizzleZ
;
627 return tgsi_src
->Register
.SwizzleW
;
633 static int tgsi_split_constant(struct r600_shader_ctx
*ctx
, struct r600_bc_alu_src r600_src
[3])
635 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
636 struct r600_bc_alu alu
;
637 int i
, j
, k
, nconst
, r
;
639 for (i
= 0, nconst
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
640 if (inst
->Src
[i
].Register
.File
== TGSI_FILE_CONSTANT
) {
643 r
= tgsi_src(ctx
, &inst
->Src
[i
], &r600_src
[i
]);
648 for (i
= 0, j
= nconst
- 1; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
649 if (inst
->Src
[j
].Register
.File
== TGSI_FILE_CONSTANT
&& j
> 0) {
650 for (k
= 0; k
< 4; k
++) {
651 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
652 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
653 alu
.src
[0].sel
= r600_src
[0].sel
;
655 alu
.dst
.sel
= ctx
->temp_reg
+ j
;
660 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
664 r600_src
[0].sel
= ctx
->temp_reg
+ j
;
671 static int tgsi_op2_s(struct r600_shader_ctx
*ctx
, int swap
)
673 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
674 struct r600_bc_alu_src r600_src
[3];
675 struct r600_bc_alu alu
;
679 for (i
= 0; i
< 4; i
++) {
680 if (inst
->Dst
[0].Register
.WriteMask
& (1 << i
)) {
685 r
= tgsi_split_constant(ctx
, r600_src
);
688 for (i
= 0; i
< lasti
+ 1; i
++) {
689 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
692 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
693 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
697 alu
.inst
= ctx
->inst_info
->r600_opcode
;
699 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
700 alu
.src
[j
] = r600_src
[j
];
701 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
704 alu
.src
[0] = r600_src
[1];
705 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[1], i
);
707 alu
.src
[1] = r600_src
[0];
708 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
710 /* handle some special cases */
711 switch (ctx
->inst_info
->tgsi_opcode
) {
712 case TGSI_OPCODE_SUB
:
715 case TGSI_OPCODE_ABS
:
724 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
/* Standard 2-source ALU op, sources in TGSI order. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0);
}
/* 2-source ALU op with sources exchanged (for reversed hw comparisons). */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 1);
}
742 * r600 - trunc to -PI..PI range
743 * r700 - normalize by dividing by 2PI
746 static int tgsi_setup_trig(struct r600_shader_ctx
*ctx
,
747 struct r600_bc_alu_src r600_src
[3])
749 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
751 uint32_t lit_vals
[4];
752 struct r600_bc_alu alu
;
754 memset(lit_vals
, 0, 4*4);
755 r
= tgsi_split_constant(ctx
, r600_src
);
758 lit_vals
[0] = fui(1.0 /(3.1415926535 * 2));
759 lit_vals
[1] = fui(0.5f
);
761 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
762 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
766 alu
.dst
.sel
= ctx
->temp_reg
;
769 alu
.src
[0] = r600_src
[0];
770 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
772 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
774 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
777 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
780 r
= r600_bc_add_literal(ctx
->bc
, lit_vals
);
784 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
785 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
;
788 alu
.dst
.sel
= ctx
->temp_reg
;
791 alu
.src
[0].sel
= ctx
->temp_reg
;
794 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
798 if (ctx
->bc
->chiprev
== 0) {
799 lit_vals
[0] = fui(3.1415926535897f
* 2.0f
);
800 lit_vals
[1] = fui(-3.1415926535897f
);
802 lit_vals
[0] = fui(1.0f
);
803 lit_vals
[1] = fui(-0.5f
);
806 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
807 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
811 alu
.dst
.sel
= ctx
->temp_reg
;
814 alu
.src
[0].sel
= ctx
->temp_reg
;
817 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
819 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
822 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
825 r
= r600_bc_add_literal(ctx
->bc
, lit_vals
);
831 static int tgsi_trig(struct r600_shader_ctx
*ctx
)
833 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
834 struct r600_bc_alu_src r600_src
[3];
835 struct r600_bc_alu alu
;
838 r
= tgsi_split_constant(ctx
, r600_src
);
842 r
= tgsi_setup_trig(ctx
, r600_src
);
846 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
847 alu
.inst
= ctx
->inst_info
->r600_opcode
;
849 alu
.dst
.sel
= ctx
->temp_reg
;
852 alu
.src
[0].sel
= ctx
->temp_reg
;
855 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
859 /* replicate result */
860 for (i
= 0; i
< 4; i
++) {
861 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
862 alu
.src
[0].sel
= ctx
->temp_reg
;
863 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
865 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
868 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
871 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
878 static int tgsi_scs(struct r600_shader_ctx
*ctx
)
880 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
881 struct r600_bc_alu_src r600_src
[3];
882 struct r600_bc_alu alu
;
885 r
= tgsi_split_constant(ctx
, r600_src
);
889 r
= tgsi_setup_trig(ctx
, r600_src
);
895 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
896 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
;
897 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 0, &alu
.dst
);
901 alu
.src
[0].sel
= ctx
->temp_reg
;
904 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
909 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
910 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
;
911 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 1, &alu
.dst
);
915 alu
.src
[0].sel
= ctx
->temp_reg
;
918 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
924 static int tgsi_kill(struct r600_shader_ctx
*ctx
)
926 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
927 struct r600_bc_alu alu
;
930 for (i
= 0; i
< 4; i
++) {
931 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
932 alu
.inst
= ctx
->inst_info
->r600_opcode
;
936 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
938 if (ctx
->inst_info
->tgsi_opcode
== TGSI_OPCODE_KILP
) {
939 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
942 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[1]);
945 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
950 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
954 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
958 /* kill must be last in ALU */
959 ctx
->bc
->force_add_cf
= 1;
960 ctx
->shader
->uses_kill
= TRUE
;
964 static int tgsi_lit(struct r600_shader_ctx
*ctx
)
966 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
967 struct r600_bc_alu alu
;
971 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
972 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
973 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
; /*1.0*/
975 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 0, &alu
.dst
);
978 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 0) & 1;
979 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
983 /* dst.y = max(src.x, 0.0) */
984 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
985 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
;
986 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
989 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
; /*0.0*/
990 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], 0);
991 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 1, &alu
.dst
);
994 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 1) & 1;
995 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
999 /* dst.z = NOP - fill Z slot */
1000 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1001 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
;
1003 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1008 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1009 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1010 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1011 alu
.src
[0].chan
= 0;
1012 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 3, &alu
.dst
);
1015 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 3) & 1;
1017 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1021 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1025 if (inst
->Dst
[0].Register
.WriteMask
& (1 << 2))
1030 /* dst.z = log(src.y) */
1031 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1032 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
;
1033 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1036 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 1);
1037 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
1041 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1045 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1048 chan
= alu
.dst
.chan
;
1051 /* tmp.x = amd MUL_LIT(src.w, dst.z, src.x ) */
1052 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1053 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
;
1054 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1057 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 3);
1058 alu
.src
[1].sel
= sel
;
1059 alu
.src
[1].chan
= chan
;
1060 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[2]);
1063 alu
.src
[2].chan
= tgsi_chan(&inst
->Src
[0], 0);
1064 alu
.dst
.sel
= ctx
->temp_reg
;
1069 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1073 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1076 /* dst.z = exp(tmp.x) */
1077 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1078 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1079 alu
.src
[0].sel
= ctx
->temp_reg
;
1080 alu
.src
[0].chan
= 0;
1081 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
1085 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1092 static int tgsi_trans(struct r600_shader_ctx
*ctx
)
1094 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1095 struct r600_bc_alu alu
;
1098 for (i
= 0; i
< 4; i
++) {
1099 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1100 if (inst
->Dst
[0].Register
.WriteMask
& (1 << i
)) {
1101 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1102 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1103 r
= tgsi_src(ctx
, &inst
->Src
[j
], &alu
.src
[j
]);
1106 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
1108 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1112 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1120 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx
*ctx
)
1122 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1123 struct r600_bc_alu alu
;
1126 for (i
= 0; i
< 4; i
++) {
1127 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1128 alu
.src
[0].sel
= ctx
->temp_reg
;
1129 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1131 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1134 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
1137 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1144 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx
*ctx
)
1146 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1147 struct r600_bc_alu alu
;
1150 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1151 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1152 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1153 r
= tgsi_src(ctx
, &inst
->Src
[i
], &alu
.src
[i
]);
1156 alu
.src
[i
].chan
= tgsi_chan(&inst
->Src
[i
], 0);
1158 alu
.dst
.sel
= ctx
->temp_reg
;
1161 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1164 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1167 /* replicate result */
1168 return tgsi_helper_tempx_replicate(ctx
);
1171 static int tgsi_pow(struct r600_shader_ctx
*ctx
)
1173 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1174 struct r600_bc_alu alu
;
1178 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1179 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
;
1180 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1183 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1184 alu
.dst
.sel
= ctx
->temp_reg
;
1187 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1190 r
= r600_bc_add_literal(ctx
->bc
,ctx
->value
);
1194 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1195 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL_IEEE
;
1196 r
= tgsi_src(ctx
, &inst
->Src
[1], &alu
.src
[0]);
1199 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[1], 0);
1200 alu
.src
[1].sel
= ctx
->temp_reg
;
1201 alu
.dst
.sel
= ctx
->temp_reg
;
1204 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1207 r
= r600_bc_add_literal(ctx
->bc
,ctx
->value
);
1210 /* POW(a,b) = EXP2(b * LOG2(a))*/
1211 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1212 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1213 alu
.src
[0].sel
= ctx
->temp_reg
;
1214 alu
.dst
.sel
= ctx
->temp_reg
;
1217 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1220 r
= r600_bc_add_literal(ctx
->bc
,ctx
->value
);
1223 return tgsi_helper_tempx_replicate(ctx
);
1226 static int tgsi_ssg(struct r600_shader_ctx
*ctx
)
1228 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1229 struct r600_bc_alu alu
;
1230 struct r600_bc_alu_src r600_src
[3];
1233 r
= tgsi_split_constant(ctx
, r600_src
);
1237 /* tmp = (src > 0 ? 1 : src) */
1238 for (i
= 0; i
< 4; i
++) {
1239 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1240 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
;
1243 alu
.dst
.sel
= ctx
->temp_reg
;
1246 alu
.src
[0] = r600_src
[0];
1247 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1249 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
1251 alu
.src
[2] = r600_src
[0];
1252 alu
.src
[2].chan
= tgsi_chan(&inst
->Src
[0], i
);
1255 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1259 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1263 /* dst = (-tmp > 0 ? -1 : tmp) */
1264 for (i
= 0; i
< 4; i
++) {
1265 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1266 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
;
1268 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1272 alu
.src
[0].sel
= ctx
->temp_reg
;
1273 alu
.src
[0].chan
= i
;
1276 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
1279 alu
.src
[2].sel
= ctx
->temp_reg
;
1280 alu
.src
[2].chan
= i
;
1284 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1291 static int tgsi_helper_copy(struct r600_shader_ctx
*ctx
, struct tgsi_full_instruction
*inst
)
1293 struct r600_bc_alu alu
;
1296 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1299 for (i
= 0; i
< 4; i
++) {
1300 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1301 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
))) {
1302 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
;
1305 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1306 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1309 alu
.src
[0].sel
= ctx
->temp_reg
;
1310 alu
.src
[0].chan
= i
;
1315 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1322 static int tgsi_op3(struct r600_shader_ctx
*ctx
)
1324 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1325 struct r600_bc_alu_src r600_src
[3];
1326 struct r600_bc_alu alu
;
1329 r
= tgsi_split_constant(ctx
, r600_src
);
1332 /* do it in 2 step as op3 doesn't support writemask */
1333 for (i
= 0; i
< 4; i
++) {
1334 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1335 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1336 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1337 alu
.src
[j
] = r600_src
[j
];
1338 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
1340 alu
.dst
.sel
= ctx
->temp_reg
;
1347 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1351 return tgsi_helper_copy(ctx
, inst
);
1354 static int tgsi_dp(struct r600_shader_ctx
*ctx
)
1356 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1357 struct r600_bc_alu_src r600_src
[3];
1358 struct r600_bc_alu alu
;
1361 r
= tgsi_split_constant(ctx
, r600_src
);
1364 for (i
= 0; i
< 4; i
++) {
1365 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1366 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1367 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1368 alu
.src
[j
] = r600_src
[j
];
1369 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
1371 alu
.dst
.sel
= ctx
->temp_reg
;
1374 /* handle some special cases */
1375 switch (ctx
->inst_info
->tgsi_opcode
) {
1376 case TGSI_OPCODE_DP2
:
1378 alu
.src
[0].sel
= alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1379 alu
.src
[0].chan
= alu
.src
[1].chan
= 0;
1382 case TGSI_OPCODE_DP3
:
1384 alu
.src
[0].sel
= alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1385 alu
.src
[0].chan
= alu
.src
[1].chan
= 0;
1388 case TGSI_OPCODE_DPH
:
1390 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1391 alu
.src
[0].chan
= 0;
1401 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1405 return tgsi_helper_copy(ctx
, inst
);
1408 static int tgsi_tex(struct r600_shader_ctx
*ctx
)
1410 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1411 struct r600_bc_tex tex
;
1412 struct r600_bc_alu alu
;
1416 src_gpr
= ctx
->file_offset
[inst
->Src
[0].Register
.File
] + inst
->Src
[0].Register
.Index
;
1418 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
1419 /* Add perspective divide */
1420 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1421 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
;
1422 alu
.src
[0].sel
= src_gpr
;
1423 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 3);
1424 alu
.dst
.sel
= ctx
->temp_reg
;
1428 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1432 for (i
= 0; i
< 3; i
++) {
1433 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1434 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1435 alu
.src
[0].sel
= ctx
->temp_reg
;
1436 alu
.src
[0].chan
= 3;
1437 alu
.src
[1].sel
= src_gpr
;
1438 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
1439 alu
.dst
.sel
= ctx
->temp_reg
;
1442 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1446 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1447 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1448 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1449 alu
.src
[0].chan
= 0;
1450 alu
.dst
.sel
= ctx
->temp_reg
;
1454 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1457 src_gpr
= ctx
->temp_reg
;
1458 } else if (inst
->Src
[0].Register
.File
!= TGSI_FILE_TEMPORARY
) {
1459 for (i
= 0; i
< 4; i
++) {
1460 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1461 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1462 alu
.src
[0].sel
= src_gpr
;
1463 alu
.src
[0].chan
= i
;
1464 alu
.dst
.sel
= ctx
->temp_reg
;
1469 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1473 src_gpr
= ctx
->temp_reg
;
1476 memset(&tex
, 0, sizeof(struct r600_bc_tex
));
1477 tex
.inst
= ctx
->inst_info
->r600_opcode
;
1478 tex
.resource_id
= ctx
->file_offset
[inst
->Src
[1].Register
.File
] + inst
->Src
[1].Register
.Index
;
1479 tex
.sampler_id
= tex
.resource_id
;
1480 tex
.src_gpr
= src_gpr
;
1481 tex
.dst_gpr
= ctx
->file_offset
[inst
->Dst
[0].Register
.File
] + inst
->Dst
[0].Register
.Index
;
1491 if (inst
->Texture
.Texture
!= TGSI_TEXTURE_RECT
) {
1492 tex
.coord_type_x
= 1;
1493 tex
.coord_type_y
= 1;
1494 tex
.coord_type_z
= 1;
1495 tex
.coord_type_w
= 1;
1497 return r600_bc_add_tex(ctx
->bc
, &tex
);
1500 static int tgsi_lrp(struct r600_shader_ctx
*ctx
)
1502 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1503 struct r600_bc_alu_src r600_src
[3];
1504 struct r600_bc_alu alu
;
1508 r
= tgsi_split_constant(ctx
, r600_src
);
1512 for (i
= 0; i
< 4; i
++) {
1513 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1514 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
;
1515 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1516 alu
.src
[0].chan
= 0;
1517 alu
.src
[1] = r600_src
[0];
1518 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
1520 alu
.dst
.sel
= ctx
->temp_reg
;
1526 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1530 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1534 /* (1 - src0) * src2 */
1535 for (i
= 0; i
< 4; i
++) {
1536 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1537 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1538 alu
.src
[0].sel
= ctx
->temp_reg
;
1539 alu
.src
[0].chan
= i
;
1540 alu
.src
[1] = r600_src
[2];
1541 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[2], i
);
1542 alu
.dst
.sel
= ctx
->temp_reg
;
1548 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1552 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1556 /* src0 * src1 + (1 - src0) * src2 */
1557 for (i
= 0; i
< 4; i
++) {
1558 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1559 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
1561 alu
.src
[0] = r600_src
[0];
1562 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1563 alu
.src
[1] = r600_src
[1];
1564 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], i
);
1565 alu
.src
[2].sel
= ctx
->temp_reg
;
1566 alu
.src
[2].chan
= i
;
1567 alu
.dst
.sel
= ctx
->temp_reg
;
1572 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1576 return tgsi_helper_copy(ctx
, inst
);
1579 static int tgsi_cmp(struct r600_shader_ctx
*ctx
)
1581 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1582 struct r600_bc_alu_src r600_src
[3];
1583 struct r600_bc_alu alu
;
1587 r
= tgsi_split_constant(ctx
, r600_src
);
1591 if (inst
->Dst
[0].Register
.WriteMask
!= 0xf)
1594 for (i
= 0; i
< 4; i
++) {
1595 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1596 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE
;
1597 alu
.src
[0] = r600_src
[0];
1598 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1600 alu
.src
[1] = r600_src
[2];
1601 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[2], i
);
1603 alu
.src
[2] = r600_src
[1];
1604 alu
.src
[2].chan
= tgsi_chan(&inst
->Src
[1], i
);
1607 alu
.dst
.sel
= ctx
->temp_reg
;
1609 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1618 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1623 return tgsi_helper_copy(ctx
, inst
);
1627 static int tgsi_xpd(struct r600_shader_ctx
*ctx
)
1629 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1630 struct r600_bc_alu_src r600_src
[3];
1631 struct r600_bc_alu alu
;
1632 uint32_t use_temp
= 0;
1635 if (inst
->Dst
[0].Register
.WriteMask
!= 0xf)
1638 r
= tgsi_split_constant(ctx
, r600_src
);
1642 for (i
= 0; i
< 4; i
++) {
1643 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1644 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1646 alu
.src
[0] = r600_src
[0];
1649 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 2);
1652 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1655 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 1);
1658 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
1659 alu
.src
[0].chan
= i
;
1662 alu
.src
[1] = r600_src
[1];
1665 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 1);
1668 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 2);
1671 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 0);
1674 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1675 alu
.src
[1].chan
= i
;
1678 alu
.dst
.sel
= ctx
->temp_reg
;
1684 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1689 for (i
= 0; i
< 4; i
++) {
1690 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1691 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
1693 alu
.src
[0] = r600_src
[0];
1696 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 1);
1699 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 2);
1702 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1705 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
1706 alu
.src
[0].chan
= i
;
1709 alu
.src
[1] = r600_src
[1];
1712 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 2);
1715 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 0);
1718 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 1);
1721 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1722 alu
.src
[1].chan
= i
;
1725 alu
.src
[2].sel
= ctx
->temp_reg
;
1727 alu
.src
[2].chan
= i
;
1730 alu
.dst
.sel
= ctx
->temp_reg
;
1732 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1741 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1746 return tgsi_helper_copy(ctx
, inst
);
1750 static int tgsi_exp(struct r600_shader_ctx
*ctx
)
1752 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1753 struct r600_bc_alu_src r600_src
[3];
1754 struct r600_bc_alu alu
;
1757 /* result.x = 2^floor(src); */
1758 if (inst
->Dst
[0].Register
.WriteMask
& 1) {
1759 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1761 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
;
1762 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1766 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1768 alu
.dst
.sel
= ctx
->temp_reg
;
1772 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1776 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1780 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1781 alu
.src
[0].sel
= ctx
->temp_reg
;
1782 alu
.src
[0].chan
= 0;
1784 alu
.dst
.sel
= ctx
->temp_reg
;
1788 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1792 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1797 /* result.y = tmp - floor(tmp); */
1798 if ((inst
->Dst
[0].Register
.WriteMask
>> 1) & 1) {
1799 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1801 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
;
1802 alu
.src
[0] = r600_src
[0];
1803 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1806 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1808 alu
.dst
.sel
= ctx
->temp_reg
;
1809 // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1817 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1820 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1825 /* result.z = RoughApprox2ToX(tmp);*/
1826 if ((inst
->Dst
[0].Register
.WriteMask
>> 2) & 0x1) {
1827 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1828 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1829 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1832 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1834 alu
.dst
.sel
= ctx
->temp_reg
;
1840 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1843 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1848 /* result.w = 1.0;*/
1849 if ((inst
->Dst
[0].Register
.WriteMask
>> 3) & 0x1) {
1850 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1852 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1853 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1854 alu
.src
[0].chan
= 0;
1856 alu
.dst
.sel
= ctx
->temp_reg
;
1860 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1863 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1867 return tgsi_helper_copy(ctx
, inst
);
1870 static int tgsi_arl(struct r600_shader_ctx
*ctx
)
1872 /* TODO from r600c, ar values don't persist between clauses */
1873 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1874 struct r600_bc_alu alu
;
1876 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1878 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
;
1880 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1883 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1887 r
= r600_bc_add_alu_type(ctx
->bc
, &alu
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
);
1893 static int tgsi_opdst(struct r600_shader_ctx
*ctx
)
1895 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1896 struct r600_bc_alu alu
;
1899 for (i
= 0; i
< 4; i
++) {
1900 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1902 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1903 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1907 if (i
== 0 || i
== 3) {
1908 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1910 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1913 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1916 if (i
== 0 || i
== 2) {
1917 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
1919 r
= tgsi_src(ctx
, &inst
->Src
[1], &alu
.src
[1]);
1922 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], i
);
1926 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1933 static int emit_logic_pred(struct r600_shader_ctx
*ctx
, int opcode
)
1935 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1936 struct r600_bc_alu alu
;
1939 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1943 alu
.dst
.sel
= ctx
->temp_reg
;
1947 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1950 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1951 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1952 alu
.src
[1].chan
= 0;
1956 r
= r600_bc_add_alu_type(ctx
->bc
, &alu
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
);
1962 static int pops(struct r600_shader_ctx
*ctx
, int pops
)
1964 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_POP
);
1965 ctx
->bc
->cf_last
->pop_count
= pops
;
1969 static inline void callstack_decrease_current(struct r600_shader_ctx
*ctx
, unsigned reason
)
1973 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
--;
1977 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
-= 4;
1980 /* TOODO : for 16 vp asic should -= 2; */
1981 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
--;
1986 static inline void callstack_check_depth(struct r600_shader_ctx
*ctx
, unsigned reason
, unsigned check_max_only
)
1988 if (check_max_only
) {
1998 if ((ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
+ diff
) >
1999 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
) {
2000 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
=
2001 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
+ diff
;
2007 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
++;
2011 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
+= 4;
2014 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
++;
2018 if ((ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
) >
2019 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
) {
2020 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
=
2021 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
;
2025 static void fc_set_mid(struct r600_shader_ctx
*ctx
, int fc_sp
)
2027 struct r600_cf_stack_entry
*sp
= &ctx
->bc
->fc_stack
[fc_sp
];
2029 sp
->mid
= (struct r600_bc_cf
**)realloc((void *)sp
->mid
,
2030 sizeof(struct r600_bc_cf
*) * (sp
->num_mid
+ 1));
2031 sp
->mid
[sp
->num_mid
] = ctx
->bc
->cf_last
;
2035 static void fc_pushlevel(struct r600_shader_ctx
*ctx
, int type
)
2038 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].type
= type
;
2039 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
= ctx
->bc
->cf_last
;
2042 static void fc_poplevel(struct r600_shader_ctx
*ctx
)
2044 struct r600_cf_stack_entry
*sp
= &ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
];
2056 static int emit_return(struct r600_shader_ctx
*ctx
)
2058 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_RETURN
);
2062 static int emit_jump_to_offset(struct r600_shader_ctx
*ctx
, int pops
, int offset
)
2065 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_JUMP
);
2066 ctx
->bc
->cf_last
->pop_count
= pops
;
2067 /* TODO work out offset */
/* Stub: set the "returned inside a loop" flag — not yet implemented. */
static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
{
	return 0;
}
/* Stub: test the in-loop return flag — not yet implemented. */
static void emit_testflag(struct r600_shader_ctx *ctx)
{
}
2081 static void emit_return_on_flag(struct r600_shader_ctx
*ctx
, unsigned ifidx
)
2084 emit_jump_to_offset(ctx
, 1, 4);
2085 emit_setret_in_loop_flag(ctx
, V_SQ_ALU_SRC_0
);
2086 pops(ctx
, ifidx
+ 1);
2090 static void break_loop_on_flag(struct r600_shader_ctx
*ctx
, unsigned fc_sp
)
2094 r600_bc_add_cfinst(ctx
->bc
, ctx
->inst_info
->r600_opcode
);
2095 ctx
->bc
->cf_last
->pop_count
= 1;
2097 fc_set_mid(ctx
, fc_sp
);
2103 static int tgsi_if(struct r600_shader_ctx
*ctx
)
2105 emit_logic_pred(ctx
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
);
2107 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_JUMP
);
2109 fc_pushlevel(ctx
, FC_IF
);
2111 callstack_check_depth(ctx
, FC_PUSH_VPM
, 0);
2115 static int tgsi_else(struct r600_shader_ctx
*ctx
)
2117 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_ELSE
);
2118 ctx
->bc
->cf_last
->pop_count
= 1;
2120 fc_set_mid(ctx
, ctx
->bc
->fc_sp
);
2121 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->cf_addr
= ctx
->bc
->cf_last
->id
;
2125 static int tgsi_endif(struct r600_shader_ctx
*ctx
)
2128 if (ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].type
!= FC_IF
) {
2129 R600_ERR("if/endif unbalanced in shader\n");
2133 if (ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].mid
== NULL
) {
2134 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->cf_addr
= ctx
->bc
->cf_last
->id
+ 2;
2135 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->pop_count
= 1;
2137 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].mid
[0]->cf_addr
= ctx
->bc
->cf_last
->id
+ 2;
2141 callstack_decrease_current(ctx
, FC_PUSH_VPM
);
2145 static int tgsi_bgnloop(struct r600_shader_ctx
*ctx
)
2147 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
);
2149 fc_pushlevel(ctx
, FC_LOOP
);
2151 /* check stack depth */
2152 callstack_check_depth(ctx
, FC_LOOP
, 0);
2156 static int tgsi_endloop(struct r600_shader_ctx
*ctx
)
2160 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
);
2162 if (ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].type
!= FC_LOOP
) {
2163 R600_ERR("loop/endloop in shader code are not paired.\n");
2167 /* fixup loop pointers - from r600isa
2168 LOOP END points to CF after LOOP START,
2169 LOOP START point to CF after LOOP END
2170 BRK/CONT point to LOOP END CF
2172 ctx
->bc
->cf_last
->cf_addr
= ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->id
+ 2;
2174 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->cf_addr
= ctx
->bc
->cf_last
->id
+ 2;
2176 for (i
= 0; i
< ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].num_mid
; i
++) {
2177 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].mid
[i
]->cf_addr
= ctx
->bc
->cf_last
->id
;
2179 /* TODO add LOOPRET support */
2181 callstack_decrease_current(ctx
, FC_LOOP
);
2185 static int tgsi_loop_brk_cont(struct r600_shader_ctx
*ctx
)
2189 for (fscp
= ctx
->bc
->fc_sp
; fscp
> 0; fscp
--)
2191 if (FC_LOOP
== ctx
->bc
->fc_stack
[fscp
].type
)
2196 R600_ERR("Break not inside loop/endloop pair\n");
2200 r600_bc_add_cfinst(ctx
->bc
, ctx
->inst_info
->r600_opcode
);
2201 ctx
->bc
->cf_last
->pop_count
= 1;
2203 fc_set_mid(ctx
, fscp
);
2206 callstack_check_depth(ctx
, FC_PUSH_VPM
, 1);
2210 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction
[] = {
2211 {TGSI_OPCODE_ARL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_arl
},
2212 {TGSI_OPCODE_MOV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
2213 {TGSI_OPCODE_LIT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lit
},
2214 {TGSI_OPCODE_RCP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
, tgsi_trans_srcx_replicate
},
2215 {TGSI_OPCODE_RSQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
, tgsi_trans_srcx_replicate
},
2216 {TGSI_OPCODE_EXP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_exp
},
2217 {TGSI_OPCODE_LOG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2218 {TGSI_OPCODE_MUL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
, tgsi_op2
},
2219 {TGSI_OPCODE_ADD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
2220 {TGSI_OPCODE_DP3
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2221 {TGSI_OPCODE_DP4
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2222 {TGSI_OPCODE_DST
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_opdst
},
2223 {TGSI_OPCODE_MIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
, tgsi_op2
},
2224 {TGSI_OPCODE_MAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
, tgsi_op2
},
2225 {TGSI_OPCODE_SLT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2_swap
},
2226 {TGSI_OPCODE_SGE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2
},
2227 {TGSI_OPCODE_MAD
, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
, tgsi_op3
},
2228 {TGSI_OPCODE_SUB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
2229 {TGSI_OPCODE_LRP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lrp
},
2230 {TGSI_OPCODE_CND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2232 {20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2233 {TGSI_OPCODE_DP2A
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2235 {22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2236 {23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2237 {TGSI_OPCODE_FRC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
, tgsi_op2
},
2238 {TGSI_OPCODE_CLAMP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2239 {TGSI_OPCODE_FLR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
, tgsi_op2
},
2240 {TGSI_OPCODE_ROUND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2241 {TGSI_OPCODE_EX2
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
, tgsi_trans_srcx_replicate
},
2242 {TGSI_OPCODE_LG2
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
, tgsi_trans_srcx_replicate
},
2243 {TGSI_OPCODE_POW
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_pow
},
2244 {TGSI_OPCODE_XPD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_xpd
},
2246 {32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2247 {TGSI_OPCODE_ABS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
2248 {TGSI_OPCODE_RCC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2249 {TGSI_OPCODE_DPH
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2250 {TGSI_OPCODE_COS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
, tgsi_trig
},
2251 {TGSI_OPCODE_DDX
, 0, SQ_TEX_INST_GET_GRADIENTS_H
, tgsi_tex
},
2252 {TGSI_OPCODE_DDY
, 0, SQ_TEX_INST_GET_GRADIENTS_V
, tgsi_tex
},
2253 {TGSI_OPCODE_KILP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* predicated kill */
2254 {TGSI_OPCODE_PK2H
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2255 {TGSI_OPCODE_PK2US
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2256 {TGSI_OPCODE_PK4B
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2257 {TGSI_OPCODE_PK4UB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2258 {TGSI_OPCODE_RFL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2259 {TGSI_OPCODE_SEQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
, tgsi_op2
},
2260 {TGSI_OPCODE_SFL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2261 {TGSI_OPCODE_SGT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2
},
2262 {TGSI_OPCODE_SIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
, tgsi_trig
},
2263 {TGSI_OPCODE_SLE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2_swap
},
2264 {TGSI_OPCODE_SNE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
, tgsi_op2
},
2265 {TGSI_OPCODE_STR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2266 {TGSI_OPCODE_TEX
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
2267 {TGSI_OPCODE_TXD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2268 {TGSI_OPCODE_TXP
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
2269 {TGSI_OPCODE_UP2H
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2270 {TGSI_OPCODE_UP2US
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2271 {TGSI_OPCODE_UP4B
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2272 {TGSI_OPCODE_UP4UB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2273 {TGSI_OPCODE_X2D
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2274 {TGSI_OPCODE_ARA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2275 {TGSI_OPCODE_ARR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2276 {TGSI_OPCODE_BRA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2277 {TGSI_OPCODE_CAL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2278 {TGSI_OPCODE_RET
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2279 {TGSI_OPCODE_SSG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_ssg
},
2280 {TGSI_OPCODE_CMP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_cmp
},
2281 {TGSI_OPCODE_SCS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_scs
},
2282 {TGSI_OPCODE_TXB
, 0, SQ_TEX_INST_SAMPLE_L
, tgsi_tex
},
2283 {TGSI_OPCODE_NRM
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2284 {TGSI_OPCODE_DIV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2285 {TGSI_OPCODE_DP2
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2286 {TGSI_OPCODE_TXL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2287 {TGSI_OPCODE_BRK
, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
, tgsi_loop_brk_cont
},
2288 {TGSI_OPCODE_IF
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_if
},
2290 {75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2291 {76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2292 {TGSI_OPCODE_ELSE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_else
},
2293 {TGSI_OPCODE_ENDIF
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endif
},
2295 {79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2296 {80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2297 {TGSI_OPCODE_PUSHA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2298 {TGSI_OPCODE_POPA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2299 {TGSI_OPCODE_CEIL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2300 {TGSI_OPCODE_I2F
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2301 {TGSI_OPCODE_NOT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2302 {TGSI_OPCODE_TRUNC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
, tgsi_trans_srcx_replicate
},
2303 {TGSI_OPCODE_SHL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2305 {88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2306 {TGSI_OPCODE_AND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2307 {TGSI_OPCODE_OR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2308 {TGSI_OPCODE_MOD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2309 {TGSI_OPCODE_XOR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2310 {TGSI_OPCODE_SAD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2311 {TGSI_OPCODE_TXF
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2312 {TGSI_OPCODE_TXQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2313 {TGSI_OPCODE_CONT
, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
, tgsi_loop_brk_cont
},
2314 {TGSI_OPCODE_EMIT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2315 {TGSI_OPCODE_ENDPRIM
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2316 {TGSI_OPCODE_BGNLOOP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_bgnloop
},
2317 {TGSI_OPCODE_BGNSUB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2318 {TGSI_OPCODE_ENDLOOP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endloop
},
2319 {TGSI_OPCODE_ENDSUB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2321 {103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2322 {104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2323 {105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2324 {106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2325 {TGSI_OPCODE_NOP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2327 {108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2328 {109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2329 {110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2330 {111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2331 {TGSI_OPCODE_NRM4
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2332 {TGSI_OPCODE_CALLNZ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2333 {TGSI_OPCODE_IFC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2334 {TGSI_OPCODE_BREAKC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2335 {TGSI_OPCODE_KIL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* conditional kill */
2336 {TGSI_OPCODE_END
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_end
}, /* aka HALT */
2338 {118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2339 {TGSI_OPCODE_F2I
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2340 {TGSI_OPCODE_IDIV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2341 {TGSI_OPCODE_IMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2342 {TGSI_OPCODE_IMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2343 {TGSI_OPCODE_INEG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2344 {TGSI_OPCODE_ISGE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2345 {TGSI_OPCODE_ISHR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2346 {TGSI_OPCODE_ISLT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2347 {TGSI_OPCODE_F2U
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2348 {TGSI_OPCODE_U2F
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2349 {TGSI_OPCODE_UADD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2350 {TGSI_OPCODE_UDIV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2351 {TGSI_OPCODE_UMAD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2352 {TGSI_OPCODE_UMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2353 {TGSI_OPCODE_UMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2354 {TGSI_OPCODE_UMOD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2355 {TGSI_OPCODE_UMUL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2356 {TGSI_OPCODE_USEQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2357 {TGSI_OPCODE_USGE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2358 {TGSI_OPCODE_USHR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2359 {TGSI_OPCODE_USLT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2360 {TGSI_OPCODE_USNE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2361 {TGSI_OPCODE_SWITCH
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2362 {TGSI_OPCODE_CASE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2363 {TGSI_OPCODE_DEFAULT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2364 {TGSI_OPCODE_ENDSWITCH
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2365 {TGSI_OPCODE_LAST
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},