/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"
#include "util/u_format.h"
#include "r600_screen.h"
#include "r600_context.h"
#include "r600_shader.h"
38 struct r600_shader_tgsi_instruction
;
40 struct r600_shader_ctx
{
41 struct tgsi_shader_info info
;
42 struct tgsi_parse_context parse
;
43 const struct tgsi_token
*tokens
;
45 unsigned file_offset
[TGSI_FILE_COUNT
];
47 struct r600_shader_tgsi_instruction
*inst_info
;
49 struct r600_shader
*shader
;
55 struct r600_shader_tgsi_instruction
{
59 int (*process
)(struct r600_shader_ctx
*ctx
);
62 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction
[];
63 static int r600_shader_from_tgsi(const struct tgsi_token
*tokens
, struct r600_shader
*shader
);
65 static int r600_shader_update(struct pipe_context
*ctx
, struct r600_shader
*shader
)
67 struct r600_context
*rctx
= r600_context(ctx
);
68 const struct util_format_description
*desc
;
69 enum pipe_format resource_format
[160];
70 unsigned i
, nresources
= 0;
71 struct r600_bc
*bc
= &shader
->bc
;
72 struct r600_bc_cf
*cf
;
73 struct r600_bc_vtx
*vtx
;
75 if (shader
->processor_type
!= TGSI_PROCESSOR_VERTEX
)
77 for (i
= 0; i
< rctx
->vertex_elements
->count
; i
++) {
78 resource_format
[nresources
++] = rctx
->vertex_elements
->elements
[i
].src_format
;
80 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
82 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
83 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
84 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
85 desc
= util_format_description(resource_format
[vtx
->buffer_id
]);
87 R600_ERR("unknown format %d\n", resource_format
[vtx
->buffer_id
]);
90 vtx
->dst_sel_x
= desc
->swizzle
[0];
91 vtx
->dst_sel_y
= desc
->swizzle
[1];
92 vtx
->dst_sel_z
= desc
->swizzle
[2];
93 vtx
->dst_sel_w
= desc
->swizzle
[3];
100 return r600_bc_build(&shader
->bc
);
103 int r600_pipe_shader_create(struct pipe_context
*ctx
,
104 struct r600_context_state
*rpshader
,
105 const struct tgsi_token
*tokens
)
107 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
110 //fprintf(stderr, "--------------------------------------------------------------\n");
111 //tgsi_dump(tokens, 0);
112 if (rpshader
== NULL
)
114 rpshader
->shader
.family
= radeon_get_family(rscreen
->rw
);
115 r
= r600_shader_from_tgsi(tokens
, &rpshader
->shader
);
117 R600_ERR("translation from TGSI failed !\n");
120 r
= r600_bc_build(&rpshader
->shader
.bc
);
122 R600_ERR("building bytecode failed !\n");
125 //fprintf(stderr, "______________________________________________________________\n");
129 static int r600_pipe_shader_vs(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
131 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
132 struct r600_shader
*rshader
= &rpshader
->shader
;
133 struct radeon_state
*state
;
136 rpshader
->rstate
= radeon_state_decref(rpshader
->rstate
);
137 state
= radeon_state(rscreen
->rw
, R600_VS_SHADER_TYPE
, R600_VS_SHADER
);
140 for (i
= 0; i
< 10; i
++) {
141 state
->states
[R600_VS_SHADER__SPI_VS_OUT_ID_0
+ i
] = 0;
143 /* so far never got proper semantic id from tgsi */
144 for (i
= 0; i
< 32; i
++) {
145 tmp
= i
<< ((i
& 3) * 8);
146 state
->states
[R600_VS_SHADER__SPI_VS_OUT_ID_0
+ i
/ 4] |= tmp
;
148 state
->states
[R600_VS_SHADER__SPI_VS_OUT_CONFIG
] = S_0286C4_VS_EXPORT_COUNT(rshader
->noutput
- 2);
149 state
->states
[R600_VS_SHADER__SQ_PGM_RESOURCES_VS
] = S_028868_NUM_GPRS(rshader
->bc
.ngpr
) |
150 S_028868_STACK_SIZE(rshader
->bc
.nstack
);
151 rpshader
->rstate
= state
;
152 rpshader
->rstate
->bo
[0] = radeon_bo_incref(rscreen
->rw
, rpshader
->bo
);
153 rpshader
->rstate
->bo
[1] = radeon_bo_incref(rscreen
->rw
, rpshader
->bo
);
154 rpshader
->rstate
->nbo
= 2;
155 rpshader
->rstate
->placement
[0] = RADEON_GEM_DOMAIN_GTT
;
156 rpshader
->rstate
->placement
[2] = RADEON_GEM_DOMAIN_GTT
;
157 return radeon_state_pm4(state
);
160 static int r600_pipe_shader_ps(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
162 const struct pipe_rasterizer_state
*rasterizer
;
163 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
164 struct r600_shader
*rshader
= &rpshader
->shader
;
165 struct r600_context
*rctx
= r600_context(ctx
);
166 struct radeon_state
*state
;
167 unsigned i
, tmp
, exports_ps
, num_cout
;
169 rasterizer
= &rctx
->rasterizer
->state
.rasterizer
;
170 rpshader
->rstate
= radeon_state_decref(rpshader
->rstate
);
171 state
= radeon_state(rscreen
->rw
, R600_PS_SHADER_TYPE
, R600_PS_SHADER
);
174 for (i
= 0; i
< rshader
->ninput
; i
++) {
175 tmp
= S_028644_SEMANTIC(i
);
176 tmp
|= S_028644_SEL_CENTROID(1);
177 if (rshader
->input
[i
].name
== TGSI_SEMANTIC_COLOR
||
178 rshader
->input
[i
].name
== TGSI_SEMANTIC_BCOLOR
) {
179 tmp
|= S_028644_FLAT_SHADE(rshader
->flat_shade
);
181 if (rasterizer
->sprite_coord_enable
& (1 << i
)) {
182 tmp
|= S_028644_PT_SPRITE_TEX(1);
184 state
->states
[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0
+ i
] = tmp
;
189 for (i
= 0; i
< rshader
->noutput
; i
++) {
190 if (rshader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
)
192 else if (rshader
->output
[i
].name
== TGSI_SEMANTIC_COLOR
) {
193 exports_ps
|= (1 << (num_cout
+1));
198 /* always at least export 1 component per pixel */
201 state
->states
[R600_PS_SHADER__SPI_PS_IN_CONTROL_0
] = S_0286CC_NUM_INTERP(rshader
->ninput
) |
202 S_0286CC_PERSP_GRADIENT_ENA(1);
203 state
->states
[R600_PS_SHADER__SPI_PS_IN_CONTROL_1
] = 0x00000000;
204 state
->states
[R600_PS_SHADER__SQ_PGM_RESOURCES_PS
] = S_028868_NUM_GPRS(rshader
->bc
.ngpr
) |
205 S_028868_STACK_SIZE(rshader
->bc
.nstack
);
206 state
->states
[R600_PS_SHADER__SQ_PGM_EXPORTS_PS
] = exports_ps
;
207 rpshader
->rstate
= state
;
208 rpshader
->rstate
->bo
[0] = radeon_bo_incref(rscreen
->rw
, rpshader
->bo
);
209 rpshader
->rstate
->nbo
= 1;
210 rpshader
->rstate
->placement
[0] = RADEON_GEM_DOMAIN_GTT
;
211 return radeon_state_pm4(state
);
214 static int r600_pipe_shader(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
216 struct r600_screen
*rscreen
= r600_screen(ctx
->screen
);
217 struct r600_context
*rctx
= r600_context(ctx
);
218 struct r600_shader
*rshader
= &rpshader
->shader
;
221 /* copy new shader */
222 radeon_bo_decref(rscreen
->rw
, rpshader
->bo
);
224 rpshader
->bo
= radeon_bo(rscreen
->rw
, 0, rshader
->bc
.ndw
* 4,
226 if (rpshader
->bo
== NULL
) {
229 radeon_bo_map(rscreen
->rw
, rpshader
->bo
);
230 memcpy(rpshader
->bo
->data
, rshader
->bc
.bytecode
, rshader
->bc
.ndw
* 4);
231 radeon_bo_unmap(rscreen
->rw
, rpshader
->bo
);
233 rshader
->flat_shade
= rctx
->flat_shade
;
234 switch (rshader
->processor_type
) {
235 case TGSI_PROCESSOR_VERTEX
:
236 r
= r600_pipe_shader_vs(ctx
, rpshader
);
238 case TGSI_PROCESSOR_FRAGMENT
:
239 r
= r600_pipe_shader_ps(ctx
, rpshader
);
248 int r600_pipe_shader_update(struct pipe_context
*ctx
, struct r600_context_state
*rpshader
)
250 struct r600_context
*rctx
= r600_context(ctx
);
253 if (rpshader
== NULL
)
255 /* there should be enough input */
256 if (rctx
->vertex_elements
->count
< rpshader
->shader
.bc
.nresource
) {
257 R600_ERR("%d resources provided, expecting %d\n",
258 rctx
->vertex_elements
->count
, rpshader
->shader
.bc
.nresource
);
261 r
= r600_shader_update(ctx
, &rpshader
->shader
);
264 return r600_pipe_shader(ctx
, rpshader
);
267 static int tgsi_is_supported(struct r600_shader_ctx
*ctx
)
269 struct tgsi_full_instruction
*i
= &ctx
->parse
.FullToken
.FullInstruction
;
272 if (i
->Instruction
.NumDstRegs
> 1) {
273 R600_ERR("too many dst (%d)\n", i
->Instruction
.NumDstRegs
);
276 if (i
->Instruction
.Predicate
) {
277 R600_ERR("predicate unsupported\n");
281 if (i
->Instruction
.Label
) {
282 R600_ERR("label unsupported\n");
286 for (j
= 0; j
< i
->Instruction
.NumSrcRegs
; j
++) {
287 if (i
->Src
[j
].Register
.Indirect
||
288 i
->Src
[j
].Register
.Dimension
||
289 i
->Src
[j
].Register
.Absolute
) {
290 R600_ERR("unsupported src (indirect|dimension|absolute)\n");
294 for (j
= 0; j
< i
->Instruction
.NumDstRegs
; j
++) {
295 if (i
->Dst
[j
].Register
.Indirect
|| i
->Dst
[j
].Register
.Dimension
) {
296 R600_ERR("unsupported dst (indirect|dimension)\n");
303 static int tgsi_declaration(struct r600_shader_ctx
*ctx
)
305 struct tgsi_full_declaration
*d
= &ctx
->parse
.FullToken
.FullDeclaration
;
306 struct r600_bc_vtx vtx
;
310 switch (d
->Declaration
.File
) {
311 case TGSI_FILE_INPUT
:
312 i
= ctx
->shader
->ninput
++;
313 ctx
->shader
->input
[i
].name
= d
->Semantic
.Name
;
314 ctx
->shader
->input
[i
].sid
= d
->Semantic
.Index
;
315 ctx
->shader
->input
[i
].interpolate
= d
->Declaration
.Interpolate
;
316 ctx
->shader
->input
[i
].gpr
= ctx
->file_offset
[TGSI_FILE_INPUT
] + i
;
317 if (ctx
->type
== TGSI_PROCESSOR_VERTEX
) {
318 /* turn input into fetch */
319 memset(&vtx
, 0, sizeof(struct r600_bc_vtx
));
323 /* register containing the index into the buffer */
326 vtx
.mega_fetch_count
= 0x1F;
327 vtx
.dst_gpr
= ctx
->shader
->input
[i
].gpr
;
332 r
= r600_bc_add_vtx(ctx
->bc
, &vtx
);
337 case TGSI_FILE_OUTPUT
:
338 i
= ctx
->shader
->noutput
++;
339 ctx
->shader
->output
[i
].name
= d
->Semantic
.Name
;
340 ctx
->shader
->output
[i
].sid
= d
->Semantic
.Index
;
341 ctx
->shader
->output
[i
].gpr
= ctx
->file_offset
[TGSI_FILE_OUTPUT
] + i
;
342 ctx
->shader
->output
[i
].interpolate
= d
->Declaration
.Interpolate
;
344 case TGSI_FILE_CONSTANT
:
345 case TGSI_FILE_TEMPORARY
:
346 case TGSI_FILE_SAMPLER
:
349 R600_ERR("unsupported file %d declaration\n", d
->Declaration
.File
);
355 int r600_shader_from_tgsi(const struct tgsi_token
*tokens
, struct r600_shader
*shader
)
357 struct tgsi_full_immediate
*immediate
;
358 struct r600_shader_ctx ctx
;
359 struct r600_bc_output output
[32];
360 unsigned output_done
, noutput
;
364 ctx
.bc
= &shader
->bc
;
366 r
= r600_bc_init(ctx
.bc
, shader
->family
);
370 tgsi_scan_shader(tokens
, &ctx
.info
);
371 tgsi_parse_init(&ctx
.parse
, tokens
);
372 ctx
.type
= ctx
.parse
.FullHeader
.Processor
.Processor
;
373 shader
->processor_type
= ctx
.type
;
375 /* register allocations */
376 /* Values [0,127] correspond to GPR[0..127].
377 * Values [128,159] correspond to constant buffer bank 0
378 * Values [160,191] correspond to constant buffer bank 1
379 * Values [256,511] correspond to cfile constants c[0..255].
380 * Other special values are shown in the list below.
381 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
382 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
383 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
384 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
385 * 248 SQ_ALU_SRC_0: special constant 0.0.
386 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
387 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
388 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
389 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
390 * 253 SQ_ALU_SRC_LITERAL: literal constant.
391 * 254 SQ_ALU_SRC_PV: previous vector result.
392 * 255 SQ_ALU_SRC_PS: previous scalar result.
394 for (i
= 0; i
< TGSI_FILE_COUNT
; i
++) {
395 ctx
.file_offset
[i
] = 0;
397 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
) {
398 ctx
.file_offset
[TGSI_FILE_INPUT
] = 1;
400 ctx
.file_offset
[TGSI_FILE_OUTPUT
] = ctx
.file_offset
[TGSI_FILE_INPUT
] +
401 ctx
.info
.file_count
[TGSI_FILE_INPUT
];
402 ctx
.file_offset
[TGSI_FILE_TEMPORARY
] = ctx
.file_offset
[TGSI_FILE_OUTPUT
] +
403 ctx
.info
.file_count
[TGSI_FILE_OUTPUT
];
404 ctx
.file_offset
[TGSI_FILE_CONSTANT
] = 256;
405 ctx
.file_offset
[TGSI_FILE_IMMEDIATE
] = 253;
406 ctx
.temp_reg
= ctx
.file_offset
[TGSI_FILE_TEMPORARY
] +
407 ctx
.info
.file_count
[TGSI_FILE_TEMPORARY
];
412 while (!tgsi_parse_end_of_tokens(&ctx
.parse
)) {
413 tgsi_parse_token(&ctx
.parse
);
414 switch (ctx
.parse
.FullToken
.Token
.Type
) {
415 case TGSI_TOKEN_TYPE_IMMEDIATE
:
416 immediate
= &ctx
.parse
.FullToken
.FullImmediate
;
417 ctx
.literals
= realloc(ctx
.literals
, (ctx
.nliterals
+ 1) * 16);
418 if(ctx
.literals
== NULL
) {
422 ctx
.literals
[ctx
.nliterals
* 4 + 0] = immediate
->u
[0].Uint
;
423 ctx
.literals
[ctx
.nliterals
* 4 + 1] = immediate
->u
[1].Uint
;
424 ctx
.literals
[ctx
.nliterals
* 4 + 2] = immediate
->u
[2].Uint
;
425 ctx
.literals
[ctx
.nliterals
* 4 + 3] = immediate
->u
[3].Uint
;
428 case TGSI_TOKEN_TYPE_DECLARATION
:
429 r
= tgsi_declaration(&ctx
);
433 case TGSI_TOKEN_TYPE_INSTRUCTION
:
434 r
= tgsi_is_supported(&ctx
);
437 opcode
= ctx
.parse
.FullToken
.FullInstruction
.Instruction
.Opcode
;
438 ctx
.inst_info
= &r600_shader_tgsi_instruction
[opcode
];
439 r
= ctx
.inst_info
->process(&ctx
);
442 r
= r600_bc_add_literal(ctx
.bc
, ctx
.value
);
447 R600_ERR("unsupported token type %d\n", ctx
.parse
.FullToken
.Token
.Type
);
453 noutput
= shader
->noutput
;
454 for (i
= 0, pos0
= 0; i
< noutput
; i
++) {
455 memset(&output
[i
], 0, sizeof(struct r600_bc_output
));
456 output
[i
].gpr
= shader
->output
[i
].gpr
;
457 output
[i
].elem_size
= 3;
458 output
[i
].swizzle_x
= 0;
459 output
[i
].swizzle_y
= 1;
460 output
[i
].swizzle_z
= 2;
461 output
[i
].swizzle_w
= 3;
462 output
[i
].barrier
= 1;
463 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
464 output
[i
].array_base
= i
- pos0
;
465 output
[i
].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
;
467 case TGSI_PROCESSOR_VERTEX
:
468 if (shader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
) {
469 output
[i
].array_base
= 60;
470 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
471 /* position doesn't count in array_base */
474 if (shader
->output
[i
].name
== TGSI_SEMANTIC_PSIZE
) {
475 output
[i
].array_base
= 61;
476 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS
;
477 /* position doesn't count in array_base */
481 case TGSI_PROCESSOR_FRAGMENT
:
482 if (shader
->output
[i
].name
== TGSI_SEMANTIC_COLOR
) {
483 output
[i
].array_base
= shader
->output
[i
].sid
;
484 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
485 } else if (shader
->output
[i
].name
== TGSI_SEMANTIC_POSITION
) {
486 output
[i
].array_base
= 61;
487 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
489 R600_ERR("unsupported fragment output name %d\n", shader
->output
[i
].name
);
495 R600_ERR("unsupported processor type %d\n", ctx
.type
);
500 /* add fake param output for vertex shader if no param is exported */
501 if (ctx
.type
== TGSI_PROCESSOR_VERTEX
) {
502 for (i
= 0, pos0
= 0; i
< noutput
; i
++) {
503 if (output
[i
].type
== V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
) {
509 memset(&output
[i
], 0, sizeof(struct r600_bc_output
));
511 output
[i
].elem_size
= 3;
512 output
[i
].swizzle_x
= 0;
513 output
[i
].swizzle_y
= 1;
514 output
[i
].swizzle_z
= 2;
515 output
[i
].swizzle_w
= 3;
516 output
[i
].barrier
= 1;
517 output
[i
].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM
;
518 output
[i
].array_base
= 0;
519 output
[i
].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
;
523 /* add fake pixel export */
524 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
&& !noutput
) {
525 memset(&output
[0], 0, sizeof(struct r600_bc_output
));
527 output
[0].elem_size
= 3;
528 output
[0].swizzle_x
= 7;
529 output
[0].swizzle_y
= 7;
530 output
[0].swizzle_z
= 7;
531 output
[0].swizzle_w
= 7;
532 output
[0].barrier
= 1;
533 output
[0].type
= V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL
;
534 output
[0].array_base
= 0;
535 output
[0].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
;
538 /* set export done on last export of each type */
539 for (i
= noutput
- 1, output_done
= 0; i
>= 0; i
--) {
540 if (i
== (noutput
- 1)) {
541 output
[i
].end_of_program
= 1;
543 if (!(output_done
& (1 << output
[i
].type
))) {
544 output_done
|= (1 << output
[i
].type
);
545 output
[i
].inst
= V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
;
548 /* add output to bytecode */
549 for (i
= 0; i
< noutput
; i
++) {
550 r
= r600_bc_add_output(ctx
.bc
, &output
[i
]);
555 tgsi_parse_free(&ctx
.parse
);
559 tgsi_parse_free(&ctx
.parse
);
563 static int tgsi_unsupported(struct r600_shader_ctx
*ctx
)
565 R600_ERR("%d tgsi opcode unsupported\n", ctx
->inst_info
->tgsi_opcode
);
/* TGSI_OPCODE_END handler: nothing to emit, the export-done flag set in
 * r600_shader_from_tgsi() terminates the program. */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}
574 static int tgsi_src(struct r600_shader_ctx
*ctx
,
575 const struct tgsi_full_src_register
*tgsi_src
,
576 struct r600_bc_alu_src
*r600_src
)
579 memset(r600_src
, 0, sizeof(struct r600_bc_alu_src
));
580 r600_src
->sel
= tgsi_src
->Register
.Index
;
581 if (tgsi_src
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
583 index
= tgsi_src
->Register
.Index
;
584 ctx
->value
[0] = ctx
->literals
[index
* 4 + 0];
585 ctx
->value
[1] = ctx
->literals
[index
* 4 + 1];
586 ctx
->value
[2] = ctx
->literals
[index
* 4 + 2];
587 ctx
->value
[3] = ctx
->literals
[index
* 4 + 3];
589 r600_src
->neg
= tgsi_src
->Register
.Negate
;
590 r600_src
->sel
+= ctx
->file_offset
[tgsi_src
->Register
.File
];
594 static int tgsi_dst(struct r600_shader_ctx
*ctx
,
595 const struct tgsi_full_dst_register
*tgsi_dst
,
597 struct r600_bc_alu_dst
*r600_dst
)
599 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
601 r600_dst
->sel
= tgsi_dst
->Register
.Index
;
602 r600_dst
->sel
+= ctx
->file_offset
[tgsi_dst
->Register
.File
];
603 r600_dst
->chan
= swizzle
;
605 if (inst
->Instruction
.Saturate
) {
611 static unsigned tgsi_chan(const struct tgsi_full_src_register
*tgsi_src
, unsigned swizzle
)
615 return tgsi_src
->Register
.SwizzleX
;
617 return tgsi_src
->Register
.SwizzleY
;
619 return tgsi_src
->Register
.SwizzleZ
;
621 return tgsi_src
->Register
.SwizzleW
;
627 static int tgsi_split_constant(struct r600_shader_ctx
*ctx
, struct r600_bc_alu_src r600_src
[3])
629 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
630 struct r600_bc_alu alu
;
631 int i
, j
, k
, nconst
, r
;
633 for (i
= 0, nconst
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
634 if (inst
->Src
[i
].Register
.File
== TGSI_FILE_CONSTANT
) {
637 r
= tgsi_src(ctx
, &inst
->Src
[i
], &r600_src
[i
]);
642 for (i
= 0, j
= nconst
- 1; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
643 if (inst
->Src
[j
].Register
.File
== TGSI_FILE_CONSTANT
&& j
> 0) {
644 for (k
= 0; k
< 4; k
++) {
645 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
646 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
647 alu
.src
[0].sel
= r600_src
[0].sel
;
649 alu
.dst
.sel
= ctx
->temp_reg
+ j
;
654 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
658 r600_src
[0].sel
= ctx
->temp_reg
+ j
;
665 static int tgsi_op2_s(struct r600_shader_ctx
*ctx
, int swap
)
667 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
668 struct r600_bc_alu_src r600_src
[3];
669 struct r600_bc_alu alu
;
673 for (i
= 0; i
< 4; i
++) {
674 if (inst
->Dst
[0].Register
.WriteMask
& (1 << i
)) {
679 r
= tgsi_split_constant(ctx
, r600_src
);
682 for (i
= 0; i
< lasti
+ 1; i
++) {
683 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
686 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
687 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
691 alu
.inst
= ctx
->inst_info
->r600_opcode
;
693 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
694 alu
.src
[j
] = r600_src
[j
];
695 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
698 alu
.src
[0] = r600_src
[1];
699 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[1], i
);
701 alu
.src
[1] = r600_src
[0];
702 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
704 /* handle some special cases */
705 switch (ctx
->inst_info
->tgsi_opcode
) {
706 case TGSI_OPCODE_SUB
:
709 case TGSI_OPCODE_ABS
:
718 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
/* 2-operand ALU op with operands in TGSI order. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0);
}
/* 2-operand ALU op with operands swapped for the hardware form. */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 1);
}
736 * r600 - trunc to -PI..PI range
737 * r700 - normalize by dividing by 2PI
740 static int tgsi_trig(struct r600_shader_ctx
*ctx
)
742 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
743 struct r600_bc_alu_src r600_src
[3];
744 struct r600_bc_alu alu
;
746 uint32_t lit_vals
[4];
748 memset(lit_vals
, 0, 4*4);
749 r
= tgsi_split_constant(ctx
, r600_src
);
752 lit_vals
[0] = fui(1.0 /(3.1415926535 * 2));
753 lit_vals
[1] = fui(0.5f
);
755 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
756 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
760 alu
.dst
.sel
= ctx
->temp_reg
;
763 alu
.src
[0] = r600_src
[0];
764 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
766 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
768 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
771 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
774 r
= r600_bc_add_literal(ctx
->bc
, lit_vals
);
778 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
779 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
;
782 alu
.dst
.sel
= ctx
->temp_reg
;
785 alu
.src
[0].sel
= ctx
->temp_reg
;
788 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
792 if (ctx
->bc
->chiprev
== 0) {
793 lit_vals
[0] = fui(3.1415926535897f
* 2.0f
);
794 lit_vals
[1] = fui(-3.1415926535897f
);
796 lit_vals
[0] = fui(1.0f
);
797 lit_vals
[1] = fui(-0.5f
);
800 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
801 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
805 alu
.dst
.sel
= ctx
->temp_reg
;
808 alu
.src
[0].sel
= ctx
->temp_reg
;
811 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
813 alu
.src
[2].sel
= V_SQ_ALU_SRC_LITERAL
;
816 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
819 r
= r600_bc_add_literal(ctx
->bc
, lit_vals
);
823 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
824 alu
.inst
= ctx
->inst_info
->r600_opcode
;
826 alu
.dst
.sel
= ctx
->temp_reg
;
829 alu
.src
[0].sel
= ctx
->temp_reg
;
832 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
836 /* replicate result */
837 for (i
= 0; i
< 4; i
++) {
838 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
839 alu
.src
[0].sel
= ctx
->temp_reg
;
840 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
842 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
845 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
848 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
855 static int tgsi_kill(struct r600_shader_ctx
*ctx
)
857 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
858 struct r600_bc_alu alu
;
861 for (i
= 0; i
< 4; i
++) {
862 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
863 alu
.inst
= ctx
->inst_info
->r600_opcode
;
865 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
866 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[1]);
869 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
873 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
880 static int tgsi_lit(struct r600_shader_ctx
*ctx
)
882 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
883 struct r600_bc_alu alu
;
887 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
888 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
889 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
; /*1.0*/
891 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 0, &alu
.dst
);
894 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 0) & 1;
895 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
899 /* dst.y = max(src.x, 0.0) */
900 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
901 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
;
902 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
905 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
; /*0.0*/
906 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], 0);
907 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 1, &alu
.dst
);
910 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 1) & 1;
911 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
915 /* dst.z = NOP - fill Z slot */
916 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
917 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
;
919 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
924 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
925 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
926 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
928 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 3, &alu
.dst
);
931 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> 3) & 1;
933 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
937 if (inst
->Dst
[0].Register
.WriteMask
& (1 << 2))
942 /* dst.z = log(src.y) */
943 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
944 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
;
945 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
948 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 1);
949 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
953 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
960 /* tmp.x = amd MUL_LIT(src.w, dst.z, src.x ) */
961 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
962 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
;
963 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
966 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 3);
967 alu
.src
[1].sel
= sel
;
968 alu
.src
[1].chan
= chan
;
969 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[2]);
972 alu
.src
[2].chan
= tgsi_chan(&inst
->Src
[0], 0);
973 alu
.dst
.sel
= ctx
->temp_reg
;
978 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
982 /* dst.z = exp(tmp.x) */
983 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
984 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
985 alu
.src
[0].sel
= ctx
->temp_reg
;
987 r
= tgsi_dst(ctx
, &inst
->Dst
[0], 2, &alu
.dst
);
991 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
998 static int tgsi_trans(struct r600_shader_ctx
*ctx
)
1000 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1001 struct r600_bc_alu alu
;
1004 for (i
= 0; i
< 4; i
++) {
1005 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1006 if (inst
->Dst
[0].Register
.WriteMask
& (1 << i
)) {
1007 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1008 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1009 r
= tgsi_src(ctx
, &inst
->Src
[j
], &alu
.src
[j
]);
1012 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
1014 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1018 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1026 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx
*ctx
)
1028 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1029 struct r600_bc_alu alu
;
1032 for (i
= 0; i
< 4; i
++) {
1033 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1034 alu
.src
[0].sel
= ctx
->temp_reg
;
1035 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1037 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1040 alu
.dst
.write
= (inst
->Dst
[0].Register
.WriteMask
>> i
) & 1;
1043 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1050 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx
*ctx
)
1052 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1053 struct r600_bc_alu alu
;
1056 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1057 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1058 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1059 r
= tgsi_src(ctx
, &inst
->Src
[i
], &alu
.src
[i
]);
1062 alu
.src
[i
].chan
= tgsi_chan(&inst
->Src
[i
], 0);
1064 alu
.dst
.sel
= ctx
->temp_reg
;
1067 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1070 /* replicate result */
1071 return tgsi_helper_tempx_replicate(ctx
);
1074 static int tgsi_pow(struct r600_shader_ctx
*ctx
)
1076 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1077 struct r600_bc_alu alu
;
1081 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1082 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
;
1083 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1086 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1087 alu
.dst
.sel
= ctx
->temp_reg
;
1090 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1093 r
= r600_bc_add_literal(ctx
->bc
,ctx
->value
);
1097 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1098 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL_IEEE
;
1099 r
= tgsi_src(ctx
, &inst
->Src
[1], &alu
.src
[0]);
1102 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[1], 0);
1103 alu
.src
[1].sel
= ctx
->temp_reg
;
1104 alu
.dst
.sel
= ctx
->temp_reg
;
1107 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1110 r
= r600_bc_add_literal(ctx
->bc
,ctx
->value
);
1113 /* POW(a,b) = EXP2(b * LOG2(a))*/
1114 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1115 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1116 alu
.src
[0].sel
= ctx
->temp_reg
;
1117 alu
.dst
.sel
= ctx
->temp_reg
;
1120 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1123 r
= r600_bc_add_literal(ctx
->bc
,ctx
->value
);
1126 return tgsi_helper_tempx_replicate(ctx
);
1129 static int tgsi_ssg(struct r600_shader_ctx
*ctx
)
1131 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1132 struct r600_bc_alu alu
;
1133 struct r600_bc_alu_src r600_src
[3];
1136 r
= tgsi_split_constant(ctx
, r600_src
);
1140 /* tmp = (src > 0 ? 1 : src) */
1141 for (i
= 0; i
< 4; i
++) {
1142 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1143 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
;
1146 alu
.dst
.sel
= ctx
->temp_reg
;
1149 alu
.src
[0] = r600_src
[0];
1150 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1152 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
1154 alu
.src
[2] = r600_src
[0];
1155 alu
.src
[2].chan
= tgsi_chan(&inst
->Src
[0], i
);
1158 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1162 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1166 /* dst = (-tmp > 0 ? -1 : tmp) */
1167 for (i
= 0; i
< 4; i
++) {
1168 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1169 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT
;
1171 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1175 alu
.src
[0].sel
= ctx
->temp_reg
;
1176 alu
.src
[0].chan
= i
;
1179 alu
.src
[1].sel
= V_SQ_ALU_SRC_1
;
1182 alu
.src
[2].sel
= ctx
->temp_reg
;
1183 alu
.src
[2].chan
= i
;
1187 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1194 static int tgsi_helper_copy(struct r600_shader_ctx
*ctx
, struct tgsi_full_instruction
*inst
)
1196 struct r600_bc_alu alu
;
1199 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1202 for (i
= 0; i
< 4; i
++) {
1203 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1204 if (!(inst
->Dst
[0].Register
.WriteMask
& (1 << i
))) {
1205 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
;
1208 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1209 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1212 alu
.src
[0].sel
= ctx
->temp_reg
;
1213 alu
.src
[0].chan
= i
;
1218 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1225 static int tgsi_op3(struct r600_shader_ctx
*ctx
)
1227 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1228 struct r600_bc_alu_src r600_src
[3];
1229 struct r600_bc_alu alu
;
1232 r
= tgsi_split_constant(ctx
, r600_src
);
1235 /* do it in 2 step as op3 doesn't support writemask */
1236 for (i
= 0; i
< 4; i
++) {
1237 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1238 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1239 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1240 alu
.src
[j
] = r600_src
[j
];
1241 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
1243 alu
.dst
.sel
= ctx
->temp_reg
;
1250 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1254 return tgsi_helper_copy(ctx
, inst
);
1257 static int tgsi_dp(struct r600_shader_ctx
*ctx
)
1259 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1260 struct r600_bc_alu_src r600_src
[3];
1261 struct r600_bc_alu alu
;
1264 r
= tgsi_split_constant(ctx
, r600_src
);
1267 for (i
= 0; i
< 4; i
++) {
1268 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1269 alu
.inst
= ctx
->inst_info
->r600_opcode
;
1270 for (j
= 0; j
< inst
->Instruction
.NumSrcRegs
; j
++) {
1271 alu
.src
[j
] = r600_src
[j
];
1272 alu
.src
[j
].chan
= tgsi_chan(&inst
->Src
[j
], i
);
1274 alu
.dst
.sel
= ctx
->temp_reg
;
1277 /* handle some special cases */
1278 switch (ctx
->inst_info
->tgsi_opcode
) {
1279 case TGSI_OPCODE_DP2
:
1281 alu
.src
[0].sel
= alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1282 alu
.src
[0].chan
= alu
.src
[1].chan
= 0;
1285 case TGSI_OPCODE_DP3
:
1287 alu
.src
[0].sel
= alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1288 alu
.src
[0].chan
= alu
.src
[1].chan
= 0;
1291 case TGSI_OPCODE_DPH
:
1293 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1294 alu
.src
[0].chan
= 0;
1304 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1308 return tgsi_helper_copy(ctx
, inst
);
1311 static int tgsi_tex(struct r600_shader_ctx
*ctx
)
1313 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1314 struct r600_bc_tex tex
;
1315 struct r600_bc_alu alu
;
1319 src_gpr
= ctx
->file_offset
[inst
->Src
[0].Register
.File
] + inst
->Src
[0].Register
.Index
;
1321 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
1322 /* Add perspective divide */
1323 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1324 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
;
1325 alu
.src
[0].sel
= src_gpr
;
1326 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 3);
1327 alu
.dst
.sel
= ctx
->temp_reg
;
1331 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1335 for (i
= 0; i
< 3; i
++) {
1336 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1337 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1338 alu
.src
[0].sel
= ctx
->temp_reg
;
1339 alu
.src
[0].chan
= 3;
1340 alu
.src
[1].sel
= src_gpr
;
1341 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
1342 alu
.dst
.sel
= ctx
->temp_reg
;
1345 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1349 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1350 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1351 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1352 alu
.src
[0].chan
= 0;
1353 alu
.dst
.sel
= ctx
->temp_reg
;
1357 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1360 src_gpr
= ctx
->temp_reg
;
1361 } else if (inst
->Src
[0].Register
.File
!= TGSI_FILE_TEMPORARY
) {
1362 for (i
= 0; i
< 4; i
++) {
1363 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1364 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1365 alu
.src
[0].sel
= src_gpr
;
1366 alu
.src
[0].chan
= i
;
1367 alu
.dst
.sel
= ctx
->temp_reg
;
1372 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1376 src_gpr
= ctx
->temp_reg
;
1379 memset(&tex
, 0, sizeof(struct r600_bc_tex
));
1380 tex
.inst
= ctx
->inst_info
->r600_opcode
;
1381 tex
.resource_id
= ctx
->file_offset
[inst
->Src
[1].Register
.File
] + inst
->Src
[1].Register
.Index
;
1382 tex
.sampler_id
= tex
.resource_id
;
1383 tex
.src_gpr
= src_gpr
;
1384 tex
.dst_gpr
= ctx
->file_offset
[inst
->Dst
[0].Register
.File
] + inst
->Dst
[0].Register
.Index
;
1394 if (inst
->Texture
.Texture
!= TGSI_TEXTURE_RECT
) {
1395 tex
.coord_type_x
= 1;
1396 tex
.coord_type_y
= 1;
1397 tex
.coord_type_z
= 1;
1398 tex
.coord_type_w
= 1;
1400 return r600_bc_add_tex(ctx
->bc
, &tex
);
1403 static int tgsi_lrp(struct r600_shader_ctx
*ctx
)
1405 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1406 struct r600_bc_alu_src r600_src
[3];
1407 struct r600_bc_alu alu
;
1411 r
= tgsi_split_constant(ctx
, r600_src
);
1415 for (i
= 0; i
< 4; i
++) {
1416 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1417 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
;
1418 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1419 alu
.src
[0].chan
= 0;
1420 alu
.src
[1] = r600_src
[0];
1421 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[0], i
);
1423 alu
.dst
.sel
= ctx
->temp_reg
;
1429 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1433 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1437 /* (1 - src0) * src2 */
1438 for (i
= 0; i
< 4; i
++) {
1439 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1440 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1441 alu
.src
[0].sel
= ctx
->temp_reg
;
1442 alu
.src
[0].chan
= i
;
1443 alu
.src
[1] = r600_src
[2];
1444 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[2], i
);
1445 alu
.dst
.sel
= ctx
->temp_reg
;
1451 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1455 r
= r600_bc_add_literal(ctx
->bc
, ctx
->value
);
1459 /* src0 * src1 + (1 - src0) * src2 */
1460 for (i
= 0; i
< 4; i
++) {
1461 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1462 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
1464 alu
.src
[0] = r600_src
[0];
1465 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1466 alu
.src
[1] = r600_src
[1];
1467 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], i
);
1468 alu
.src
[2].sel
= ctx
->temp_reg
;
1469 alu
.src
[2].chan
= i
;
1470 alu
.dst
.sel
= ctx
->temp_reg
;
1475 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1479 return tgsi_helper_copy(ctx
, inst
);
1482 static int tgsi_cmp(struct r600_shader_ctx
*ctx
)
1484 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1485 struct r600_bc_alu_src r600_src
[3];
1486 struct r600_bc_alu alu
;
1490 r
= tgsi_split_constant(ctx
, r600_src
);
1494 if (inst
->Dst
[0].Register
.WriteMask
!= 0xf)
1497 for (i
= 0; i
< 4; i
++) {
1498 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1499 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE
;
1500 alu
.src
[0] = r600_src
[0];
1501 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], i
);
1503 alu
.src
[1] = r600_src
[2];
1504 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[2], i
);
1506 alu
.src
[2] = r600_src
[1];
1507 alu
.src
[2].chan
= tgsi_chan(&inst
->Src
[1], i
);
1510 alu
.dst
.sel
= ctx
->temp_reg
;
1512 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1521 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1526 return tgsi_helper_copy(ctx
, inst
);
1530 static int tgsi_xpd(struct r600_shader_ctx
*ctx
)
1532 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1533 struct r600_bc_alu_src r600_src
[3];
1534 struct r600_bc_alu alu
;
1535 uint32_t use_temp
= 0;
1538 if (inst
->Dst
[0].Register
.WriteMask
!= 0xf)
1541 r
= tgsi_split_constant(ctx
, r600_src
);
1545 for (i
= 0; i
< 4; i
++) {
1546 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1547 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
;
1549 alu
.src
[0] = r600_src
[0];
1552 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 2);
1555 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1558 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 1);
1561 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
1562 alu
.src
[0].chan
= i
;
1565 alu
.src
[1] = r600_src
[1];
1568 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 1);
1571 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 2);
1574 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 0);
1577 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1578 alu
.src
[1].chan
= i
;
1581 alu
.dst
.sel
= ctx
->temp_reg
;
1587 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1592 for (i
= 0; i
< 4; i
++) {
1593 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1594 alu
.inst
= V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
;
1596 alu
.src
[0] = r600_src
[0];
1599 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 1);
1602 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 2);
1605 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1608 alu
.src
[0].sel
= V_SQ_ALU_SRC_0
;
1609 alu
.src
[0].chan
= i
;
1612 alu
.src
[1] = r600_src
[1];
1615 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 2);
1618 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 0);
1621 alu
.src
[1].chan
= tgsi_chan(&inst
->Src
[1], 1);
1624 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1625 alu
.src
[1].chan
= i
;
1628 alu
.src
[2].sel
= ctx
->temp_reg
;
1630 alu
.src
[2].chan
= i
;
1633 alu
.dst
.sel
= ctx
->temp_reg
;
1635 r
= tgsi_dst(ctx
, &inst
->Dst
[0], i
, &alu
.dst
);
1644 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1649 return tgsi_helper_copy(ctx
, inst
);
1653 static int tgsi_exp(struct r600_shader_ctx
*ctx
)
1655 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1656 struct r600_bc_alu_src r600_src
[3];
1657 struct r600_bc_alu alu
;
1660 /* result.x = 2^floor(src); */
1661 if (inst
->Dst
[0].Register
.WriteMask
& 1) {
1662 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1664 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
;
1665 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1669 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1671 alu
.dst
.sel
= ctx
->temp_reg
;
1675 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1679 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1680 alu
.src
[0].sel
= ctx
->temp_reg
;
1681 alu
.src
[0].chan
= 0;
1683 alu
.dst
.sel
= ctx
->temp_reg
;
1687 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1692 /* result.y = tmp - floor(tmp); */
1693 if ((inst
->Dst
[0].Register
.WriteMask
>> 1) & 1) {
1694 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1696 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
;
1697 alu
.src
[0] = r600_src
[0];
1698 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1701 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1703 alu
.dst
.sel
= ctx
->temp_reg
;
1704 // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1712 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1717 /* result.z = RoughApprox2ToX(tmp);*/
1718 if ((inst
->Dst
[0].Register
.WriteMask
>> 2) & 0x1) {
1719 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1720 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
;
1721 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1724 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1726 alu
.dst
.sel
= ctx
->temp_reg
;
1732 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1738 /* result.w = 1.0;*/
1739 if ((inst
->Dst
[0].Register
.WriteMask
>> 3) & 0x1) {
1740 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1742 alu
.inst
= V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
;
1743 alu
.src
[0].sel
= V_SQ_ALU_SRC_1
;
1744 alu
.src
[0].chan
= 0;
1746 alu
.dst
.sel
= ctx
->temp_reg
;
1750 r
= r600_bc_add_alu(ctx
->bc
, &alu
);
1754 return tgsi_helper_copy(ctx
, inst
);
1757 static int emit_logic_pred(struct r600_shader_ctx
*ctx
, int opcode
)
1759 struct tgsi_full_instruction
*inst
= &ctx
->parse
.FullToken
.FullInstruction
;
1760 struct r600_bc_alu alu
;
1763 memset(&alu
, 0, sizeof(struct r600_bc_alu
));
1767 alu
.dst
.sel
= ctx
->temp_reg
;
1771 r
= tgsi_src(ctx
, &inst
->Src
[0], &alu
.src
[0]);
1774 alu
.src
[0].chan
= tgsi_chan(&inst
->Src
[0], 0);
1775 alu
.src
[1].sel
= V_SQ_ALU_SRC_0
;
1776 alu
.src
[1].chan
= 0;
1780 r
= r600_bc_add_alu_type(ctx
->bc
, &alu
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
);
1786 static int pops(struct r600_shader_ctx
*ctx
, int pops
)
1788 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_POP
);
1789 ctx
->bc
->cf_last
->pop_count
= pops
;
1793 static inline void callstack_decrease_current(struct r600_shader_ctx
*ctx
, unsigned reason
)
1797 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
--;
1801 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
-= 4;
1804 /* TOODO : for 16 vp asic should -= 2; */
1805 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
--;
1810 static inline void callstack_check_depth(struct r600_shader_ctx
*ctx
, unsigned reason
, unsigned check_max_only
)
1812 if (check_max_only
) {
1822 if ((ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
+ diff
) >
1823 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
) {
1824 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
=
1825 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
+ diff
;
1831 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
++;
1835 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
+= 4;
1838 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
++;
1842 if ((ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
) >
1843 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
) {
1844 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].max
=
1845 ctx
->bc
->callstack
[ctx
->bc
->call_sp
].current
;
1849 static void fc_set_mid(struct r600_shader_ctx
*ctx
, int fc_sp
)
1851 struct r600_cf_stack_entry
*sp
= &ctx
->bc
->fc_stack
[fc_sp
];
1853 sp
->mid
= (struct r600_bc_cf
**)realloc((void *)sp
->mid
,
1854 sizeof(struct r600_bc_cf
*) * (sp
->num_mid
+ 1));
1855 sp
->mid
[sp
->num_mid
] = ctx
->bc
->cf_last
;
1859 static void fc_pushlevel(struct r600_shader_ctx
*ctx
, int type
)
1862 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].type
= type
;
1863 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
= ctx
->bc
->cf_last
;
1866 static void fc_poplevel(struct r600_shader_ctx
*ctx
)
1868 struct r600_cf_stack_entry
*sp
= &ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
];
1880 static int emit_return(struct r600_shader_ctx
*ctx
)
1882 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_RETURN
);
1886 static int emit_jump_to_offset(struct r600_shader_ctx
*ctx
, int pops
, int offset
)
1889 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_JUMP
);
1890 ctx
->bc
->cf_last
->pop_count
= pops
;
1891 /* TODO work out offset */
/* NOTE(review): only this signature survived extraction — the body that
 * records flag_value as the "return pending inside loop" marker is missing
 * here and must be restored from the upstream r600_shader.c. */
1895 static int emit_setret_in_loop_flag(struct r600_shader_ctx
*ctx
, unsigned flag_value
)
/* NOTE(review): only this signature survived extraction — the body that
 * emits the predicate test on the return flag is missing here and must be
 * restored from the upstream r600_shader.c. */
1900 static void emit_testflag(struct r600_shader_ctx
*ctx
)
1905 static void emit_return_on_flag(struct r600_shader_ctx
*ctx
, unsigned ifidx
)
1908 emit_jump_to_offset(ctx
, 1, 4);
1909 emit_setret_in_loop_flag(ctx
, V_SQ_ALU_SRC_0
);
1910 pops(ctx
, ifidx
+ 1);
1914 static void break_loop_on_flag(struct r600_shader_ctx
*ctx
, unsigned fc_sp
)
1918 r600_bc_add_cfinst(ctx
->bc
, ctx
->inst_info
->r600_opcode
);
1919 ctx
->bc
->cf_last
->pop_count
= 1;
1921 fc_set_mid(ctx
, fc_sp
);
1927 static int tgsi_if(struct r600_shader_ctx
*ctx
)
1929 emit_logic_pred(ctx
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
);
1931 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_JUMP
);
1933 fc_pushlevel(ctx
, FC_IF
);
1935 callstack_check_depth(ctx
, FC_PUSH_VPM
, 0);
1939 static int tgsi_else(struct r600_shader_ctx
*ctx
)
1941 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_ELSE
);
1942 ctx
->bc
->cf_last
->pop_count
= 1;
1944 fc_set_mid(ctx
, ctx
->bc
->fc_sp
);
1945 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->cf_addr
= ctx
->bc
->cf_last
->id
;
1949 static int tgsi_endif(struct r600_shader_ctx
*ctx
)
1952 if (ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].type
!= FC_IF
) {
1953 R600_ERR("if/endif unbalanced in shader\n");
1957 if (ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].mid
== NULL
) {
1958 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->cf_addr
= ctx
->bc
->cf_last
->id
+ 2;
1959 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->pop_count
= 1;
1961 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].mid
[0]->cf_addr
= ctx
->bc
->cf_last
->id
+ 2;
1965 callstack_decrease_current(ctx
, FC_PUSH_VPM
);
1969 static int tgsi_bgnloop(struct r600_shader_ctx
*ctx
)
1971 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
);
1973 fc_pushlevel(ctx
, FC_LOOP
);
1975 /* check stack depth */
1976 callstack_check_depth(ctx
, FC_LOOP
, 0);
1980 static int tgsi_endloop(struct r600_shader_ctx
*ctx
)
1984 r600_bc_add_cfinst(ctx
->bc
, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
);
1986 if (ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].type
!= FC_LOOP
) {
1987 R600_ERR("loop/endloop in shader code are not paired.\n");
1991 /* fixup loop pointers - from r600isa
1992 LOOP END points to CF after LOOP START,
1993 LOOP START point to CF after LOOP END
1994 BRK/CONT point to LOOP END CF
1996 ctx
->bc
->cf_last
->cf_addr
= ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->id
+ 2;
1998 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].start
->cf_addr
= ctx
->bc
->cf_last
->id
+ 2;
2000 for (i
= 0; i
< ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].num_mid
; i
++) {
2001 ctx
->bc
->fc_stack
[ctx
->bc
->fc_sp
].mid
[i
]->cf_addr
= ctx
->bc
->cf_last
->id
;
2003 /* TODO add LOOPRET support */
2005 callstack_decrease_current(ctx
, FC_LOOP
);
2009 static int tgsi_loop_brk_cont(struct r600_shader_ctx
*ctx
)
2013 for (fscp
= ctx
->bc
->fc_sp
; fscp
> 0; fscp
--)
2015 if (FC_LOOP
== ctx
->bc
->fc_stack
[fscp
].type
)
2020 R600_ERR("Break not inside loop/endloop pair\n");
2024 r600_bc_add_cfinst(ctx
->bc
, ctx
->inst_info
->r600_opcode
);
2025 ctx
->bc
->cf_last
->pop_count
= 1;
2027 fc_set_mid(ctx
, fscp
);
2030 callstack_check_depth(ctx
, FC_PUSH_VPM
, 1);
2034 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction
[] = {
2035 {TGSI_OPCODE_ARL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2036 {TGSI_OPCODE_MOV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
2037 {TGSI_OPCODE_LIT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lit
},
2038 {TGSI_OPCODE_RCP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
, tgsi_trans_srcx_replicate
},
2039 {TGSI_OPCODE_RSQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
, tgsi_trans_srcx_replicate
},
2040 {TGSI_OPCODE_EXP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_exp
},
2041 {TGSI_OPCODE_LOG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2042 {TGSI_OPCODE_MUL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
, tgsi_op2
},
2043 {TGSI_OPCODE_ADD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
2044 {TGSI_OPCODE_DP3
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2045 {TGSI_OPCODE_DP4
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2046 {TGSI_OPCODE_DST
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2047 {TGSI_OPCODE_MIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
, tgsi_op2
},
2048 {TGSI_OPCODE_MAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
, tgsi_op2
},
2049 {TGSI_OPCODE_SLT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2_swap
},
2050 {TGSI_OPCODE_SGE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2
},
2051 {TGSI_OPCODE_MAD
, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD
, tgsi_op3
},
2052 {TGSI_OPCODE_SUB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
, tgsi_op2
},
2053 {TGSI_OPCODE_LRP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_lrp
},
2054 {TGSI_OPCODE_CND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2056 {20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2057 {TGSI_OPCODE_DP2A
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2059 {22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2060 {23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2061 {TGSI_OPCODE_FRC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
, tgsi_op2
},
2062 {TGSI_OPCODE_CLAMP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2063 {TGSI_OPCODE_FLR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
, tgsi_op2
},
2064 {TGSI_OPCODE_ROUND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2065 {TGSI_OPCODE_EX2
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
, tgsi_trans_srcx_replicate
},
2066 {TGSI_OPCODE_LG2
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
, tgsi_trans_srcx_replicate
},
2067 {TGSI_OPCODE_POW
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_pow
},
2068 {TGSI_OPCODE_XPD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_xpd
},
2070 {32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2071 {TGSI_OPCODE_ABS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
, tgsi_op2
},
2072 {TGSI_OPCODE_RCC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2073 {TGSI_OPCODE_DPH
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2074 {TGSI_OPCODE_COS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
, tgsi_trig
},
2075 {TGSI_OPCODE_DDX
, 0, SQ_TEX_INST_GET_GRADIENTS_H
, tgsi_tex
},
2076 {TGSI_OPCODE_DDY
, 0, SQ_TEX_INST_GET_GRADIENTS_V
, tgsi_tex
},
2077 {TGSI_OPCODE_KILP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
}, /* predicated kill */
2078 {TGSI_OPCODE_PK2H
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2079 {TGSI_OPCODE_PK2US
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2080 {TGSI_OPCODE_PK4B
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2081 {TGSI_OPCODE_PK4UB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2082 {TGSI_OPCODE_RFL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2083 {TGSI_OPCODE_SEQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
, tgsi_op2
},
2084 {TGSI_OPCODE_SFL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2085 {TGSI_OPCODE_SGT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
, tgsi_op2
},
2086 {TGSI_OPCODE_SIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
, tgsi_trig
},
2087 {TGSI_OPCODE_SLE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
, tgsi_op2_swap
},
2088 {TGSI_OPCODE_SNE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
, tgsi_op2
},
2089 {TGSI_OPCODE_STR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2090 {TGSI_OPCODE_TEX
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
2091 {TGSI_OPCODE_TXD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2092 {TGSI_OPCODE_TXP
, 0, SQ_TEX_INST_SAMPLE
, tgsi_tex
},
2093 {TGSI_OPCODE_UP2H
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2094 {TGSI_OPCODE_UP2US
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2095 {TGSI_OPCODE_UP4B
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2096 {TGSI_OPCODE_UP4UB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2097 {TGSI_OPCODE_X2D
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2098 {TGSI_OPCODE_ARA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2099 {TGSI_OPCODE_ARR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2100 {TGSI_OPCODE_BRA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2101 {TGSI_OPCODE_CAL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2102 {TGSI_OPCODE_RET
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2103 {TGSI_OPCODE_SSG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_ssg
},
2104 {TGSI_OPCODE_CMP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_cmp
},
2105 {TGSI_OPCODE_SCS
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2106 {TGSI_OPCODE_TXB
, 0, SQ_TEX_INST_SAMPLE_L
, tgsi_tex
},
2107 {TGSI_OPCODE_NRM
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2108 {TGSI_OPCODE_DIV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2109 {TGSI_OPCODE_DP2
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
, tgsi_dp
},
2110 {TGSI_OPCODE_TXL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2111 {TGSI_OPCODE_BRK
, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
, tgsi_loop_brk_cont
},
2112 {TGSI_OPCODE_IF
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_if
},
2114 {75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2115 {76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2116 {TGSI_OPCODE_ELSE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_else
},
2117 {TGSI_OPCODE_ENDIF
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endif
},
2119 {79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2120 {80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2121 {TGSI_OPCODE_PUSHA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2122 {TGSI_OPCODE_POPA
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2123 {TGSI_OPCODE_CEIL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2124 {TGSI_OPCODE_I2F
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2125 {TGSI_OPCODE_NOT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2126 {TGSI_OPCODE_TRUNC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
, tgsi_trans_srcx_replicate
},
2127 {TGSI_OPCODE_SHL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2129 {88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2130 {TGSI_OPCODE_AND
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2131 {TGSI_OPCODE_OR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2132 {TGSI_OPCODE_MOD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2133 {TGSI_OPCODE_XOR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2134 {TGSI_OPCODE_SAD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2135 {TGSI_OPCODE_TXF
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2136 {TGSI_OPCODE_TXQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2137 {TGSI_OPCODE_CONT
, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
, tgsi_loop_brk_cont
},
2138 {TGSI_OPCODE_EMIT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2139 {TGSI_OPCODE_ENDPRIM
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2140 {TGSI_OPCODE_BGNLOOP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_bgnloop
},
2141 {TGSI_OPCODE_BGNSUB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2142 {TGSI_OPCODE_ENDLOOP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_endloop
},
2143 {TGSI_OPCODE_ENDSUB
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2145 {103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2146 {104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2147 {105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2148 {106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2149 {TGSI_OPCODE_NOP
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2151 {108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2152 {109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2153 {110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2154 {111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2155 {TGSI_OPCODE_NRM4
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2156 {TGSI_OPCODE_CALLNZ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2157 {TGSI_OPCODE_IFC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2158 {TGSI_OPCODE_BREAKC
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2159 {TGSI_OPCODE_KIL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
, tgsi_kill
}, /* conditional kill */
2160 {TGSI_OPCODE_END
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_end
}, /* aka HALT */
2162 {118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2163 {TGSI_OPCODE_F2I
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2164 {TGSI_OPCODE_IDIV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2165 {TGSI_OPCODE_IMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2166 {TGSI_OPCODE_IMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2167 {TGSI_OPCODE_INEG
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2168 {TGSI_OPCODE_ISGE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2169 {TGSI_OPCODE_ISHR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2170 {TGSI_OPCODE_ISLT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2171 {TGSI_OPCODE_F2U
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2172 {TGSI_OPCODE_U2F
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2173 {TGSI_OPCODE_UADD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2174 {TGSI_OPCODE_UDIV
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2175 {TGSI_OPCODE_UMAD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2176 {TGSI_OPCODE_UMAX
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2177 {TGSI_OPCODE_UMIN
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2178 {TGSI_OPCODE_UMOD
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2179 {TGSI_OPCODE_UMUL
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2180 {TGSI_OPCODE_USEQ
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2181 {TGSI_OPCODE_USGE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2182 {TGSI_OPCODE_USHR
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2183 {TGSI_OPCODE_USLT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2184 {TGSI_OPCODE_USNE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2185 {TGSI_OPCODE_SWITCH
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2186 {TGSI_OPCODE_CASE
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2187 {TGSI_OPCODE_DEFAULT
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2188 {TGSI_OPCODE_ENDSWITCH
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},
2189 {TGSI_OPCODE_LAST
, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
, tgsi_unsupported
},