r600g: add FRC, FLR, DDX and DDY
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "pipe/p_shader_tokens.h"
24 #include "tgsi/tgsi_parse.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_dump.h"
27 #include "util/u_format.h"
28 #include "r600_screen.h"
29 #include "r600_context.h"
30 #include "r600_shader.h"
31 #include "r600_asm.h"
32 #include "r600_sq.h"
33 #include "r600d.h"
34 #include <stdio.h>
35 #include <errno.h>
36
37
38 struct r600_shader_tgsi_instruction;
39
40 struct r600_shader_ctx {
41 struct tgsi_shader_info info;
42 struct tgsi_parse_context parse;
43 const struct tgsi_token *tokens;
44 unsigned type;
45 unsigned file_offset[TGSI_FILE_COUNT];
46 unsigned temp_reg;
47 struct r600_shader_tgsi_instruction *inst_info;
48 struct r600_bc *bc;
49 struct r600_shader *shader;
50 u32 value[4];
51 };
52
53 struct r600_shader_tgsi_instruction {
54 unsigned tgsi_opcode;
55 unsigned is_op3;
56 unsigned r600_opcode;
57 int (*process)(struct r600_shader_ctx *ctx);
58 };
59
60 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
61 static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
62
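/* Patch the vertex fetch clauses of a vertex shader so their destination
 * swizzles match the formats of the currently bound vertex elements, then
 * rebuild the bytecode; fragment shaders need no such fixup and return early.
 */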
63 static int r600_shader_update(struct pipe_context *ctx, struct r600_shader *shader)
64 {
65 struct r600_context *rctx = r600_context(ctx);
66 const struct util_format_description *desc;
67 enum pipe_format resource_format[160];
68 unsigned i, nresources = 0;
69 struct r600_bc *bc = &shader->bc;
70 struct r600_bc_cf *cf;
71 struct r600_bc_vtx *vtx;
72
73 if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
74 return 0;
75 for (i = 0; i < rctx->vertex_elements->count; i++) {
76 resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
77 }
78 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
79 switch (cf->inst) {
80 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
81 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
82 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
83 desc = util_format_description(resource_format[vtx->buffer_id]);
84 if (desc == NULL) {
85 R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
86 return -EINVAL;
87 }
88 vtx->dst_sel_x = desc->swizzle[0];
89 vtx->dst_sel_y = desc->swizzle[1];
90 vtx->dst_sel_z = desc->swizzle[2];
91 vtx->dst_sel_w = desc->swizzle[3];
92 }
93 break;
94 default:
95 break;
96 }
97 }
98 return r600_bc_build(&shader->bc);
99 }
100
101 int r600_pipe_shader_create(struct pipe_context *ctx,
102 struct r600_context_state *rpshader,
103 const struct tgsi_token *tokens)
104 {
105 struct r600_screen *rscreen = r600_screen(ctx->screen);
106 int r;
107
108 fprintf(stderr, "--------------------------------------------------------------\n");
109 tgsi_dump(tokens, 0);
110 if (rpshader == NULL)
111 return -ENOMEM;
112 rpshader->shader.family = radeon_get_family(rscreen->rw);
113 r = r600_shader_from_tgsi(tokens, &rpshader->shader);
114 if (r) {
115 R600_ERR("translation from TGSI failed!\n");
116 return r;
117 }
118 r = r600_bc_build(&rpshader->shader.bc);
119 if (r) {
120 R600_ERR("building bytecode failed!\n");
121 return r;
122 }
123 fprintf(stderr, "______________________________________________________________\n");
124 return 0;
125 }
126
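/* Build the R600_VS_SHADER radeon state.  Proper semantic ids are not yet
 * available from TGSI, so SPI_VS_OUT_ID_* is simply filled with the output
 * index itself; VS_EXPORT_COUNT is set to noutput - 2, presumably the number
 * of parameter exports minus one when a single position output is present.
 */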
127 static int r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_context_state *rpshader)
128 {
129 struct r600_screen *rscreen = r600_screen(ctx->screen);
130 struct r600_shader *rshader = &rpshader->shader;
131 struct radeon_state *state;
132 unsigned i, tmp;
133
134 rpshader->rstate = radeon_state_decref(rpshader->rstate);
135 state = radeon_state(rscreen->rw, R600_VS_SHADER_TYPE, R600_VS_SHADER);
136 if (state == NULL)
137 return -ENOMEM;
138 for (i = 0; i < 10; i++) {
139 state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i] = 0;
140 }
141 /* so far we never get a proper semantic id from tgsi */
142 for (i = 0; i < 32; i++) {
143 tmp = i << ((i & 3) * 8);
144 state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i / 4] |= tmp;
145 }
146 state->states[R600_VS_SHADER__SPI_VS_OUT_CONFIG] = S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2);
147 state->states[R600_VS_SHADER__SQ_PGM_RESOURCES_VS] = S_028868_NUM_GPRS(rshader->bc.ngpr);
148 rpshader->rstate = state;
149 rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
150 rpshader->rstate->bo[1] = radeon_bo_incref(rscreen->rw, rpshader->bo);
151 rpshader->rstate->nbo = 2;
152 rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
153 return radeon_state_pm4(state);
154 }
155
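/* Build the R600_PS_SHADER radeon state: one SPI_PS_INPUT_CNTL per input
 * (flat shading for COLOR/BCOLOR when requested, PT_SPRITE_TEX where the
 * rasterizer enables sprite coordinates) and an export mask with bit 0 for
 * a depth (POSITION) output and one bit per colour output.
 */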
156 static int r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_context_state *rpshader)
157 {
158 const struct pipe_rasterizer_state *rasterizer;
159 struct r600_screen *rscreen = r600_screen(ctx->screen);
160 struct r600_shader *rshader = &rpshader->shader;
161 struct r600_context *rctx = r600_context(ctx);
162 struct radeon_state *state;
163 unsigned i, tmp, exports_ps, num_cout;
164
165 rasterizer = &rctx->rasterizer->state.rasterizer;
166 rpshader->rstate = radeon_state_decref(rpshader->rstate);
167 state = radeon_state(rscreen->rw, R600_PS_SHADER_TYPE, R600_PS_SHADER);
168 if (state == NULL)
169 return -ENOMEM;
170 for (i = 0; i < rshader->ninput; i++) {
171 tmp = S_028644_SEMANTIC(i);
172 tmp |= S_028644_SEL_CENTROID(1);
173 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
174 rshader->input[i].name == TGSI_SEMANTIC_BCOLOR) {
175 tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
176 }
177 if (rasterizer->sprite_coord_enable & (1 << i)) {
178 tmp |= S_028644_PT_SPRITE_TEX(1);
179 }
180 state->states[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0 + i] = tmp;
181 }
182
183 exports_ps = 0;
184 num_cout = 0;
185 for (i = 0; i < rshader->noutput; i++) {
186 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
187 exports_ps |= 1;
188 else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
189 exports_ps |= (1 << (num_cout+1));
190 num_cout++;
191 }
192 }
193 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] = S_0286CC_NUM_INTERP(rshader->ninput) |
194 S_0286CC_PERSP_GRADIENT_ENA(1);
195 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_1] = 0x00000000;
196 state->states[R600_PS_SHADER__SQ_PGM_RESOURCES_PS] = S_028868_NUM_GPRS(rshader->bc.ngpr);
197 state->states[R600_PS_SHADER__SQ_PGM_EXPORTS_PS] = exports_ps;
198 rpshader->rstate = state;
199 rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
200 rpshader->rstate->nbo = 1;
201 rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
202 return radeon_state_pm4(state);
203 }
204
205 static int r600_pipe_shader(struct pipe_context *ctx, struct r600_context_state *rpshader)
206 {
207 struct r600_screen *rscreen = r600_screen(ctx->screen);
208 struct r600_context *rctx = r600_context(ctx);
209 struct r600_shader *rshader = &rpshader->shader;
210 int r;
211
212 /* copy new shader */
213 radeon_bo_decref(rscreen->rw, rpshader->bo);
214 rpshader->bo = NULL;
215 rpshader->bo = radeon_bo(rscreen->rw, 0, rshader->bc.ndw * 4,
216 4096, NULL);
217 if (rpshader->bo == NULL) {
218 return -ENOMEM;
219 }
220 radeon_bo_map(rscreen->rw, rpshader->bo);
221 memcpy(rpshader->bo->data, rshader->bc.bytecode, rshader->bc.ndw * 4);
222 radeon_bo_unmap(rscreen->rw, rpshader->bo);
223 /* build state */
224 rshader->flat_shade = rctx->flat_shade;
225 switch (rshader->processor_type) {
226 case TGSI_PROCESSOR_VERTEX:
227 r = r600_pipe_shader_vs(ctx, rpshader);
228 break;
229 case TGSI_PROCESSOR_FRAGMENT:
230 r = r600_pipe_shader_ps(ctx, rpshader);
231 break;
232 default:
233 r = -EINVAL;
234 break;
235 }
236 return r;
237 }
238
239 int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_context_state *rpshader)
240 {
241 struct r600_context *rctx = r600_context(ctx);
242 int r;
243
244 if (rpshader == NULL)
245 return -EINVAL;
246 /* there should be enough inputs */
247 if (rctx->vertex_elements->count < rpshader->shader.bc.nresource) {
248 R600_ERR("%d resources provided, expecting %d\n",
249 rctx->vertex_elements->count, rpshader->shader.bc.nresource);
250 return -EINVAL;
251 }
252 r = r600_shader_update(ctx, &rpshader->shader);
253 if (r)
254 return r;
255 return r600_pipe_shader(ctx, rpshader);
256 }
257
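/* Reject TGSI features the translator cannot handle yet: more than one
 * destination, predicates, labels, and indirect/dimension/absolute
 * addressing on sources or destinations.
 */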
258 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
259 {
260 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
261 int j;
262
263 if (i->Instruction.NumDstRegs > 1) {
264 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
265 return -EINVAL;
266 }
267 if (i->Instruction.Predicate) {
268 R600_ERR("predicate unsupported\n");
269 return -EINVAL;
270 }
271 if (i->Instruction.Label) {
272 R600_ERR("label unsupported\n");
273 return -EINVAL;
274 }
275 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
276 if (i->Src[j].Register.Indirect ||
277 i->Src[j].Register.Dimension ||
278 i->Src[j].Register.Absolute) {
279 R600_ERR("unsupported src (indirect|dimension|absolute)\n");
280 return -EINVAL;
281 }
282 }
283 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
284 if (i->Dst[j].Register.Indirect || i->Dst[j].Register.Dimension) {
285 R600_ERR("unsupported dst (indirect|dimension)\n");
286 return -EINVAL;
287 }
288 }
289 return 0;
290 }
291
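/* Record input and output declarations in the shader.  For vertex shaders
 * every input additionally becomes a vertex fetch that loads GPR[1 + i]
 * from buffer i, using the vertex index held in GPR0.
 */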
292 static int tgsi_declaration(struct r600_shader_ctx *ctx)
293 {
294 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
295 struct r600_bc_vtx vtx;
296 unsigned i;
297 int r;
298
299 switch (d->Declaration.File) {
300 case TGSI_FILE_INPUT:
301 i = ctx->shader->ninput++;
302 ctx->shader->input[i].name = d->Semantic.Name;
303 ctx->shader->input[i].sid = d->Semantic.Index;
304 ctx->shader->input[i].interpolate = d->Declaration.Interpolate;
305 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + i;
306 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
307 /* turn input into fetch */
308 memset(&vtx, 0, sizeof(struct r600_bc_vtx));
309 vtx.inst = 0;
310 vtx.fetch_type = 0;
311 vtx.buffer_id = i;
312 /* register containing the index into the buffer */
313 vtx.src_gpr = 0;
314 vtx.src_sel_x = 0;
315 vtx.mega_fetch_count = 0x1F;
316 vtx.dst_gpr = ctx->shader->input[i].gpr;
317 vtx.dst_sel_x = 0;
318 vtx.dst_sel_y = 1;
319 vtx.dst_sel_z = 2;
320 vtx.dst_sel_w = 3;
321 r = r600_bc_add_vtx(ctx->bc, &vtx);
322 if (r)
323 return r;
324 }
325 break;
326 case TGSI_FILE_OUTPUT:
327 i = ctx->shader->noutput++;
328 ctx->shader->output[i].name = d->Semantic.Name;
329 ctx->shader->output[i].sid = d->Semantic.Index;
330 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + i;
331 ctx->shader->output[i].interpolate = d->Declaration.Interpolate;
332 break;
333 case TGSI_FILE_CONSTANT:
334 case TGSI_FILE_TEMPORARY:
335 case TGSI_FILE_SAMPLER:
336 break;
337 default:
338 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
339 return -EINVAL;
340 }
341 return 0;
342 }
343
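/* Main TGSI -> r600 bytecode translation: walk the token stream, translate
 * each instruction through r600_shader_tgsi_instruction[], then append the
 * export clauses (position, parameters, pixel colours), adding fake exports
 * where required and flagging the last export of each type as EXPORT_DONE.
 */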
344 int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
345 {
346 struct tgsi_full_immediate *immediate;
347 struct r600_shader_ctx ctx;
348 struct r600_bc_output output[32];
349 unsigned output_done, noutput;
350 unsigned opcode;
351 int i, r = 0, pos0;
352
353 ctx.bc = &shader->bc;
354 ctx.shader = shader;
355 r = r600_bc_init(ctx.bc, shader->family);
356 if (r)
357 return r;
358 ctx.tokens = tokens;
359 tgsi_scan_shader(tokens, &ctx.info);
360 tgsi_parse_init(&ctx.parse, tokens);
361 ctx.type = ctx.parse.FullHeader.Processor.Processor;
362 shader->processor_type = ctx.type;
363
364 /* register allocations */
365 /* Values [0,127] correspond to GPR[0..127].
366 * Values [256,511] correspond to cfile constants c[0..255].
367 * Other special values are shown in the list below.
368 * 248 SQ_ALU_SRC_0: special constant 0.0.
369 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
370 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
371 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
372 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
373 * 253 SQ_ALU_SRC_LITERAL: literal constant.
374 * 254 SQ_ALU_SRC_PV: previous vector result.
375 * 255 SQ_ALU_SRC_PS: previous scalar result.
376 */
377 for (i = 0; i < TGSI_FILE_COUNT; i++) {
378 ctx.file_offset[i] = 0;
379 }
380 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
381 ctx.file_offset[TGSI_FILE_INPUT] = 1;
382 }
383 ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] +
384 ctx.info.file_count[TGSI_FILE_INPUT];
385 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
386 ctx.info.file_count[TGSI_FILE_OUTPUT];
387 ctx.file_offset[TGSI_FILE_CONSTANT] = 256;
388 ctx.file_offset[TGSI_FILE_IMMEDIATE] = 253;
389 ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
390 ctx.info.file_count[TGSI_FILE_TEMPORARY];
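/* Worked example (illustrative): a vertex shader with 2 inputs, 1 output
 * and 3 temporaries is laid out as
 *   GPR0      - vertex index (reserved above)
 *   GPR1-GPR2 - inputs
 *   GPR3      - output
 *   GPR4-GPR6 - temporaries
 *   temp_reg  - GPR7, scratch for the translator
 * while constants are read from the cfile (256 + index) and immediates
 * through the literal selector 253.
 */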
391
392 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
393 tgsi_parse_token(&ctx.parse);
394 switch (ctx.parse.FullToken.Token.Type) {
395 case TGSI_TOKEN_TYPE_IMMEDIATE:
396 immediate = &ctx.parse.FullToken.FullImmediate;
397 ctx.value[0] = immediate->u[0].Uint;
398 ctx.value[1] = immediate->u[1].Uint;
399 ctx.value[2] = immediate->u[2].Uint;
400 ctx.value[3] = immediate->u[3].Uint;
401 break;
402 case TGSI_TOKEN_TYPE_DECLARATION:
403 r = tgsi_declaration(&ctx);
404 if (r)
405 goto out_err;
406 break;
407 case TGSI_TOKEN_TYPE_INSTRUCTION:
408 r = tgsi_is_supported(&ctx);
409 if (r)
410 goto out_err;
411 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
412 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
413 r = ctx.inst_info->process(&ctx);
414 if (r)
415 goto out_err;
416 r = r600_bc_add_literal(ctx.bc, ctx.value);
417 if (r)
418 goto out_err;
419 break;
420 default:
421 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
422 r = -EINVAL;
423 goto out_err;
424 }
425 }
426 /* export output */
427 noutput = shader->noutput;
428 for (i = 0, pos0 = 0; i < noutput; i++) {
429 memset(&output[i], 0, sizeof(struct r600_bc_output));
430 output[i].gpr = shader->output[i].gpr;
431 output[i].elem_size = 3;
432 output[i].swizzle_x = 0;
433 output[i].swizzle_y = 1;
434 output[i].swizzle_z = 2;
435 output[i].swizzle_w = 3;
436 output[i].barrier = 1;
437 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
438 output[i].array_base = i - pos0;
439 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
440 switch (ctx.type) {
441 case TGSI_PROCESSOR_VERTEX:
442 if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
443 output[i].array_base = 60;
444 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
445 /* position doesn't count in array_base */
446 pos0++;
447 }
448 if (shader->output[i].name == TGSI_SEMANTIC_PSIZE) {
449 output[i].array_base = 61;
450 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
451 /* position doesn't count in array_base */
452 pos0++;
453 }
454 break;
455 case TGSI_PROCESSOR_FRAGMENT:
456 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
457 output[i].array_base = shader->output[i].sid;
458 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
459 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
460 output[i].array_base = 61;
461 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
462 } else {
463 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
464 r = -EINVAL;
465 goto out_err;
466 }
467 break;
468 default:
469 R600_ERR("unsupported processor type %d\n", ctx.type);
470 r = -EINVAL;
471 goto out_err;
472 }
473 }
474 /* add fake param output for vertex shader if no param is exported */
475 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
476 for (i = 0, pos0 = 0; i < noutput; i++) {
477 if (output[i].type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM) {
478 pos0 = 1;
479 break;
480 }
481 }
482 if (!pos0) {
483 memset(&output[i], 0, sizeof(struct r600_bc_output));
484 output[i].gpr = 0;
485 output[i].elem_size = 3;
486 output[i].swizzle_x = 0;
487 output[i].swizzle_y = 1;
488 output[i].swizzle_z = 2;
489 output[i].swizzle_w = 3;
490 output[i].barrier = 1;
491 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
492 output[i].array_base = 0;
493 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
494 noutput++;
495 }
496 }
497 /* add fake pixel export */
498 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && !noutput) {
499 memset(&output[0], 0, sizeof(struct r600_bc_output));
500 output[0].gpr = 0;
501 output[0].elem_size = 3;
502 output[0].swizzle_x = 7;
503 output[0].swizzle_y = 7;
504 output[0].swizzle_z = 7;
505 output[0].swizzle_w = 7;
506 output[0].barrier = 1;
507 output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
508 output[0].array_base = 0;
509 output[0].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
510 noutput++;
511 }
512 /* set export done on last export of each type */
513 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
514 if (i == (noutput - 1)) {
515 output[i].end_of_program = 1;
516 }
517 if (!(output_done & (1 << output[i].type))) {
518 output_done |= (1 << output[i].type);
519 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE;
520 }
521 }
522 /* add output to bytecode */
523 for (i = 0; i < noutput; i++) {
524 r = r600_bc_add_output(ctx.bc, &output[i]);
525 if (r)
526 goto out_err;
527 }
528 tgsi_parse_free(&ctx.parse);
529 return 0;
530 out_err:
531 tgsi_parse_free(&ctx.parse);
532 return r;
533 }
534
535 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
536 {
537 R600_ERR("tgsi opcode %d unsupported\n", ctx->inst_info->tgsi_opcode);
538 return -EINVAL;
539 }
540
541 static int tgsi_end(struct r600_shader_ctx *ctx)
542 {
543 return 0;
544 }
545
546 static int tgsi_src(struct r600_shader_ctx *ctx,
547 const struct tgsi_full_src_register *tgsi_src,
548 struct r600_bc_alu_src *r600_src)
549 {
550 memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
551 r600_src->sel = tgsi_src->Register.Index;
552 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
553 r600_src->sel = 0;
554 }
555 r600_src->neg = tgsi_src->Register.Negate;
556 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
557 return 0;
558 }
559
560 static int tgsi_dst(struct r600_shader_ctx *ctx,
561 const struct tgsi_full_dst_register *tgsi_dst,
562 unsigned swizzle,
563 struct r600_bc_alu_dst *r600_dst)
564 {
565 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
566
567 r600_dst->sel = tgsi_dst->Register.Index;
568 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
569 r600_dst->chan = swizzle;
570 r600_dst->write = 1;
571 if (inst->Instruction.Saturate) {
572 r600_dst->clamp = 1;
573 }
574 return 0;
575 }
576
577 static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
578 {
579 switch (swizzle) {
580 case 0:
581 return tgsi_src->Register.SwizzleX;
582 case 1:
583 return tgsi_src->Register.SwizzleY;
584 case 2:
585 return tgsi_src->Register.SwizzleZ;
586 case 3:
587 return tgsi_src->Register.SwizzleW;
588 default:
589 return 0;
590 }
591 }
592
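/* Only a limited number of constant-file operands can be read by one ALU
 * instruction, so instructions with several constant sources route some of
 * the reads through temporary GPRs (MOV to ctx->temp_reg) before the
 * instruction itself is emitted.
 */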
593 static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
594 {
595 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
596 struct r600_bc_alu alu;
597 int i, j, k, nconst, r;
598
599 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
600 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
601 nconst++;
602 }
603 r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
604 if (r) {
605 return r;
606 }
607 }
608 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
609 if (inst->Src[j].Register.File == TGSI_FILE_CONSTANT && j > 0) {
610 for (k = 0; k < 4; k++) {
611 memset(&alu, 0, sizeof(struct r600_bc_alu));
612 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
613 alu.src[0].sel = r600_src[0].sel;
614 alu.src[0].chan = k;
615 alu.dst.sel = ctx->temp_reg + j;
616 alu.dst.chan = k;
617 alu.dst.write = 1;
618 if (k == 3)
619 alu.last = 1;
620 r = r600_bc_add_alu(ctx->bc, &alu);
621 if (r)
622 return r;
623 }
624 r600_src[0].sel = ctx->temp_reg + j;
625 j--;
626 }
627 }
628 return 0;
629 }
630
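/* Generic two-operand translation: one ALU instruction per destination
 * channel, with a NOP filling channels masked out by the writemask.  SUB
 * and ABS reuse ADD and MOV with the negate/abs source modifiers set below.
 */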
631 static int tgsi_op2(struct r600_shader_ctx *ctx)
632 {
633 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
634 struct r600_bc_alu_src r600_src[3];
635 struct r600_bc_alu alu;
636 int i, j, r;
637
638 r = tgsi_split_constant(ctx, r600_src);
639 if (r)
640 return r;
641 for (i = 0; i < 4; i++) {
642 memset(&alu, 0, sizeof(struct r600_bc_alu));
643 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
644 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
645 alu.dst.chan = i;
646 } else {
647 alu.inst = ctx->inst_info->r600_opcode;
648 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
649 alu.src[j] = r600_src[j];
650 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
651 }
652 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
653 if (r)
654 return r;
655 }
656 /* handle some special cases */
657 switch (ctx->inst_info->tgsi_opcode) {
658 case TGSI_OPCODE_SUB:
659 alu.src[1].neg = 1;
660 break;
661 case TGSI_OPCODE_ABS:
662 alu.src[0].abs = 1;
663 break;
664 default:
665 break;
666 }
667 if (i == 3) {
668 alu.last = 1;
669 }
670 r = r600_bc_add_alu(ctx->bc, &alu);
671 if (r)
672 return r;
673 }
674 return 0;
675 }
676
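/* Conditional kill: compare the constant 0.0 (selector 248) against each
 * channel of the source using the KILL* opcode from the instruction table.
 */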
677 static int tgsi_kill(struct r600_shader_ctx *ctx)
678 {
679 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
680 struct r600_bc_alu alu;
681 int i, r;
682
683 for (i = 0; i < 4; i++) {
684 memset(&alu, 0, sizeof(struct r600_bc_alu));
685 alu.inst = ctx->inst_info->r600_opcode;
686 alu.dst.chan = i;
687 alu.src[0].sel = 248;
688 r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
689 if (r)
690 return r;
691 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
692 if (i == 3) {
693 alu.last = 1;
694 }
695 r = r600_bc_add_alu(ctx->bc, &alu);
696 if (r)
697 return r;
698 }
699 return 0;
700 }
701
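/* SLT and SLE have no direct hardware opcode, so the operands are swapped
 * and SETGT/SETGE are used instead (a < b is equivalent to b > a).
 */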
702 static int tgsi_slt(struct r600_shader_ctx *ctx)
703 {
704 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
705 struct r600_bc_alu_src r600_src[3];
706 struct r600_bc_alu alu;
707 int i, r;
708
709 r = tgsi_split_constant(ctx, r600_src);
710 if (r)
711 return r;
712 for (i = 0; i < 4; i++) {
713 memset(&alu, 0, sizeof(struct r600_bc_alu));
714 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
715 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
716 alu.dst.chan = i;
717 } else {
718 alu.inst = ctx->inst_info->r600_opcode;
719 alu.src[1] = r600_src[0];
720 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
721 alu.src[0] = r600_src[1];
722 alu.src[0].chan = tgsi_chan(&inst->Src[1], i);
723 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
724 if (r)
725 return r;
726 }
727 if (i == 3) {
728 alu.last = 1;
729 }
730 r = r600_bc_add_alu(ctx->bc, &alu);
731 if (r)
732 return r;
733 }
734 return 0;
735 }
736
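/* Expand LIT:
 *   dst.x = 1.0
 *   dst.y = max(src.x, 0.0)
 *   dst.z = roughly pow(src.y, src.w) when src.x > 0, else 0.0
 *   dst.w = 1.0
 * The z component is only computed when its writemask bit is set, using
 * LOG_CLAMPED, MUL_LIT and EXP_IEEE below.
 */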
737 static int tgsi_lit(struct r600_shader_ctx *ctx)
738 {
739 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
740 struct r600_bc_alu alu;
741 int r;
742
743 /* dst.x <- 1.0 */
744 memset(&alu, 0, sizeof(struct r600_bc_alu));
745 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
746 alu.src[0].sel = 249; /*1.0*/
747 alu.src[0].chan = 0;
748 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
749 if (r)
750 return r;
751 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
752 r = r600_bc_add_alu(ctx->bc, &alu);
753 if (r)
754 return r;
755
756 /* dst.y = max(src.x, 0.0) */
757 memset(&alu, 0, sizeof(struct r600_bc_alu));
758 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX;
759 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
760 if (r)
761 return r;
762 alu.src[1].sel = 248; /*0.0*/
763 alu.src[1].chan = tgsi_chan(&inst->Src[0], 0);
764 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
765 if (r)
766 return r;
767 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
768 r = r600_bc_add_alu(ctx->bc, &alu);
769 if (r)
770 return r;
771
772 /* dst.z = NOP - fill Z slot */
773 memset(&alu, 0, sizeof(struct r600_bc_alu));
774 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
775 alu.dst.chan = 2;
776 r = r600_bc_add_alu(ctx->bc, &alu);
777 if (r)
778 return r;
779
780 /* dst.w <- 1.0 */
781 memset(&alu, 0, sizeof(struct r600_bc_alu));
782 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
783 alu.src[0].sel = 249;
784 alu.src[0].chan = 0;
785 r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
786 if (r)
787 return r;
788 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
789 alu.last = 1;
790 r = r600_bc_add_alu(ctx->bc, &alu);
791 if (r)
792 return r;
793
794 if (inst->Dst[0].Register.WriteMask & (1 << 2))
795 {
796 int chan;
797 int sel;
798
799 /* dst.z = log(src.y) */
800 memset(&alu, 0, sizeof(struct r600_bc_alu));
801 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED;
802 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
803 if (r)
804 return r;
805 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
806 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
807 if (r)
808 return r;
809 alu.last = 1;
810 r = r600_bc_add_alu(ctx->bc, &alu);
811 if (r)
812 return r;
813
814 chan = alu.dst.chan;
815 sel = alu.dst.sel;
816
817 /* tmp.x = MUL_LIT(src.w, dst.z, src.x) */
818 memset(&alu, 0, sizeof(struct r600_bc_alu));
819 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
820 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
821 if (r)
822 return r;
823 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
824 alu.src[1].sel = sel;
825 alu.src[1].chan = chan;
826 r = tgsi_src(ctx, &inst->Src[0], &alu.src[2]);
827 if (r)
828 return r;
829 alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
830 alu.dst.sel = ctx->temp_reg;
831 alu.dst.chan = 0;
832 alu.dst.write = 1;
833 alu.is_op3 = 1;
834 alu.last = 1;
835 r = r600_bc_add_alu(ctx->bc, &alu);
836 if (r)
837 return r;
838
839 /* dst.z = exp(tmp.x) */
840 memset(&alu, 0, sizeof(struct r600_bc_alu));
841 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
842 alu.src[0].sel = ctx->temp_reg;
843 alu.src[0].chan = 0;
844 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
845 if (r)
846 return r;
847 alu.last = 1;
848 r = r600_bc_add_alu(ctx->bc, &alu);
849 if (r)
850 return r;
851 }
852 return 0;
853 }
854
855 static int tgsi_trans(struct r600_shader_ctx *ctx)
856 {
857 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
858 struct r600_bc_alu alu;
859 int i, j, r;
860
861 for (i = 0; i < 4; i++) {
862 memset(&alu, 0, sizeof(struct r600_bc_alu));
863 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
864 alu.inst = ctx->inst_info->r600_opcode;
865 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
866 r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
867 if (r)
868 return r;
869 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
870 }
871 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
872 if (r)
873 return r;
874 alu.last = 1;
875 r = r600_bc_add_alu(ctx->bc, &alu);
876 if (r)
877 return r;
878 }
879 }
880 return 0;
881 }
882
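/* Scalar transcendental helper: run the operation once on the first (x)
 * channel of the swizzled sources into the temporary register, then
 * replicate the result to every channel enabled in the writemask (used for
 * RCP, RSQ, EX2, LG2 and TRUNC).
 */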
883 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
884 {
885 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
886 struct r600_bc_alu alu;
887 int i, j, r;
888
889 memset(&alu, 0, sizeof(struct r600_bc_alu));
890 alu.inst = ctx->inst_info->r600_opcode;
891 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
892 r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
893 if (r)
894 return r;
895 alu.src[j].chan = tgsi_chan(&inst->Src[j], 0);
896 }
897 alu.dst.sel = ctx->temp_reg;
898 alu.dst.write = 1;
899 alu.last = 1;
900 r = r600_bc_add_alu(ctx->bc, &alu);
901 if (r)
902 return r;
903 /* replicate result */
904 for (i = 0; i < 4; i++) {
905 memset(&alu, 0, sizeof(struct r600_bc_alu));
906 alu.src[0].sel = ctx->temp_reg;
907 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
908 alu.dst.chan = i;
909 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
910 if (r)
911 return r;
912 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
913 if (i == 3)
914 alu.last = 1;
915 r = r600_bc_add_alu(ctx->bc, &alu);
916 if (r)
917 return r;
918 }
919 return 0;
920 }
921
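/* Copy a result computed in the temporary register to the real destination,
 * honouring the writemask.  Used by helpers that cannot apply a writemask
 * directly (op3, dot products, LRP).
 */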
922 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
923 {
924 struct r600_bc_alu alu;
925 int i, r;
926
927 r = r600_bc_add_literal(ctx->bc, ctx->value);
928 if (r)
929 return r;
930 for (i = 0; i < 4; i++) {
931 memset(&alu, 0, sizeof(struct r600_bc_alu));
932 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
933 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
934 alu.dst.chan = i;
935 } else {
936 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
937 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
938 if (r)
939 return r;
940 alu.src[0].sel = ctx->temp_reg;
941 alu.src[0].chan = i;
942 }
943 if (i == 3) {
944 alu.last = 1;
945 }
946 r = r600_bc_add_alu(ctx->bc, &alu);
947 if (r)
948 return r;
949 }
950 return 0;
951 }
952
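/* Three-operand instructions (currently MAD): op3 ALU encodings carry no
 * writemask, so all four channels are computed into the temporary register
 * and then copied out with tgsi_helper_copy().
 */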
953 static int tgsi_op3(struct r600_shader_ctx *ctx)
954 {
955 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
956 struct r600_bc_alu_src r600_src[3];
957 struct r600_bc_alu alu;
958 int i, j, r;
959
960 r = tgsi_split_constant(ctx, r600_src);
961 if (r)
962 return r;
963 /* do it in 2 steps as op3 doesn't support writemask */
964 for (i = 0; i < 4; i++) {
965 memset(&alu, 0, sizeof(struct r600_bc_alu));
966 alu.inst = ctx->inst_info->r600_opcode;
967 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
968 alu.src[j] = r600_src[j];
969 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
970 }
971 alu.dst.sel = ctx->temp_reg;
972 alu.dst.chan = i;
973 alu.dst.write = 1;
974 alu.is_op3 = 1;
975 if (i == 3) {
976 alu.last = 1;
977 }
978 r = r600_bc_add_alu(ctx->bc, &alu);
979 if (r)
980 return r;
981 }
982 return tgsi_helper_copy(ctx, inst);
983 }
984
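/* Dot products all map to DOT4; for DP2 and DP3 the unused source channels
 * are replaced with the constant 0.0 (selector 248) so they do not
 * contribute to the sum.
 */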
985 static int tgsi_dp(struct r600_shader_ctx *ctx)
986 {
987 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
988 struct r600_bc_alu_src r600_src[3];
989 struct r600_bc_alu alu;
990 int i, j, r;
991
992 r = tgsi_split_constant(ctx, r600_src);
993 if (r)
994 return r;
995 for (i = 0; i < 4; i++) {
996 memset(&alu, 0, sizeof(struct r600_bc_alu));
997 alu.inst = ctx->inst_info->r600_opcode;
998 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
999 alu.src[j] = r600_src[j];
1000 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1001 }
1002 alu.dst.sel = ctx->temp_reg;
1003 alu.dst.chan = i;
1004 alu.dst.write = 1;
1005 /* handle some special cases */
1006 switch (ctx->inst_info->tgsi_opcode) {
1007 case TGSI_OPCODE_DP2:
1008 if (i > 1) {
1009 alu.src[0].sel = alu.src[1].sel = 248;
1010 alu.src[0].chan = alu.src[1].chan = 0;
1011 }
1012 break;
1013 case TGSI_OPCODE_DP3:
1014 if (i > 2) {
1015 alu.src[0].sel = alu.src[1].sel = 248;
1016 alu.src[0].chan = alu.src[1].chan = 0;
1017 }
1018 break;
1019 default:
1020 break;
1021 }
1022 if (i == 3) {
1023 alu.last = 1;
1024 }
1025 r = r600_bc_add_alu(ctx->bc, &alu);
1026 if (r)
1027 return r;
1028 }
1029 return tgsi_helper_copy(ctx, inst);
1030 }
1031
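/* Texture sampling.  For TXP the coordinates are first divided by the w
 * component (RECIP_IEEE then MUL into the temporary register) and w is set
 * back to 1.0; coordinates whose source is not a temporary register are
 * likewise copied into the temporary register first.  Non-RECT targets use
 * normalized coordinates.
 */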
1032 static int tgsi_tex(struct r600_shader_ctx *ctx)
1033 {
1034 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1035 struct r600_bc_tex tex;
1036 struct r600_bc_alu alu;
1037 unsigned src_gpr;
1038 int r, i;
1039
1040 src_gpr = ctx->file_offset[inst->Src[0].Register.File] + inst->Src[0].Register.Index;
1041
1042 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1043 /* Add perspective divide */
1044 memset(&alu, 0, sizeof(struct r600_bc_alu));
1045 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
1046 alu.src[0].sel = src_gpr;
1047 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1048 alu.dst.sel = ctx->temp_reg;
1049 alu.dst.chan = 3;
1050 alu.last = 1;
1051 alu.dst.write = 1;
1052 r = r600_bc_add_alu(ctx->bc, &alu);
1053 if (r)
1054 return r;
1055
1056 for (i = 0; i < 3; i++) {
1057 memset(&alu, 0, sizeof(struct r600_bc_alu));
1058 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1059 alu.src[0].sel = ctx->temp_reg;
1060 alu.src[0].chan = 3;
1061 alu.src[1].sel = src_gpr;
1062 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1063 alu.dst.sel = ctx->temp_reg;
1064 alu.dst.chan = i;
1065 alu.dst.write = 1;
1066 r = r600_bc_add_alu(ctx->bc, &alu);
1067 if (r)
1068 return r;
1069 }
1070 memset(&alu, 0, sizeof(struct r600_bc_alu));
1071 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1072 alu.src[0].sel = 249;
1073 alu.src[0].chan = 0;
1074 alu.dst.sel = ctx->temp_reg;
1075 alu.dst.chan = 3;
1076 alu.last = 1;
1077 alu.dst.write = 1;
1078 r = r600_bc_add_alu(ctx->bc, &alu);
1079 if (r)
1080 return r;
1081 src_gpr = ctx->temp_reg;
1082 } else if (inst->Src[0].Register.File != TGSI_FILE_TEMPORARY) {
1083 for (i = 0; i < 4; i++) {
1084 memset(&alu, 0, sizeof(struct r600_bc_alu));
1085 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1086 alu.src[0].sel = src_gpr;
1087 alu.src[0].chan = i;
1088 alu.dst.sel = ctx->temp_reg;
1089 alu.dst.chan = i;
1090 if (i == 3)
1091 alu.last = 1;
1092 alu.dst.write = 1;
1093 r = r600_bc_add_alu(ctx->bc, &alu);
1094 if (r)
1095 return r;
1096 }
1097 src_gpr = ctx->temp_reg;
1098 }
1099
1100 memset(&tex, 0, sizeof(struct r600_bc_tex));
1101 tex.inst = ctx->inst_info->r600_opcode;
1102 tex.resource_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
1103 tex.sampler_id = tex.resource_id;
1104 tex.src_gpr = src_gpr;
1105 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
1106 tex.dst_sel_x = 0;
1107 tex.dst_sel_y = 1;
1108 tex.dst_sel_z = 2;
1109 tex.dst_sel_w = 3;
1110 tex.src_sel_x = 0;
1111 tex.src_sel_y = 1;
1112 tex.src_sel_z = 2;
1113 tex.src_sel_w = 3;
1114
1115 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
1116 tex.coord_type_x = 1;
1117 tex.coord_type_y = 1;
1118 tex.coord_type_z = 1;
1119 tex.coord_type_w = 1;
1120 }
1121 return r600_bc_add_tex(ctx->bc, &tex);
1122 }
1123
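/* LRP(dst, s0, s1, s2) = s0 * s1 + (1 - s0) * s2, built in three passes:
 * tmp = 1 - s0, tmp = tmp * s2, then tmp = s0 * s1 + tmp with MULADD; the
 * temporary is finally copied to the destination by tgsi_helper_copy().
 */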
1124 static int tgsi_lrp(struct r600_shader_ctx *ctx)
1125 {
1126 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1127 struct r600_bc_alu_src r600_src[3];
1128 struct r600_bc_alu alu;
1129 unsigned i;
1130 int r;
1131
1132 r = tgsi_split_constant(ctx, r600_src);
1133 if (r)
1134 return r;
1135 /* 1 - src0 */
1136 for (i = 0; i < 4; i++) {
1137 memset(&alu, 0, sizeof(struct r600_bc_alu));
1138 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD;
1139 alu.src[0].sel = 249;
1140 alu.src[0].chan = 0;
1141 alu.src[1] = r600_src[0];
1142 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1143 alu.src[1].neg = 1;
1144 alu.dst.sel = ctx->temp_reg;
1145 alu.dst.chan = i;
1146 if (i == 3) {
1147 alu.last = 1;
1148 }
1149 alu.dst.write = 1;
1150 r = r600_bc_add_alu(ctx->bc, &alu);
1151 if (r)
1152 return r;
1153 }
1154 r = r600_bc_add_literal(ctx->bc, ctx->value);
1155 if (r)
1156 return r;
1157
1158 /* (1 - src0) * src2 */
1159 for (i = 0; i < 4; i++) {
1160 memset(&alu, 0, sizeof(struct r600_bc_alu));
1161 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1162 alu.src[0].sel = ctx->temp_reg;
1163 alu.src[0].chan = i;
1164 alu.src[1] = r600_src[2];
1165 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1166 alu.dst.sel = ctx->temp_reg;
1167 alu.dst.chan = i;
1168 if (i == 3) {
1169 alu.last = 1;
1170 }
1171 alu.dst.write = 1;
1172 r = r600_bc_add_alu(ctx->bc, &alu);
1173 if (r)
1174 return r;
1175 }
1176 r = r600_bc_add_literal(ctx->bc, ctx->value);
1177 if (r)
1178 return r;
1179
1180 /* src0 * src1 + (1 - src0) * src2 */
1181 for (i = 0; i < 4; i++) {
1182 memset(&alu, 0, sizeof(struct r600_bc_alu));
1183 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1184 alu.is_op3 = 1;
1185 alu.src[0] = r600_src[0];
1186 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1187 alu.src[1] = r600_src[1];
1188 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
1189 alu.src[2].sel = ctx->temp_reg;
1190 alu.src[2].chan = i;
1191 alu.dst.sel = ctx->temp_reg;
1192 alu.dst.chan = i;
1193 if (i == 3) {
1194 alu.last = 1;
1195 }
1196 r = r600_bc_add_alu(ctx->bc, &alu);
1197 if (r)
1198 return r;
1199 }
1200 return tgsi_helper_copy(ctx, inst);
1201 }
1202
1203 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
1204 {TGSI_OPCODE_ARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1205 {TGSI_OPCODE_MOV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
1206 {TGSI_OPCODE_LIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},
1207 {TGSI_OPCODE_RCP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE, tgsi_trans_srcx_replicate},
1208 {TGSI_OPCODE_RSQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE, tgsi_trans_srcx_replicate},
1209 {TGSI_OPCODE_EXP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1210 {TGSI_OPCODE_LOG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1211 {TGSI_OPCODE_MUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
1212 {TGSI_OPCODE_ADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
1213 {TGSI_OPCODE_DP3, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
1214 {TGSI_OPCODE_DP4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
1215 {TGSI_OPCODE_DST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1216 {TGSI_OPCODE_MIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
1217 {TGSI_OPCODE_MAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
1218 {TGSI_OPCODE_SLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_slt},
1219 {TGSI_OPCODE_SGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2},
1220 {TGSI_OPCODE_MAD, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
1221 {TGSI_OPCODE_SUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
1222 {TGSI_OPCODE_LRP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
1223 {TGSI_OPCODE_CND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1224 /* gap */
1225 {20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1226 {TGSI_OPCODE_DP2A, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1227 /* gap */
1228 {22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1229 {23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1230 {TGSI_OPCODE_FRC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT, tgsi_op2},
1231 {TGSI_OPCODE_CLAMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1232 {TGSI_OPCODE_FLR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR, tgsi_op2},
1233 {TGSI_OPCODE_ROUND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1234 {TGSI_OPCODE_EX2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
1235 {TGSI_OPCODE_LG2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE, tgsi_trans_srcx_replicate},
1236 {TGSI_OPCODE_POW, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1237 {TGSI_OPCODE_XPD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1238 /* gap */
1239 {32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1240 {TGSI_OPCODE_ABS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
1241 {TGSI_OPCODE_RCC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1242 {TGSI_OPCODE_DPH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1243 {TGSI_OPCODE_COS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1244 {TGSI_OPCODE_DDX, 0, SQ_TEX_INST_GET_GRADIENTS_H, tgsi_tex},
1245 {TGSI_OPCODE_DDY, 0, SQ_TEX_INST_GET_GRADIENTS_V, tgsi_tex},
1246 {TGSI_OPCODE_KILP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported}, /* predicated kill */
1247 {TGSI_OPCODE_PK2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1248 {TGSI_OPCODE_PK2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1249 {TGSI_OPCODE_PK4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1250 {TGSI_OPCODE_PK4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1251 {TGSI_OPCODE_RFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1252 {TGSI_OPCODE_SEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1253 {TGSI_OPCODE_SFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1254 {TGSI_OPCODE_SGT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1255 {TGSI_OPCODE_SIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1256 {TGSI_OPCODE_SLE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_slt},
1257 {TGSI_OPCODE_SNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1258 {TGSI_OPCODE_STR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1259 {TGSI_OPCODE_TEX, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
1260 {TGSI_OPCODE_TXD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1261 {TGSI_OPCODE_TXP, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
1262 {TGSI_OPCODE_UP2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1263 {TGSI_OPCODE_UP2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1264 {TGSI_OPCODE_UP4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1265 {TGSI_OPCODE_UP4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1266 {TGSI_OPCODE_X2D, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1267 {TGSI_OPCODE_ARA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1268 {TGSI_OPCODE_ARR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1269 {TGSI_OPCODE_BRA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1270 {TGSI_OPCODE_CAL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1271 {TGSI_OPCODE_RET, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1272 {TGSI_OPCODE_SSG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported}, /* SGN */
1273 {TGSI_OPCODE_CMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1274 {TGSI_OPCODE_SCS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1275 {TGSI_OPCODE_TXB, 0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
1276 {TGSI_OPCODE_NRM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1277 {TGSI_OPCODE_DIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1278 {TGSI_OPCODE_DP2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
1279 {TGSI_OPCODE_TXL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1280 {TGSI_OPCODE_BRK, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1281 {TGSI_OPCODE_IF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1282 /* gap */
1283 {75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1284 {76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1285 {TGSI_OPCODE_ELSE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1286 {TGSI_OPCODE_ENDIF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1287 /* gap */
1288 {79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1289 {80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1290 {TGSI_OPCODE_PUSHA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1291 {TGSI_OPCODE_POPA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1292 {TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1293 {TGSI_OPCODE_I2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1294 {TGSI_OPCODE_NOT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1295 {TGSI_OPCODE_TRUNC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_trans_srcx_replicate},
1296 {TGSI_OPCODE_SHL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1297 /* gap */
1298 {88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1299 {TGSI_OPCODE_AND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1300 {TGSI_OPCODE_OR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1301 {TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1302 {TGSI_OPCODE_XOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1303 {TGSI_OPCODE_SAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1304 {TGSI_OPCODE_TXF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1305 {TGSI_OPCODE_TXQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1306 {TGSI_OPCODE_CONT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1307 {TGSI_OPCODE_EMIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1308 {TGSI_OPCODE_ENDPRIM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1309 {TGSI_OPCODE_BGNLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1310 {TGSI_OPCODE_BGNSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1311 {TGSI_OPCODE_ENDLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1312 {TGSI_OPCODE_ENDSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1313 /* gap */
1314 {103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1315 {104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1316 {105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1317 {106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1318 {TGSI_OPCODE_NOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1319 /* gap */
1320 {108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1321 {109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1322 {110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1323 {111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1324 {TGSI_OPCODE_NRM4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1325 {TGSI_OPCODE_CALLNZ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1326 {TGSI_OPCODE_IFC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1327 {TGSI_OPCODE_BREAKC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1328 {TGSI_OPCODE_KIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill}, /* conditional kill */
1329 {TGSI_OPCODE_END, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end}, /* aka HALT */
1330 /* gap */
1331 {118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1332 {TGSI_OPCODE_F2I, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1333 {TGSI_OPCODE_IDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1334 {TGSI_OPCODE_IMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1335 {TGSI_OPCODE_IMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1336 {TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1337 {TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1338 {TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1339 {TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1340 {TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1341 {TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1342 {TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1343 {TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1344 {TGSI_OPCODE_UMAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1345 {TGSI_OPCODE_UMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1346 {TGSI_OPCODE_UMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1347 {TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1348 {TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1349 {TGSI_OPCODE_USEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1350 {TGSI_OPCODE_USGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1351 {TGSI_OPCODE_USHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1352 {TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1353 {TGSI_OPCODE_USNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1354 {TGSI_OPCODE_SWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1355 {TGSI_OPCODE_CASE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1356 {TGSI_OPCODE_DEFAULT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1357 {TGSI_OPCODE_ENDSWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1358 {TGSI_OPCODE_LAST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
1359 };