Revert "r600g: don't use dynamic state allocation for states"
src/gallium/drivers/r600/r600_shader.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"
#include "util/u_format.h"
#include "r600_screen.h"
#include "r600_context.h"
#include "r600_shader.h"
#include "r600_asm.h"
#include "r600_sq.h"
#include "r600d.h"
#include <stdio.h>
#include <errno.h>


struct r600_shader_tgsi_instruction;

struct r600_shader_ctx {
	struct tgsi_shader_info			info;
	struct tgsi_parse_context		parse;
	const struct tgsi_token			*tokens;
	unsigned				type;
	unsigned				file_offset[TGSI_FILE_COUNT];
	unsigned				temp_reg;
	struct r600_shader_tgsi_instruction	*inst_info;
	struct r600_bc				*bc;
	struct r600_shader			*shader;
	u32					value[4];
};

struct r600_shader_tgsi_instruction {
	unsigned	tgsi_opcode;
	unsigned	is_op3;
	unsigned	r600_opcode;
	int (*process)(struct r600_shader_ctx *ctx);
};

static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);

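/* Patch the vertex-fetch destination swizzles of an already translated
 * vertex shader so they match the currently bound vertex element formats,
 * then rebuild the bytecode. */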
static int r600_shader_update(struct pipe_context *ctx, struct r600_shader *shader)
{
	struct r600_context *rctx = r600_context(ctx);
	const struct util_format_description *desc;
	enum pipe_format resource_format[160];
	unsigned i, nresources = 0;
	struct r600_bc *bc = &shader->bc;
	struct r600_bc_cf *cf;
	struct r600_bc_vtx *vtx;

	if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
		return 0;
	for (i = 0; i < rctx->vertex_elements->count; i++) {
		resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
	}
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		switch (cf->inst) {
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				desc = util_format_description(resource_format[vtx->buffer_id]);
				if (desc == NULL) {
					R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
					return -EINVAL;
				}
				vtx->dst_sel_x = desc->swizzle[0];
				vtx->dst_sel_y = desc->swizzle[1];
				vtx->dst_sel_z = desc->swizzle[2];
				vtx->dst_sel_w = desc->swizzle[3];
			}
			break;
		default:
			break;
		}
	}
	return r600_bc_build(&shader->bc);
}

int r600_pipe_shader_create(struct pipe_context *ctx,
			    struct r600_context_state *rpshader,
			    const struct tgsi_token *tokens)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	int r;

	fprintf(stderr, "--------------------------------------------------------------\n");
	tgsi_dump(tokens, 0);
	if (rpshader == NULL)
		return -ENOMEM;
	rpshader->shader.family = radeon_get_family(rscreen->rw);
	r = r600_shader_from_tgsi(tokens, &rpshader->shader);
	if (r) {
		R600_ERR("translation from TGSI failed !\n");
		return r;
	}
	r = r600_bc_build(&rpshader->shader.bc);
	if (r) {
		R600_ERR("building bytecode failed !\n");
		return r;
	}
	fprintf(stderr, "______________________________________________________________\n");
	return 0;
}

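/* Build the radeon state atom for a vertex shader: SPI_VS_OUT_ID mapping,
 * SPI_VS_OUT_CONFIG and SQ_PGM_RESOURCES_VS, referencing the shader BO. */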
static int r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_shader *rshader = &rpshader->shader;
	struct radeon_state *state;
	unsigned i, tmp;

	rpshader->rstate = radeon_state_decref(rpshader->rstate);
	state = radeon_state(rscreen->rw, R600_VS_SHADER_TYPE, R600_VS_SHADER);
	if (state == NULL)
		return -ENOMEM;
	for (i = 0; i < 10; i++) {
		state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i] = 0;
	}
	/* so far never got proper semantic id from tgsi */
	for (i = 0; i < 32; i++) {
		tmp = i << ((i & 3) * 8);
		state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i / 4] |= tmp;
	}
	state->states[R600_VS_SHADER__SPI_VS_OUT_CONFIG] = S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2);
	state->states[R600_VS_SHADER__SQ_PGM_RESOURCES_VS] = S_028868_NUM_GPRS(rshader->bc.ngpr);
	rpshader->rstate = state;
	rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->bo[1] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->nbo = 2;
	rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
	return radeon_state_pm4(state);
}

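/* Build the radeon state atom for a pixel shader: SPI_PS_INPUT_CNTL per
 * input, SPI_PS_IN_CONTROL, SQ_PGM_RESOURCES_PS and the export mask. */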
static int r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_shader *rshader = &rpshader->shader;
	struct radeon_state *state;
	unsigned i, tmp, exports_ps, num_cout;

	rpshader->rstate = radeon_state_decref(rpshader->rstate);
	state = radeon_state(rscreen->rw, R600_PS_SHADER_TYPE, R600_PS_SHADER);
	if (state == NULL)
		return -ENOMEM;
	for (i = 0; i < rshader->ninput; i++) {
		tmp = S_028644_SEMANTIC(i);
		tmp |= S_028644_SEL_CENTROID(1);
		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_BCOLOR) {
			tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
		}
		state->states[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0 + i] = tmp;
	}

	exports_ps = 0;
	num_cout = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			exports_ps |= 1;
		else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
			exports_ps |= (1 << (num_cout + 1));
			num_cout++;
		}
	}
	state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] = S_0286CC_NUM_INTERP(rshader->ninput) |
							     S_0286CC_PERSP_GRADIENT_ENA(1);
	state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_1] = 0x00000000;
	state->states[R600_PS_SHADER__SQ_PGM_RESOURCES_PS] = S_028868_NUM_GPRS(rshader->bc.ngpr);
	state->states[R600_PS_SHADER__SQ_PGM_EXPORTS_PS] = exports_ps;
	rpshader->rstate = state;
	rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->nbo = 1;
	rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
	return radeon_state_pm4(state);
}

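/* Upload the translated bytecode into a buffer object and build the
 * hardware state for the shader, depending on its processor type. */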
static int r600_pipe_shader(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_context *rctx = r600_context(ctx);
	struct r600_shader *rshader = &rpshader->shader;
	int r;

	/* copy new shader */
	radeon_bo_decref(rscreen->rw, rpshader->bo);
	rpshader->bo = NULL;
	rpshader->bo = radeon_bo(rscreen->rw, 0, rshader->bc.ndw * 4,
				 4096, NULL);
	if (rpshader->bo == NULL) {
		return -ENOMEM;
	}
	radeon_bo_map(rscreen->rw, rpshader->bo);
	memcpy(rpshader->bo->data, rshader->bc.bytecode, rshader->bc.ndw * 4);
	radeon_bo_unmap(rscreen->rw, rpshader->bo);
	/* build state */
	rshader->flat_shade = rctx->flat_shade;
	switch (rshader->processor_type) {
	case TGSI_PROCESSOR_VERTEX:
		r = r600_pipe_shader_vs(ctx, rpshader);
		break;
	case TGSI_PROCESSOR_FRAGMENT:
		r = r600_pipe_shader_ps(ctx, rpshader);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_context *rctx = r600_context(ctx);
	int r;

	if (rpshader == NULL)
		return -EINVAL;
	/* there should be enough input */
	if (rctx->vertex_elements->count < rpshader->shader.bc.nresource) {
		R600_ERR("%d resources provided, expecting %d\n",
			 rctx->vertex_elements->count, rpshader->shader.bc.nresource);
		return -EINVAL;
	}
	r = r600_shader_update(ctx, &rpshader->shader);
	if (r)
		return r;
	return r600_pipe_shader(ctx, rpshader);
}

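/* Reject TGSI instructions this translator cannot handle yet (multiple
 * destinations, predication, labels, indirect/dimension/absolute operands). */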
static int tgsi_is_supported(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
	int j;

	if (i->Instruction.NumDstRegs > 1) {
		R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
		return -EINVAL;
	}
	if (i->Instruction.Predicate) {
		R600_ERR("predicate unsupported\n");
		return -EINVAL;
	}
	if (i->Instruction.Label) {
		R600_ERR("label unsupported\n");
		return -EINVAL;
	}
	for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
		if (i->Src[j].Register.Indirect ||
		    i->Src[j].Register.Dimension ||
		    i->Src[j].Register.Absolute) {
			R600_ERR("unsupported src (indirect|dimension|absolute)\n");
			return -EINVAL;
		}
	}
	for (j = 0; j < i->Instruction.NumDstRegs; j++) {
		if (i->Dst[j].Register.Indirect || i->Dst[j].Register.Dimension) {
			R600_ERR("unsupported dst (indirect|dimension)\n");
			return -EINVAL;
		}
	}
	return 0;
}

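/* Record an input/output declaration; for vertex shader inputs also emit
 * the corresponding vertex fetch instruction. */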
static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
	struct r600_bc_vtx vtx;
	unsigned i;
	int r;

	switch (d->Declaration.File) {
	case TGSI_FILE_INPUT:
		i = ctx->shader->ninput++;
		ctx->shader->input[i].name = d->Semantic.Name;
		ctx->shader->input[i].sid = d->Semantic.Index;
		ctx->shader->input[i].interpolate = d->Declaration.Interpolate;
		ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + i;
		if (ctx->type == TGSI_PROCESSOR_VERTEX) {
			/* turn input into fetch */
			memset(&vtx, 0, sizeof(struct r600_bc_vtx));
			vtx.inst = 0;
			vtx.fetch_type = 0;
			vtx.buffer_id = i;
			/* register containing the index into the buffer */
			vtx.src_gpr = 0;
			vtx.src_sel_x = 0;
			vtx.mega_fetch_count = 0x1F;
			vtx.dst_gpr = ctx->shader->input[i].gpr;
			vtx.dst_sel_x = 0;
			vtx.dst_sel_y = 1;
			vtx.dst_sel_z = 2;
			vtx.dst_sel_w = 3;
			r = r600_bc_add_vtx(ctx->bc, &vtx);
			if (r)
				return r;
		}
		break;
	case TGSI_FILE_OUTPUT:
		i = ctx->shader->noutput++;
		ctx->shader->output[i].name = d->Semantic.Name;
		ctx->shader->output[i].sid = d->Semantic.Index;
		ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + i;
		ctx->shader->output[i].interpolate = d->Declaration.Interpolate;
		break;
	case TGSI_FILE_CONSTANT:
	case TGSI_FILE_TEMPORARY:
	case TGSI_FILE_SAMPLER:
		break;
	default:
		R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
		return -EINVAL;
	}
	return 0;
}

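/* Main translation loop: walk the TGSI tokens, translate each instruction
 * through the r600_shader_tgsi_instruction table, then append the export
 * (and end-of-program) instructions for the shader outputs. */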
int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
{
	struct tgsi_full_immediate *immediate;
	struct r600_shader_ctx ctx;
	struct r600_bc_output output;
	unsigned opcode;
	int i, r = 0, pos0;

	ctx.bc = &shader->bc;
	ctx.shader = shader;
	r = r600_bc_init(ctx.bc, shader->family);
	if (r)
		return r;
	ctx.tokens = tokens;
	tgsi_scan_shader(tokens, &ctx.info);
	tgsi_parse_init(&ctx.parse, tokens);
	ctx.type = ctx.parse.FullHeader.Processor.Processor;
	shader->processor_type = ctx.type;

	/* register allocations */
	/* Values [0,127] correspond to GPR[0..127].
	 * Values [256,511] correspond to cfile constants c[0..255].
	 * Other special values are shown in the list below.
	 * 248	SQ_ALU_SRC_0: special constant 0.0.
	 * 249	SQ_ALU_SRC_1: special constant 1.0 float.
	 * 250	SQ_ALU_SRC_1_INT: special constant 1 integer.
	 * 251	SQ_ALU_SRC_M_1_INT: special constant -1 integer.
	 * 252	SQ_ALU_SRC_0_5: special constant 0.5 float.
	 * 253	SQ_ALU_SRC_LITERAL: literal constant.
	 * 254	SQ_ALU_SRC_PV: previous vector result.
	 * 255	SQ_ALU_SRC_PS: previous scalar result.
	 */
	for (i = 0; i < TGSI_FILE_COUNT; i++) {
		ctx.file_offset[i] = 0;
	}
	if (ctx.type == TGSI_PROCESSOR_VERTEX) {
		ctx.file_offset[TGSI_FILE_INPUT] = 1;
	}
	ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] +
						ctx.info.file_count[TGSI_FILE_INPUT];
	ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
						ctx.info.file_count[TGSI_FILE_OUTPUT];
	ctx.file_offset[TGSI_FILE_CONSTANT] = 256;
	ctx.file_offset[TGSI_FILE_IMMEDIATE] = 253;
	ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
			ctx.info.file_count[TGSI_FILE_TEMPORARY];

	while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
		tgsi_parse_token(&ctx.parse);
		switch (ctx.parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			immediate = &ctx.parse.FullToken.FullImmediate;
			ctx.value[0] = immediate->u[0].Uint;
			ctx.value[1] = immediate->u[1].Uint;
			ctx.value[2] = immediate->u[2].Uint;
			ctx.value[3] = immediate->u[3].Uint;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
			r = tgsi_declaration(&ctx);
			if (r)
				goto out_err;
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
			r = tgsi_is_supported(&ctx);
			if (r)
				goto out_err;
			opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
			ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
			r = ctx.inst_info->process(&ctx);
			if (r)
				goto out_err;
			r = r600_bc_add_literal(ctx.bc, ctx.value);
			if (r)
				goto out_err;
			break;
		default:
			R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
			r = -EINVAL;
			goto out_err;
		}
	}
	/* export output */
	for (i = 0, pos0 = 0; i < shader->noutput; i++) {
		memset(&output, 0, sizeof(struct r600_bc_output));
		output.gpr = shader->output[i].gpr;
		output.elem_size = 3;
		output.swizzle_x = 0;
		output.swizzle_y = 1;
		output.swizzle_z = 2;
		output.swizzle_w = 3;
		output.barrier = 1;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
		output.array_base = i - pos0;
		output.inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE;
		switch (ctx.type) {
		case TGSI_PROCESSOR_VERTEX:
			if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
				output.array_base = 60;
				output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
				/* position doesn't count in array_base */
				pos0 = 1;
			}
			break;
		case TGSI_PROCESSOR_FRAGMENT:
			if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
				output.array_base = 0;
				output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
			} else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
				output.array_base = 61;
				output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
			} else {
				R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
				r = -EINVAL;
				goto out_err;
			}
			break;
		default:
			R600_ERR("unsupported processor type %d\n", ctx.type);
			r = -EINVAL;
			goto out_err;
		}
		if (i == (shader->noutput - 1)) {
			output.end_of_program = 1;
		}
		r = r600_bc_add_output(ctx.bc, &output);
		if (r)
			goto out_err;
	}
	tgsi_parse_free(&ctx.parse);
	return 0;
out_err:
	tgsi_parse_free(&ctx.parse);
	return r;
}

static int tgsi_unsupported(struct r600_shader_ctx *ctx)
{
	R600_ERR("%d tgsi opcode unsupported\n", ctx->inst_info->tgsi_opcode);
	return -EINVAL;
}

static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}

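/* Map a TGSI source or destination register onto an r600 ALU operand,
 * applying the per-file register offsets set up in r600_shader_from_tgsi(). */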
static int tgsi_src(struct r600_shader_ctx *ctx,
		    const struct tgsi_full_src_register *tgsi_src,
		    struct r600_bc_alu_src *r600_src)
{
	memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
	r600_src->sel = tgsi_src->Register.Index;
	if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
		r600_src->sel = 0;
	}
	r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
	return 0;
}

static int tgsi_dst(struct r600_shader_ctx *ctx,
		    const struct tgsi_full_dst_register *tgsi_dst,
		    unsigned swizzle,
		    struct r600_bc_alu_dst *r600_dst)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;

	r600_dst->sel = tgsi_dst->Register.Index;
	r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
	r600_dst->chan = swizzle;
	r600_dst->write = 1;
	if (inst->Instruction.Saturate) {
		r600_dst->clamp = 1;
	}
	return 0;
}

static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
{
	switch (swizzle) {
	case 0:
		return tgsi_src->Register.SwizzleX;
	case 1:
		return tgsi_src->Register.SwizzleY;
	case 2:
		return tgsi_src->Register.SwizzleZ;
	case 3:
		return tgsi_src->Register.SwizzleW;
	default:
		return 0;
	}
}

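/* Gather the r600 source operands for the current instruction; when several
 * of them come from the constant file, copy all but one into temporary GPRs
 * so an ALU instruction does not read more than one constant operand. */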
static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, j, k, nconst, r;

	for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
			nconst++;
		}
		r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
		if (r) {
			return r;
		}
	}
	for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT && j > 0) {
			for (k = 0; k < 4; k++) {
				memset(&alu, 0, sizeof(struct r600_bc_alu));
				alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
				alu.src[0].sel = r600_src[i].sel;
				alu.src[0].chan = k;
				alu.dst.sel = ctx->temp_reg + j;
				alu.dst.chan = k;
				alu.dst.write = 1;
				if (k == 3)
					alu.last = 1;
				r = r600_bc_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			r600_src[i].sel = ctx->temp_reg + j;
			j--;
		}
	}
	return 0;
}

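/* Generic two-source ALU translation: emit one instruction per write-mask
 * channel (NOP for masked channels), with SUB lowered by negating the second
 * source and ABS by setting the absolute-value flag on the first source. */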
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
			alu.dst.chan = i;
		} else {
			alu.inst = ctx->inst_info->r600_opcode;
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				alu.src[j] = r600_src[j];
				alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
			}
			r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (r)
				return r;
		}
		/* handle some special cases */
		switch (ctx->inst_info->tgsi_opcode) {
		case TGSI_OPCODE_SUB:
			alu.src[1].neg = 1;
			break;
		case TGSI_OPCODE_ABS:
			alu.src[0].abs = 1;
			break;
		default:
			break;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

static int tgsi_kill(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, r;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = ctx->inst_info->r600_opcode;
		alu.dst.chan = i;
		alu.src[0].sel = 248;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
		if (r)
			return r;
		alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

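/* SLT is emitted as SETGT with the two source operands swapped. */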
static int tgsi_slt(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, r;

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
			alu.dst.chan = i;
		} else {
			alu.inst = ctx->inst_info->r600_opcode;
			alu.src[1] = r600_src[0];
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
			alu.src[0] = r600_src[1];
			alu.src[0].chan = tgsi_chan(&inst->Src[1], i);
			r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (r)
				return r;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

static int tgsi_lit(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int r;

	/* dst.x <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
	alu.src[0].sel = 249; /* 1.0 */
	alu.src[0].chan = 0;
	r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	if (r)
		return r;
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.y = max(src.x, 0.0) */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX;
	r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
	if (r)
		return r;
	alu.src[1].sel = 248; /* 0.0 */
	alu.src[1].chan = tgsi_chan(&inst->Src[0], 0);
	r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
	if (r)
		return r;
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.z = NOP - fill Z slot */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
	alu.dst.chan = 2;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.w <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
	alu.src[0].sel = 249;
	alu.src[0].chan = 0;
	r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
	if (r)
		return r;
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	if (inst->Dst[0].Register.WriteMask & (1 << 2))
	{
		int chan;
		int sel;

		/* dst.z = log(src.y) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
		if (r)
			return r;
		alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
		r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
		if (r)
			return r;
		alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		chan = alu.dst.chan;
		sel = alu.dst.sel;

		/* tmp.x = MUL_LIT(src.w, dst.z, src.x) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
		if (r)
			return r;
		alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
		alu.src[1].sel = sel;
		alu.src[1].chan = chan;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[2]);
		if (r)
			return r;
		alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 0;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		/* dst.z = exp(tmp.x) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = 0;
		r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
		if (r)
			return r;
		alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

static int tgsi_trans(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, j, r;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		if (inst->Dst[0].Register.WriteMask & (1 << i)) {
			alu.inst = ctx->inst_info->r600_opcode;
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
				if (r)
					return r;
				alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
			}
			r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (r)
				return r;
			alu.last = 1;
			r = r600_bc_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}
	return 0;
}

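/* Scalar (transcendental) opcodes: compute the result once, using the X
 * swizzle of each source, into a temporary register, then replicate it to
 * every channel enabled in the write mask. */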
static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, j, r;

	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = ctx->inst_info->r600_opcode;
	for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
		r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
		if (r)
			return r;
		alu.src[j].chan = tgsi_chan(&inst->Src[j], 0);
	}
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	/* replicate result */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.src[0].sel = ctx->temp_reg;
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
		alu.dst.chan = i;
		r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		if (r)
			return r;
		alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
		if (i == 3)
			alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

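/* Copy the value built in the temporary register to the real destination,
 * emitting NOPs for channels masked out by the write mask. */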
static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
{
	struct r600_bc_alu alu;
	int i, r;

	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
			alu.dst.chan = i;
		} else {
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
			r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (r)
				return r;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = i;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

static int tgsi_op3(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	/* do it in 2 steps as op3 doesn't support writemask */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = ctx->inst_info->r600_opcode;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			alu.src[j] = r600_src[j];
			alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
		}
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return tgsi_helper_copy(ctx, inst);
}

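/* Dot products are implemented with DOT4; for DP2/DP3 the unused channel
 * pairs are replaced with the constant 0.0 so they do not contribute to
 * the sum. */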
static int tgsi_dp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = ctx->inst_info->r600_opcode;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			alu.src[j] = r600_src[j];
			alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
		}
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		/* handle some special cases */
		switch (ctx->inst_info->tgsi_opcode) {
		case TGSI_OPCODE_DP2:
			if (i > 1) {
				alu.src[0].sel = alu.src[1].sel = 248;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DP3:
			if (i > 2) {
				alu.src[0].sel = alu.src[1].sel = 248;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		default:
			break;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return tgsi_helper_copy(ctx, inst);
}

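/* Emit a texture fetch: the coordinates are divided by the W component and
 * copied into a temporary register, then a TEX instruction is added. */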
static int tgsi_tex(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_tex tex;
	struct r600_bc_alu alu;
	unsigned src_gpr;
	int r;

	src_gpr = ctx->file_offset[inst->Src[0].Register.File] + inst->Src[0].Register.Index;

	/* Add perspective divide */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
	alu.src[0].sel = src_gpr;
	alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 3;
	alu.last = 1;
	alu.dst.write = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 3;
	alu.src[1].sel = src_gpr;
	alu.src[1].chan = tgsi_chan(&inst->Src[0], 0);
	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 0;
	alu.dst.write = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 3;
	alu.src[1].sel = src_gpr;
	alu.src[1].chan = tgsi_chan(&inst->Src[0], 1);
	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 1;
	alu.dst.write = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 3;
	alu.src[1].sel = src_gpr;
	alu.src[1].chan = tgsi_chan(&inst->Src[0], 2);
	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 2;
	alu.dst.write = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
	alu.src[0].sel = 249;
	alu.src[0].chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 3;
	alu.last = 1;
	alu.dst.write = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	src_gpr = ctx->temp_reg;

	/* TODO use temp if src_gpr is not a temporary reg (File != TEMPORARY) */
	memset(&tex, 0, sizeof(struct r600_bc_tex));
	tex.inst = ctx->inst_info->r600_opcode;
	tex.resource_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
	tex.sampler_id = tex.resource_id;
	tex.src_gpr = src_gpr;
	tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
	tex.dst_sel_x = 0;
	tex.dst_sel_y = 1;
	tex.dst_sel_z = 2;
	tex.dst_sel_w = 3;
	tex.src_sel_x = 0;
	tex.src_sel_y = 1;
	tex.src_sel_z = 2;
	tex.src_sel_w = 3;

	if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
		tex.coord_type_x = 1;
		tex.coord_type_y = 1;
		tex.coord_type_z = 1;
		tex.coord_type_w = 1;
	}
	return r600_bc_add_tex(ctx->bc, &tex);
}

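/* LRP is expanded as dst = src0 * src1 + (1 - src0) * src2, built from an
 * ADD, a MUL and a MULADD into the temporary register and then copied to
 * the destination. */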
static int tgsi_lrp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	unsigned i;
	int r;

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	/* 1 - src0 */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD;
		alu.src[0].sel = 249;
		alu.src[0].chan = 0;
		alu.src[1] = r600_src[0];
		alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		alu.src[1].neg = 1;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		if (i == 3) {
			alu.last = 1;
		}
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* (1 - src0) * src2 */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;
		alu.src[1] = r600_src[2];
		alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		if (i == 3) {
			alu.last = 1;
		}
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* src0 * src1 + (1 - src0) * src2 */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
		alu.is_op3 = 1;
		alu.src[0] = r600_src[0];
		alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
		alu.src[1] = r600_src[1];
		alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
		alu.src[2].sel = ctx->temp_reg;
		alu.src[2].chan = i;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return tgsi_helper_copy(ctx, inst);
}

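/* TGSI opcode translation table, indexed by TGSI opcode number; each entry
 * gives the matching r600 opcode (when there is a direct one) and the emit
 * callback. Opcodes without a handler fall back to tgsi_unsupported. */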
static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	{TGSI_OPCODE_ARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MOV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_LIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},
	{TGSI_OPCODE_RCP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_RSQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_EXP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LOG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
	{TGSI_OPCODE_ADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_DP3, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DP4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
	{TGSI_OPCODE_MAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
	{TGSI_OPCODE_SLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_slt},
	{TGSI_OPCODE_SGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MAD, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
	{TGSI_OPCODE_SUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_LRP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
	{TGSI_OPCODE_CND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2A, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FRC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CLAMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FLR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ROUND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_EX2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_LG2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POW, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_XPD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ABS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_RCC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DPH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_COS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DDX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DDY, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KILP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported}, /* predicated kill */
	{TGSI_OPCODE_PK2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SGT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SLE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_STR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX, 0, 0x10, tgsi_tex},
	{TGSI_OPCODE_TXD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXP, 0, 0x10, tgsi_tex},
	{TGSI_OPCODE_UP2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_X2D, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BRA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CAL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RET, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SSG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported}, /* SGN */
	{TGSI_OPCODE_CMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SCS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_TXL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BRK, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ELSE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDIF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PUSHA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POPA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_I2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TRUNC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SHL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_AND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_OR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_XOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CONT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_EMIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDPRIM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CALLNZ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IFC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BREAKC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill}, /* conditional kill */
	{TGSI_OPCODE_END, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end}, /* aka HALT */
	/* gap */
	{118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2I, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CASE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DEFAULT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LAST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
};