r600g: add missing literals
mesa.git: src/gallium/drivers/r600/r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "pipe/p_shader_tokens.h"
24 #include "tgsi/tgsi_parse.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_dump.h"
27 #include "util/u_format.h"
28 #include "r600_screen.h"
29 #include "r600_context.h"
30 #include "r600_shader.h"
31 #include "r600_asm.h"
32 #include "r600_sq.h"
33 #include "r600d.h"
34 #include <stdio.h>
35 #include <errno.h>
36
37
38 struct r600_shader_tgsi_instruction;
39
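/*
 * Translation context.  value[] holds the four dwords of the immediate
 * referenced by the most recent source operand, while literals/nliterals
 * accumulate every TGSI immediate seen so far (4 dwords per immediate).
 * tgsi_src() looks an immediate up by index in literals[] and copies it into
 * value[], and the translation loop flushes value[] with
 * r600_bc_add_literal() after each instruction so that SQ_ALU_SRC_LITERAL
 * operands get their data (this is what the commit title refers to).
 */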
40 struct r600_shader_ctx {
41 struct tgsi_shader_info info;
42 struct tgsi_parse_context parse;
43 const struct tgsi_token *tokens;
44 unsigned type;
45 unsigned file_offset[TGSI_FILE_COUNT];
46 unsigned temp_reg;
47 struct r600_shader_tgsi_instruction *inst_info;
48 struct r600_bc *bc;
49 struct r600_shader *shader;
50 u32 value[4];
51 u32 *literals;
52 u32 nliterals;
53 };
54
55 struct r600_shader_tgsi_instruction {
56 unsigned tgsi_opcode;
57 unsigned is_op3;
58 unsigned r600_opcode;
59 int (*process)(struct r600_shader_ctx *ctx);
60 };
61
62 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
63 static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
64
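/*
 * Called from r600_pipe_shader_update() when state is (re)validated: for a
 * vertex shader, re-resolve the destination swizzle of every vertex-fetch
 * clause from the formats of the currently bound vertex elements, then
 * rebuild the bytecode.
 */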
65 static int r600_shader_update(struct pipe_context *ctx, struct r600_shader *shader)
66 {
67 struct r600_context *rctx = r600_context(ctx);
68 const struct util_format_description *desc;
69 enum pipe_format resource_format[160];
70 unsigned i, nresources = 0;
71 struct r600_bc *bc = &shader->bc;
72 struct r600_bc_cf *cf;
73 struct r600_bc_vtx *vtx;
74
75 if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
76 return 0;
77 for (i = 0; i < rctx->vertex_elements->count; i++) {
78 resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
79 }
80 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
81 switch (cf->inst) {
82 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
83 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
84 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
85 desc = util_format_description(resource_format[vtx->buffer_id]);
86 if (desc == NULL) {
87 R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
88 return -EINVAL;
89 }
90 vtx->dst_sel_x = desc->swizzle[0];
91 vtx->dst_sel_y = desc->swizzle[1];
92 vtx->dst_sel_z = desc->swizzle[2];
93 vtx->dst_sel_w = desc->swizzle[3];
94 }
95 break;
96 default:
97 break;
98 }
99 }
100 return r600_bc_build(&shader->bc);
101 }
102
103 int r600_pipe_shader_create(struct pipe_context *ctx,
104 struct r600_context_state *rpshader,
105 const struct tgsi_token *tokens)
106 {
107 struct r600_screen *rscreen = r600_screen(ctx->screen);
108 int r;
109
110 //fprintf(stderr, "--------------------------------------------------------------\n");
111 //tgsi_dump(tokens, 0);
112 if (rpshader == NULL)
113 return -ENOMEM;
114 rpshader->shader.family = radeon_get_family(rscreen->rw);
115 r = r600_shader_from_tgsi(tokens, &rpshader->shader);
116 if (r) {
117 R600_ERR("translation from TGSI failed !\n");
118 return r;
119 }
120 r = r600_bc_build(&rpshader->shader.bc);
121 if (r) {
122 R600_ERR("building bytecode failed !\n");
123 return r;
124 }
125 //fprintf(stderr, "______________________________________________________________\n");
126 return 0;
127 }
128
129 static int r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_context_state *rpshader)
130 {
131 struct r600_screen *rscreen = r600_screen(ctx->screen);
132 struct r600_shader *rshader = &rpshader->shader;
133 struct radeon_state *state;
134 unsigned i, tmp;
135
136 rpshader->rstate = radeon_state_decref(rpshader->rstate);
137 state = radeon_state_shader(rscreen->rw, R600_STATE_SHADER, 0, R600_SHADER_VS);
138 if (state == NULL)
139 return -ENOMEM;
140 for (i = 0; i < 10; i++) {
141 state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i] = 0;
142 }
143 /* so far never got proper semantic id from tgsi */
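/*
 * Each SPI_VS_OUT_ID register packs four byte-sized semantic ids, so output
 * i gets id i stored in byte (i & 3) of register i / 4; e.g. outputs 4..7
 * land in SPI_VS_OUT_ID_1 as 0x07060504.
 */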
144 for (i = 0; i < 32; i++) {
145 tmp = i << ((i & 3) * 8);
146 state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i / 4] |= tmp;
147 }
148 state->states[R600_VS_SHADER__SPI_VS_OUT_CONFIG] = S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2);
149 state->states[R600_VS_SHADER__SQ_PGM_RESOURCES_VS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
150 S_028868_STACK_SIZE(rshader->bc.nstack);
151 rpshader->rstate = state;
152 rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
153 rpshader->rstate->bo[1] = radeon_bo_incref(rscreen->rw, rpshader->bo);
154 rpshader->rstate->nbo = 2;
155 rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
156 rpshader->rstate->placement[2] = RADEON_GEM_DOMAIN_GTT;
157 return radeon_state_pm4(state);
158 }
159
160 static int r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_context_state *rpshader)
161 {
162 const struct pipe_rasterizer_state *rasterizer;
163 struct r600_screen *rscreen = r600_screen(ctx->screen);
164 struct r600_shader *rshader = &rpshader->shader;
165 struct r600_context *rctx = r600_context(ctx);
166 struct radeon_state *state;
167 unsigned i, tmp, exports_ps, num_cout;
168
169 rasterizer = &rctx->rasterizer->state.rasterizer;
170 rpshader->rstate = radeon_state_decref(rpshader->rstate);
171 state = radeon_state_shader(rscreen->rw, R600_STATE_SHADER, 0, R600_SHADER_PS);
172 if (state == NULL)
173 return -ENOMEM;
174 for (i = 0; i < rshader->ninput; i++) {
175 tmp = S_028644_SEMANTIC(i);
176 tmp |= S_028644_SEL_CENTROID(1);
177 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
178 rshader->input[i].name == TGSI_SEMANTIC_BCOLOR) {
179 tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
180 }
181 if (rasterizer->sprite_coord_enable & (1 << i)) {
182 tmp |= S_028644_PT_SPRITE_TEX(1);
183 }
184 state->states[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0 + i] = tmp;
185 }
186
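/*
 * Accumulate the SQ_PGM_EXPORTS_PS value: bit 0 is set when the shader
 * exports a position (depth), and each colour output sets the next bit
 * (1 << (num_cout + 1)).  If nothing is exported, 2 is used so at least one
 * colour export is always advertised.
 */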
187 exports_ps = 0;
188 num_cout = 0;
189 for (i = 0; i < rshader->noutput; i++) {
190 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
191 exports_ps |= 1;
192 else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
193 exports_ps |= (1 << (num_cout+1));
194 num_cout++;
195 }
196 }
197 if (!exports_ps) {
198 /* always at least export 1 component per pixel */
199 exports_ps = 2;
200 }
201 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] = S_0286CC_NUM_INTERP(rshader->ninput) |
202 S_0286CC_PERSP_GRADIENT_ENA(1);
203 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_1] = 0x00000000;
204 state->states[R600_PS_SHADER__SQ_PGM_RESOURCES_PS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
205 S_028868_STACK_SIZE(rshader->bc.nstack);
206 state->states[R600_PS_SHADER__SQ_PGM_EXPORTS_PS] = exports_ps;
207 rpshader->rstate = state;
208 rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
209 rpshader->rstate->nbo = 1;
210 rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
211 return radeon_state_pm4(state);
212 }
213
214 static int r600_pipe_shader(struct pipe_context *ctx, struct r600_context_state *rpshader)
215 {
216 struct r600_screen *rscreen = r600_screen(ctx->screen);
217 struct r600_context *rctx = r600_context(ctx);
218 struct r600_shader *rshader = &rpshader->shader;
219 int r;
220
221 /* copy new shader */
222 radeon_bo_decref(rscreen->rw, rpshader->bo);
223 rpshader->bo = NULL;
224 rpshader->bo = radeon_bo(rscreen->rw, 0, rshader->bc.ndw * 4,
225 4096, NULL);
226 if (rpshader->bo == NULL) {
227 return -ENOMEM;
228 }
229 radeon_bo_map(rscreen->rw, rpshader->bo);
230 memcpy(rpshader->bo->data, rshader->bc.bytecode, rshader->bc.ndw * 4);
231 radeon_bo_unmap(rscreen->rw, rpshader->bo);
232 /* build state */
233 rshader->flat_shade = rctx->flat_shade;
234 switch (rshader->processor_type) {
235 case TGSI_PROCESSOR_VERTEX:
236 r = r600_pipe_shader_vs(ctx, rpshader);
237 break;
238 case TGSI_PROCESSOR_FRAGMENT:
239 r = r600_pipe_shader_ps(ctx, rpshader);
240 break;
241 default:
242 r = -EINVAL;
243 break;
244 }
245 return r;
246 }
247
248 int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_context_state *rpshader)
249 {
250 struct r600_context *rctx = r600_context(ctx);
251 int r;
252
253 if (rpshader == NULL)
254 return -EINVAL;
255 /* the bound vertex elements must provide at least as many resources as the shader fetches */
256 if (rctx->vertex_elements->count < rpshader->shader.bc.nresource) {
257 R600_ERR("%d resources provided, expecting %d\n",
258 rctx->vertex_elements->count, rpshader->shader.bc.nresource);
259 return -EINVAL;
260 }
261 r = r600_shader_update(ctx, &rpshader->shader);
262 if (r)
263 return r;
264 return r600_pipe_shader(ctx, rpshader);
265 }
266
267 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
268 {
269 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
270 int j;
271
272 if (i->Instruction.NumDstRegs > 1) {
273 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
274 return -EINVAL;
275 }
276 if (i->Instruction.Predicate) {
277 R600_ERR("predicate unsupported\n");
278 return -EINVAL;
279 }
280 #if 0
281 if (i->Instruction.Label) {
282 R600_ERR("label unsupported\n");
283 return -EINVAL;
284 }
285 #endif
286 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
287 if (i->Src[j].Register.Dimension ||
288 i->Src[j].Register.Absolute) {
289 R600_ERR("unsupported src %d (dimension %d|absolute %d)\n", j,
290 i->Src[j].Register.Dimension,
291 i->Src[j].Register.Absolute);
292 return -EINVAL;
293 }
294 }
295 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
296 if (i->Dst[j].Register.Dimension) {
297 R600_ERR("unsupported dst (dimension)\n");
298 return -EINVAL;
299 }
300 }
301 return 0;
302 }
303
304 static int tgsi_declaration(struct r600_shader_ctx *ctx)
305 {
306 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
307 struct r600_bc_vtx vtx;
308 unsigned i;
309 int r;
310
311 switch (d->Declaration.File) {
312 case TGSI_FILE_INPUT:
313 i = ctx->shader->ninput++;
314 ctx->shader->input[i].name = d->Semantic.Name;
315 ctx->shader->input[i].sid = d->Semantic.Index;
316 ctx->shader->input[i].interpolate = d->Declaration.Interpolate;
317 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + i;
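/*
 * For vertex shaders each declared input becomes a vertex fetch: buffer_id i
 * selects the i-th vertex resource, the fetch index is read from GPR0.x
 * (GPR0 is reserved for it in r600_shader_from_tgsi()), and the result lands
 * in the input's GPR with an identity swizzle that r600_shader_update()
 * later patches to match the bound format.  mega_fetch_count of 0x1F
 * presumably requests the maximum 32-byte mega fetch.
 */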
318 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
319 /* turn input into fetch */
320 memset(&vtx, 0, sizeof(struct r600_bc_vtx));
321 vtx.inst = 0;
322 vtx.fetch_type = 0;
323 vtx.buffer_id = i;
324 /* register containing the index into the buffer */
325 vtx.src_gpr = 0;
326 vtx.src_sel_x = 0;
327 vtx.mega_fetch_count = 0x1F;
328 vtx.dst_gpr = ctx->shader->input[i].gpr;
329 vtx.dst_sel_x = 0;
330 vtx.dst_sel_y = 1;
331 vtx.dst_sel_z = 2;
332 vtx.dst_sel_w = 3;
333 r = r600_bc_add_vtx(ctx->bc, &vtx);
334 if (r)
335 return r;
336 }
337 break;
338 case TGSI_FILE_OUTPUT:
339 i = ctx->shader->noutput++;
340 ctx->shader->output[i].name = d->Semantic.Name;
341 ctx->shader->output[i].sid = d->Semantic.Index;
342 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + i;
343 ctx->shader->output[i].interpolate = d->Declaration.Interpolate;
344 break;
345 case TGSI_FILE_CONSTANT:
346 case TGSI_FILE_TEMPORARY:
347 case TGSI_FILE_SAMPLER:
348 case TGSI_FILE_ADDRESS:
349 break;
350 default:
351 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
352 return -EINVAL;
353 }
354 return 0;
355 }
356
357 int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
358 {
359 struct tgsi_full_immediate *immediate;
360 struct r600_shader_ctx ctx;
361 struct r600_bc_output output[32];
362 unsigned output_done, noutput;
363 unsigned opcode;
364 int i, r = 0, pos0;
365
366 ctx.bc = &shader->bc;
367 ctx.shader = shader;
368 r = r600_bc_init(ctx.bc, shader->family);
369 if (r)
370 return r;
371 ctx.tokens = tokens;
372 tgsi_scan_shader(tokens, &ctx.info);
373 tgsi_parse_init(&ctx.parse, tokens);
374 ctx.type = ctx.parse.FullHeader.Processor.Processor;
375 shader->processor_type = ctx.type;
376
377 /* register allocations */
378 /* Values [0,127] correspond to GPR[0..127].
379 * Values [128,159] correspond to constant buffer bank 0
380 * Values [160,191] correspond to constant buffer bank 1
381 * Values [256,511] correspond to cfile constants c[0..255].
382 * Other special values are shown in the list below.
383 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
384 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
385 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
386 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
387 * 248 SQ_ALU_SRC_0: special constant 0.0.
388 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
389 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
390 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
391 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
392 * 253 SQ_ALU_SRC_LITERAL: literal constant.
393 * 254 SQ_ALU_SRC_PV: previous vector result.
394 * 255 SQ_ALU_SRC_PS: previous scalar result.
395 */
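/*
 * The file_offset[] table below maps each TGSI register file onto that
 * numbering: for a vertex shader GPR0 is reserved for the fetch index, so
 * inputs start at GPR1; outputs follow the inputs and temporaries follow the
 * outputs; constants map to the cfile at 256 and immediates to 253
 * (SQ_ALU_SRC_LITERAL).  For example, a VS with 2 inputs, 1 output and
 * 3 temporaries uses GPR1-2 for inputs, GPR3 for the output, GPR4-6 for
 * temporaries, and ctx.temp_reg = 7 as the first scratch register.
 */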
396 for (i = 0; i < TGSI_FILE_COUNT; i++) {
397 ctx.file_offset[i] = 0;
398 }
399 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
400 ctx.file_offset[TGSI_FILE_INPUT] = 1;
401 }
402 ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] +
403 ctx.info.file_count[TGSI_FILE_INPUT];
404 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
405 ctx.info.file_count[TGSI_FILE_OUTPUT];
406 ctx.file_offset[TGSI_FILE_CONSTANT] = 256;
407 ctx.file_offset[TGSI_FILE_IMMEDIATE] = 253;
408 ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
409 ctx.info.file_count[TGSI_FILE_TEMPORARY];
410
411 ctx.nliterals = 0;
412 ctx.literals = NULL;
413
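/*
 * Main translation loop.  Immediates are simply collected here: each TGSI
 * immediate appends four dwords (16 bytes) to ctx.literals so that
 * tgsi_src() can later copy the referenced vector into ctx.value, and the
 * r600_bc_add_literal() call after every translated instruction emits those
 * dwords as the literal constants backing SQ_ALU_SRC_LITERAL operands.
 */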
414 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
415 tgsi_parse_token(&ctx.parse);
416 switch (ctx.parse.FullToken.Token.Type) {
417 case TGSI_TOKEN_TYPE_IMMEDIATE:
418 immediate = &ctx.parse.FullToken.FullImmediate;
419 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
420 if(ctx.literals == NULL) {
421 r = -ENOMEM;
422 goto out_err;
423 }
424 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
425 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
426 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
427 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
428 ctx.nliterals++;
429 break;
430 case TGSI_TOKEN_TYPE_DECLARATION:
431 r = tgsi_declaration(&ctx);
432 if (r)
433 goto out_err;
434 break;
435 case TGSI_TOKEN_TYPE_INSTRUCTION:
436 r = tgsi_is_supported(&ctx);
437 if (r)
438 goto out_err;
439 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
440 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
441 r = ctx.inst_info->process(&ctx);
442 if (r)
443 goto out_err;
444 r = r600_bc_add_literal(ctx.bc, ctx.value);
445 if (r)
446 goto out_err;
447 break;
448 default:
449 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
450 r = -EINVAL;
451 goto out_err;
452 }
453 }
454 /* export output */
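/*
 * Parameter exports get consecutive array_base indices with position outputs
 * skipped (the pos0 counter); vertex position and point size go to the fixed
 * POS slots 60 and 61, fragment colours use their semantic index as the
 * PIXEL slot and fragment depth uses slot 61.  Below, a fake param export is
 * added for vertex shaders that export no parameter and a fake pixel export
 * for fragment shaders with no outputs; the last export of each type becomes
 * EXPORT_DONE and the final export ends the program.
 */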
455 noutput = shader->noutput;
456 for (i = 0, pos0 = 0; i < noutput; i++) {
457 memset(&output[i], 0, sizeof(struct r600_bc_output));
458 output[i].gpr = shader->output[i].gpr;
459 output[i].elem_size = 3;
460 output[i].swizzle_x = 0;
461 output[i].swizzle_y = 1;
462 output[i].swizzle_z = 2;
463 output[i].swizzle_w = 3;
464 output[i].barrier = 1;
465 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
466 output[i].array_base = i - pos0;
467 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
468 switch (ctx.type) {
469 case TGSI_PROCESSOR_VERTEX:
470 if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
471 output[i].array_base = 60;
472 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
473 /* position doesn't count in array_base */
474 pos0++;
475 }
476 if (shader->output[i].name == TGSI_SEMANTIC_PSIZE) {
477 output[i].array_base = 61;
478 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
479 /* position doesn't count in array_base */
480 pos0++;
481 }
482 break;
483 case TGSI_PROCESSOR_FRAGMENT:
484 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
485 output[i].array_base = shader->output[i].sid;
486 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
487 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
488 output[i].array_base = 61;
489 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
490 } else {
491 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
492 r = -EINVAL;
493 goto out_err;
494 }
495 break;
496 default:
497 R600_ERR("unsupported processor type %d\n", ctx.type);
498 r = -EINVAL;
499 goto out_err;
500 }
501 }
502 /* add fake param output for vertex shader if no param is exported */
503 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
504 for (i = 0, pos0 = 0; i < noutput; i++) {
505 if (output[i].type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM) {
506 pos0 = 1;
507 break;
508 }
509 }
510 if (!pos0) {
511 memset(&output[i], 0, sizeof(struct r600_bc_output));
512 output[i].gpr = 0;
513 output[i].elem_size = 3;
514 output[i].swizzle_x = 0;
515 output[i].swizzle_y = 1;
516 output[i].swizzle_z = 2;
517 output[i].swizzle_w = 3;
518 output[i].barrier = 1;
519 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
520 output[i].array_base = 0;
521 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
522 noutput++;
523 }
524 }
525 /* add fake pixel export */
526 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && !noutput) {
527 memset(&output[0], 0, sizeof(struct r600_bc_output));
528 output[0].gpr = 0;
529 output[0].elem_size = 3;
530 output[0].swizzle_x = 7;
531 output[0].swizzle_y = 7;
532 output[0].swizzle_z = 7;
533 output[0].swizzle_w = 7;
534 output[0].barrier = 1;
535 output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
536 output[0].array_base = 0;
537 output[0].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
538 noutput++;
539 }
540 /* set export done on last export of each type */
541 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
542 if (i == (noutput - 1)) {
543 output[i].end_of_program = 1;
544 }
545 if (!(output_done & (1 << output[i].type))) {
546 output_done |= (1 << output[i].type);
547 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE;
548 }
549 }
550 /* add output to bytecode */
551 for (i = 0; i < noutput; i++) {
552 r = r600_bc_add_output(ctx.bc, &output[i]);
553 if (r)
554 goto out_err;
555 }
556 free(ctx.literals);
557 tgsi_parse_free(&ctx.parse);
558 return 0;
559 out_err:
560 free(ctx.literals);
561 tgsi_parse_free(&ctx.parse);
562 return r;
563 }
564
565 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
566 {
567 R600_ERR("%d tgsi opcode unsupported\n", ctx->inst_info->tgsi_opcode);
568 return -EINVAL;
569 }
570
571 static int tgsi_end(struct r600_shader_ctx *ctx)
572 {
573 return 0;
574 }
575
576 static int tgsi_src(struct r600_shader_ctx *ctx,
577 const struct tgsi_full_src_register *tgsi_src,
578 struct r600_bc_alu_src *r600_src)
579 {
580 int index;
581 memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
582 r600_src->sel = tgsi_src->Register.Index;
583 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
584 r600_src->sel = 0;
585 index = tgsi_src->Register.Index;
586 ctx->value[0] = ctx->literals[index * 4 + 0];
587 ctx->value[1] = ctx->literals[index * 4 + 1];
588 ctx->value[2] = ctx->literals[index * 4 + 2];
589 ctx->value[3] = ctx->literals[index * 4 + 3];
590 }
591 if (tgsi_src->Register.Indirect)
592 r600_src->rel = V_SQ_REL_RELATIVE;
593 r600_src->neg = tgsi_src->Register.Negate;
594 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
595 return 0;
596 }
597
598 static int tgsi_dst(struct r600_shader_ctx *ctx,
599 const struct tgsi_full_dst_register *tgsi_dst,
600 unsigned swizzle,
601 struct r600_bc_alu_dst *r600_dst)
602 {
603 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
604
605 r600_dst->sel = tgsi_dst->Register.Index;
606 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
607 r600_dst->chan = swizzle;
608 r600_dst->write = 1;
609 if (tgsi_dst->Register.Indirect)
610 r600_dst->rel = V_SQ_REL_RELATIVE;
611 if (inst->Instruction.Saturate) {
612 r600_dst->clamp = 1;
613 }
614 return 0;
615 }
616
617 static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
618 {
619 switch (swizzle) {
620 case 0:
621 return tgsi_src->Register.SwizzleX;
622 case 1:
623 return tgsi_src->Register.SwizzleY;
624 case 2:
625 return tgsi_src->Register.SwizzleZ;
626 case 3:
627 return tgsi_src->Register.SwizzleW;
628 default:
629 return 0;
630 }
631 }
632
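/*
 * Fill r600_src[] from the TGSI sources and, when more than one source comes
 * from the constant file, copy the extra constant operands into temporary
 * GPRs via MOVs first, presumably a workaround for the ALU only being able
 * to read one constant-file operand per instruction in this encoder.
 */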
633 static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
634 {
635 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
636 struct r600_bc_alu alu;
637 int i, j, k, nconst, r;
638
639 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
640 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
641 nconst++;
642 }
643 r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
644 if (r) {
645 return r;
646 }
647 }
648 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
649 if (inst->Src[j].Register.File == TGSI_FILE_CONSTANT && j > 0) {
650 for (k = 0; k < 4; k++) {
651 memset(&alu, 0, sizeof(struct r600_bc_alu));
652 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
653 alu.src[0].sel = r600_src[0].sel;
654 alu.src[0].chan = k;
655 alu.dst.sel = ctx->temp_reg + j;
656 alu.dst.chan = k;
657 alu.dst.write = 1;
658 if (k == 3)
659 alu.last = 1;
660 r = r600_bc_add_alu(ctx->bc, &alu);
661 if (r)
662 return r;
663 }
664 r600_src[0].sel = ctx->temp_reg + j;
665 j--;
666 }
667 }
668 return 0;
669 }
670
671 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap)
672 {
673 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
674 struct r600_bc_alu_src r600_src[3];
675 struct r600_bc_alu alu;
676 int i, j, r;
677 int lasti = 0;
678
679 for (i = 0; i < 4; i++) {
680 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
681 lasti = i;
682 }
683 }
684
685 r = tgsi_split_constant(ctx, r600_src);
686 if (r)
687 return r;
688 for (i = 0; i < lasti + 1; i++) {
689 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
690 continue;
691
692 memset(&alu, 0, sizeof(struct r600_bc_alu));
693 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
694 if (r)
695 return r;
696
697 alu.inst = ctx->inst_info->r600_opcode;
698 if (!swap) {
699 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
700 alu.src[j] = r600_src[j];
701 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
702 }
703 } else {
704 alu.src[0] = r600_src[1];
705 alu.src[0].chan = tgsi_chan(&inst->Src[1], i);
706
707 alu.src[1] = r600_src[0];
708 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
709 }
710 /* handle some special cases */
711 switch (ctx->inst_info->tgsi_opcode) {
712 case TGSI_OPCODE_SUB:
713 alu.src[1].neg = 1;
714 break;
715 case TGSI_OPCODE_ABS:
716 alu.src[0].abs = 1;
717 break;
718 default:
719 break;
720 }
721 if (i == lasti) {
722 alu.last = 1;
723 }
724 r = r600_bc_add_alu(ctx->bc, &alu);
725 if (r)
726 return r;
727 }
728 return 0;
729 }
730
731 static int tgsi_op2(struct r600_shader_ctx *ctx)
732 {
733 return tgsi_op2_s(ctx, 0);
734 }
735
736 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
737 {
738 return tgsi_op2_s(ctx, 1);
739 }
740
741 /*
742 * r600 - trunc to -PI..PI range
743 * r700 - normalize by dividing by 2PI
744 * see fdo bug 27901
745 */
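/*
 * In both cases the input angle x is first range-reduced as
 * tmp = fract(x * 1/(2*PI) + 0.5), then mapped back:
 *   r600:  result = tmp * 2*PI - PI   (range -PI..PI)
 *   r700+: result = tmp - 0.5         (range -0.5..0.5, normalized by hw)
 * which is exactly what the MULADD + FRACT + MULADD sequence below emits.
 */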
746 static int tgsi_setup_trig(struct r600_shader_ctx *ctx,
747 struct r600_bc_alu_src r600_src[3])
748 {
749 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
750 int r;
751 uint32_t lit_vals[4];
752 struct r600_bc_alu alu;
753
754 memset(lit_vals, 0, 4*4);
755 r = tgsi_split_constant(ctx, r600_src);
756 if (r)
757 return r;
758 lit_vals[0] = fui(1.0 /(3.1415926535 * 2));
759 lit_vals[1] = fui(0.5f);
760
761 memset(&alu, 0, sizeof(struct r600_bc_alu));
762 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
763 alu.is_op3 = 1;
764
765 alu.dst.chan = 0;
766 alu.dst.sel = ctx->temp_reg;
767 alu.dst.write = 1;
768
769 alu.src[0] = r600_src[0];
770 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
771
772 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
773 alu.src[1].chan = 0;
774 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
775 alu.src[2].chan = 1;
776 alu.last = 1;
777 r = r600_bc_add_alu(ctx->bc, &alu);
778 if (r)
779 return r;
780 r = r600_bc_add_literal(ctx->bc, lit_vals);
781 if (r)
782 return r;
783
784 memset(&alu, 0, sizeof(struct r600_bc_alu));
785 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;
786
787 alu.dst.chan = 0;
788 alu.dst.sel = ctx->temp_reg;
789 alu.dst.write = 1;
790
791 alu.src[0].sel = ctx->temp_reg;
792 alu.src[0].chan = 0;
793 alu.last = 1;
794 r = r600_bc_add_alu(ctx->bc, &alu);
795 if (r)
796 return r;
797
798 if (ctx->bc->chiprev == 0) {
799 lit_vals[0] = fui(3.1415926535897f * 2.0f);
800 lit_vals[1] = fui(-3.1415926535897f);
801 } else {
802 lit_vals[0] = fui(1.0f);
803 lit_vals[1] = fui(-0.5f);
804 }
805
806 memset(&alu, 0, sizeof(struct r600_bc_alu));
807 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
808 alu.is_op3 = 1;
809
810 alu.dst.chan = 0;
811 alu.dst.sel = ctx->temp_reg;
812 alu.dst.write = 1;
813
814 alu.src[0].sel = ctx->temp_reg;
815 alu.src[0].chan = 0;
816
817 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
818 alu.src[1].chan = 0;
819 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
820 alu.src[2].chan = 1;
821 alu.last = 1;
822 r = r600_bc_add_alu(ctx->bc, &alu);
823 if (r)
824 return r;
825 r = r600_bc_add_literal(ctx->bc, lit_vals);
826 if (r)
827 return r;
828 return 0;
829 }
830
831 static int tgsi_trig(struct r600_shader_ctx *ctx)
832 {
833 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
834 struct r600_bc_alu_src r600_src[3];
835 struct r600_bc_alu alu;
836 int i, r;
837
838 r = tgsi_split_constant(ctx, r600_src);
839 if (r)
840 return r;
841
842 r = tgsi_setup_trig(ctx, r600_src);
843 if (r)
844 return r;
845
846 memset(&alu, 0, sizeof(struct r600_bc_alu));
847 alu.inst = ctx->inst_info->r600_opcode;
848 alu.dst.chan = 0;
849 alu.dst.sel = ctx->temp_reg;
850 alu.dst.write = 1;
851
852 alu.src[0].sel = ctx->temp_reg;
853 alu.src[0].chan = 0;
854 alu.last = 1;
855 r = r600_bc_add_alu(ctx->bc, &alu);
856 if (r)
857 return r;
858
859 /* replicate result */
860 for (i = 0; i < 4; i++) {
861 memset(&alu, 0, sizeof(struct r600_bc_alu));
862 alu.src[0].sel = ctx->temp_reg;
863 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
864 alu.dst.chan = i;
865 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
866 if (r)
867 return r;
868 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
869 if (i == 3)
870 alu.last = 1;
871 r = r600_bc_add_alu(ctx->bc, &alu);
872 if (r)
873 return r;
874 }
875 return 0;
876 }
877
878 static int tgsi_scs(struct r600_shader_ctx *ctx)
879 {
880 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
881 struct r600_bc_alu_src r600_src[3];
882 struct r600_bc_alu alu;
883 int r;
884
885 r = tgsi_split_constant(ctx, r600_src);
886 if (r)
887 return r;
888
889 r = tgsi_setup_trig(ctx, r600_src);
890 if (r)
891 return r;
892
893
894 /* dst.x = COS */
895 memset(&alu, 0, sizeof(struct r600_bc_alu));
896 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS;
897 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
898 if (r)
899 return r;
900
901 alu.src[0].sel = ctx->temp_reg;
902 alu.src[0].chan = 0;
903 alu.last = 1;
904 r = r600_bc_add_alu(ctx->bc, &alu);
905 if (r)
906 return r;
907
908 /* dst.y = SIN */
909 memset(&alu, 0, sizeof(struct r600_bc_alu));
910 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN;
911 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
912 if (r)
913 return r;
914
915 alu.src[0].sel = ctx->temp_reg;
916 alu.src[0].chan = 0;
917 alu.last = 1;
918 r = r600_bc_add_alu(ctx->bc, &alu);
919 if (r)
920 return r;
921 return 0;
922 }
923
924 static int tgsi_kill(struct r600_shader_ctx *ctx)
925 {
926 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
927 struct r600_bc_alu alu;
928 int i, r;
929
930 for (i = 0; i < 4; i++) {
931 memset(&alu, 0, sizeof(struct r600_bc_alu));
932 alu.inst = ctx->inst_info->r600_opcode;
933
934 alu.dst.chan = i;
935
936 alu.src[0].sel = V_SQ_ALU_SRC_0;
937
938 if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_KILP) {
939 alu.src[1].sel = V_SQ_ALU_SRC_1;
940 alu.src[1].neg = 1;
941 } else {
942 r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
943 if (r)
944 return r;
945 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
946 }
947 if (i == 3) {
948 alu.last = 1;
949 }
950 r = r600_bc_add_alu(ctx->bc, &alu);
951 if (r)
952 return r;
953 }
954 r = r600_bc_add_literal(ctx->bc, ctx->value);
955 if (r)
956 return r;
957
958 /* kill must be last in ALU */
959 ctx->bc->force_add_cf = 1;
960 ctx->shader->uses_kill = TRUE;
961 return 0;
962 }
963
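/*
 * LIT lowering: dst.x and dst.w are constant 1.0, dst.y = max(src.x, 0), and
 * dst.z (only when its writemask bit is set) is built as
 * EXP_IEEE(MUL_LIT(src.w, LOG_CLAMPED(src.y), src.x)), i.e. roughly
 * pow(src.y, src.w) with the MUL_LIT instruction presumably handling the
 * src.x <= 0 and clamping special cases.
 */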
964 static int tgsi_lit(struct r600_shader_ctx *ctx)
965 {
966 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
967 struct r600_bc_alu alu;
968 int r;
969
970 /* dst.x <- 1.0 */
971 memset(&alu, 0, sizeof(struct r600_bc_alu));
972 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
973 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
974 alu.src[0].chan = 0;
975 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
976 if (r)
977 return r;
978 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
979 r = r600_bc_add_alu(ctx->bc, &alu);
980 if (r)
981 return r;
982
983 /* dst.y = max(src.x, 0.0) */
984 memset(&alu, 0, sizeof(struct r600_bc_alu));
985 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX;
986 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
987 if (r)
988 return r;
989 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
990 alu.src[1].chan = tgsi_chan(&inst->Src[0], 0);
991 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
992 if (r)
993 return r;
994 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
995 r = r600_bc_add_alu(ctx->bc, &alu);
996 if (r)
997 return r;
998
999 /* dst.z = NOP - fill Z slot */
1000 memset(&alu, 0, sizeof(struct r600_bc_alu));
1001 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
1002 alu.dst.chan = 2;
1003 r = r600_bc_add_alu(ctx->bc, &alu);
1004 if (r)
1005 return r;
1006
1007 /* dst.w <- 1.0 */
1008 memset(&alu, 0, sizeof(struct r600_bc_alu));
1009 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1010 alu.src[0].sel = V_SQ_ALU_SRC_1;
1011 alu.src[0].chan = 0;
1012 r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
1013 if (r)
1014 return r;
1015 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
1016 alu.last = 1;
1017 r = r600_bc_add_alu(ctx->bc, &alu);
1018 if (r)
1019 return r;
1020
1021 r = r600_bc_add_literal(ctx->bc, ctx->value);
1022 if (r)
1023 return r;
1024
1025 if (inst->Dst[0].Register.WriteMask & (1 << 2))
1026 {
1027 int chan;
1028 int sel;
1029
1030 /* dst.z = log(src.y) */
1031 memset(&alu, 0, sizeof(struct r600_bc_alu));
1032 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED;
1033 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1034 if (r)
1035 return r;
1036 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1037 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
1038 if (r)
1039 return r;
1040 alu.last = 1;
1041 r = r600_bc_add_alu(ctx->bc, &alu);
1042 if (r)
1043 return r;
1044
1045 r = r600_bc_add_literal(ctx->bc, ctx->value);
1046 if (r)
1047 return r;
1048 chan = alu.dst.chan;
1049 sel = alu.dst.sel;
1050
1051 /* tmp.x = MUL_LIT(src.w, dst.z, src.x) */
1052 memset(&alu, 0, sizeof(struct r600_bc_alu));
1053 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
1054 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1055 if (r)
1056 return r;
1057 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1058 alu.src[1].sel = sel;
1059 alu.src[1].chan = chan;
1060 r = tgsi_src(ctx, &inst->Src[0], &alu.src[2]);
1061 if (r)
1062 return r;
1063 alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
1064 alu.dst.sel = ctx->temp_reg;
1065 alu.dst.chan = 0;
1066 alu.dst.write = 1;
1067 alu.is_op3 = 1;
1068 alu.last = 1;
1069 r = r600_bc_add_alu(ctx->bc, &alu);
1070 if (r)
1071 return r;
1072
1073 r = r600_bc_add_literal(ctx->bc, ctx->value);
1074 if (r)
1075 return r;
1076 /* dst.z = exp(tmp.x) */
1077 memset(&alu, 0, sizeof(struct r600_bc_alu));
1078 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1079 alu.src[0].sel = ctx->temp_reg;
1080 alu.src[0].chan = 0;
1081 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
1082 if (r)
1083 return r;
1084 alu.last = 1;
1085 r = r600_bc_add_alu(ctx->bc, &alu);
1086 if (r)
1087 return r;
1088 }
1089 return 0;
1090 }
1091
1092 static int tgsi_trans(struct r600_shader_ctx *ctx)
1093 {
1094 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1095 struct r600_bc_alu alu;
1096 int i, j, r;
1097
1098 for (i = 0; i < 4; i++) {
1099 memset(&alu, 0, sizeof(struct r600_bc_alu));
1100 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
1101 alu.inst = ctx->inst_info->r600_opcode;
1102 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1103 r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
1104 if (r)
1105 return r;
1106 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1107 }
1108 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1109 if (r)
1110 return r;
1111 alu.last = 1;
1112 r = r600_bc_add_alu(ctx->bc, &alu);
1113 if (r)
1114 return r;
1115 }
1116 }
1117 return 0;
1118 }
1119
1120 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
1121 {
1122 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1123 struct r600_bc_alu alu;
1124 int i, r;
1125
1126 for (i = 0; i < 4; i++) {
1127 memset(&alu, 0, sizeof(struct r600_bc_alu));
1128 alu.src[0].sel = ctx->temp_reg;
1129 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1130 alu.dst.chan = i;
1131 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1132 if (r)
1133 return r;
1134 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
1135 if (i == 3)
1136 alu.last = 1;
1137 r = r600_bc_add_alu(ctx->bc, &alu);
1138 if (r)
1139 return r;
1140 }
1141 return 0;
1142 }
1143
1144 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
1145 {
1146 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1147 struct r600_bc_alu alu;
1148 int i, r;
1149
1150 memset(&alu, 0, sizeof(struct r600_bc_alu));
1151 alu.inst = ctx->inst_info->r600_opcode;
1152 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1153 r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]);
1154 if (r)
1155 return r;
1156 alu.src[i].chan = tgsi_chan(&inst->Src[i], 0);
1157 }
1158 alu.dst.sel = ctx->temp_reg;
1159 alu.dst.write = 1;
1160 alu.last = 1;
1161 r = r600_bc_add_alu(ctx->bc, &alu);
1162 if (r)
1163 return r;
1164 r = r600_bc_add_literal(ctx->bc, ctx->value);
1165 if (r)
1166 return r;
1167 /* replicate result */
1168 return tgsi_helper_tempx_replicate(ctx);
1169 }
1170
1171 static int tgsi_pow(struct r600_shader_ctx *ctx)
1172 {
1173 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1174 struct r600_bc_alu alu;
1175 int r;
1176
1177 /* LOG2(a) */
1178 memset(&alu, 0, sizeof(struct r600_bc_alu));
1179 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE;
1180 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1181 if (r)
1182 return r;
1183 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1184 alu.dst.sel = ctx->temp_reg;
1185 alu.dst.write = 1;
1186 alu.last = 1;
1187 r = r600_bc_add_alu(ctx->bc, &alu);
1188 if (r)
1189 return r;
1190 r = r600_bc_add_literal(ctx->bc,ctx->value);
1191 if (r)
1192 return r;
1193 /* b * LOG2(a) */
1194 memset(&alu, 0, sizeof(struct r600_bc_alu));
1195 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL_IEEE;
1196 r = tgsi_src(ctx, &inst->Src[1], &alu.src[0]);
1197 if (r)
1198 return r;
1199 alu.src[0].chan = tgsi_chan(&inst->Src[1], 0);
1200 alu.src[1].sel = ctx->temp_reg;
1201 alu.dst.sel = ctx->temp_reg;
1202 alu.dst.write = 1;
1203 alu.last = 1;
1204 r = r600_bc_add_alu(ctx->bc, &alu);
1205 if (r)
1206 return r;
1207 r = r600_bc_add_literal(ctx->bc,ctx->value);
1208 if (r)
1209 return r;
1210 /* POW(a,b) = EXP2(b * LOG2(a)) */
1211 memset(&alu, 0, sizeof(struct r600_bc_alu));
1212 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1213 alu.src[0].sel = ctx->temp_reg;
1214 alu.dst.sel = ctx->temp_reg;
1215 alu.dst.write = 1;
1216 alu.last = 1;
1217 r = r600_bc_add_alu(ctx->bc, &alu);
1218 if (r)
1219 return r;
1220 r = r600_bc_add_literal(ctx->bc,ctx->value);
1221 if (r)
1222 return r;
1223 return tgsi_helper_tempx_replicate(ctx);
1224 }
1225
1226 static int tgsi_ssg(struct r600_shader_ctx *ctx)
1227 {
1228 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1229 struct r600_bc_alu alu;
1230 struct r600_bc_alu_src r600_src[3];
1231 int i, r;
1232
1233 r = tgsi_split_constant(ctx, r600_src);
1234 if (r)
1235 return r;
1236
1237 /* tmp = (src > 0 ? 1 : src) */
1238 for (i = 0; i < 4; i++) {
1239 memset(&alu, 0, sizeof(struct r600_bc_alu));
1240 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
1241 alu.is_op3 = 1;
1242
1243 alu.dst.sel = ctx->temp_reg;
1244 alu.dst.chan = i;
1245
1246 alu.src[0] = r600_src[0];
1247 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1248
1249 alu.src[1].sel = V_SQ_ALU_SRC_1;
1250
1251 alu.src[2] = r600_src[0];
1252 alu.src[2].chan = tgsi_chan(&inst->Src[0], i);
1253 if (i == 3)
1254 alu.last = 1;
1255 r = r600_bc_add_alu(ctx->bc, &alu);
1256 if (r)
1257 return r;
1258 }
1259 r = r600_bc_add_literal(ctx->bc, ctx->value);
1260 if (r)
1261 return r;
1262
1263 /* dst = (-tmp > 0 ? -1 : tmp) */
1264 for (i = 0; i < 4; i++) {
1265 memset(&alu, 0, sizeof(struct r600_bc_alu));
1266 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
1267 alu.is_op3 = 1;
1268 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1269 if (r)
1270 return r;
1271
1272 alu.src[0].sel = ctx->temp_reg;
1273 alu.src[0].chan = i;
1274 alu.src[0].neg = 1;
1275
1276 alu.src[1].sel = V_SQ_ALU_SRC_1;
1277 alu.src[1].neg = 1;
1278
1279 alu.src[2].sel = ctx->temp_reg;
1280 alu.src[2].chan = i;
1281
1282 if (i == 3)
1283 alu.last = 1;
1284 r = r600_bc_add_alu(ctx->bc, &alu);
1285 if (r)
1286 return r;
1287 }
1288 return 0;
1289 }
1290
1291 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
1292 {
1293 struct r600_bc_alu alu;
1294 int i, r;
1295
1296 r = r600_bc_add_literal(ctx->bc, ctx->value);
1297 if (r)
1298 return r;
1299 for (i = 0; i < 4; i++) {
1300 memset(&alu, 0, sizeof(struct r600_bc_alu));
1301 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
1302 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
1303 alu.dst.chan = i;
1304 } else {
1305 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1306 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1307 if (r)
1308 return r;
1309 alu.src[0].sel = ctx->temp_reg;
1310 alu.src[0].chan = i;
1311 }
1312 if (i == 3) {
1313 alu.last = 1;
1314 }
1315 r = r600_bc_add_alu(ctx->bc, &alu);
1316 if (r)
1317 return r;
1318 }
1319 return 0;
1320 }
1321
1322 static int tgsi_op3(struct r600_shader_ctx *ctx)
1323 {
1324 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1325 struct r600_bc_alu_src r600_src[3];
1326 struct r600_bc_alu alu;
1327 int i, j, r;
1328
1329 r = tgsi_split_constant(ctx, r600_src);
1330 if (r)
1331 return r;
1332 /* do it in 2 steps as op3 doesn't support writemask */
1333 for (i = 0; i < 4; i++) {
1334 memset(&alu, 0, sizeof(struct r600_bc_alu));
1335 alu.inst = ctx->inst_info->r600_opcode;
1336 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1337 alu.src[j] = r600_src[j];
1338 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1339 }
1340 alu.dst.sel = ctx->temp_reg;
1341 alu.dst.chan = i;
1342 alu.dst.write = 1;
1343 alu.is_op3 = 1;
1344 if (i == 3) {
1345 alu.last = 1;
1346 }
1347 r = r600_bc_add_alu(ctx->bc, &alu);
1348 if (r)
1349 return r;
1350 }
1351 return tgsi_helper_copy(ctx, inst);
1352 }
1353
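/*
 * Dot products: the selected r600 opcode is expected to be a DOT4-style
 * 4-lane reduction, so all four channels are emitted and the unused lanes
 * are patched per opcode: DP2 forces lanes 2-3 to 0*0, DP3 forces lane 3 to
 * 0*0, and DPH replaces src0.w with 1.0 so the last lane contributes src1.w.
 */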
1354 static int tgsi_dp(struct r600_shader_ctx *ctx)
1355 {
1356 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1357 struct r600_bc_alu_src r600_src[3];
1358 struct r600_bc_alu alu;
1359 int i, j, r;
1360
1361 r = tgsi_split_constant(ctx, r600_src);
1362 if (r)
1363 return r;
1364 for (i = 0; i < 4; i++) {
1365 memset(&alu, 0, sizeof(struct r600_bc_alu));
1366 alu.inst = ctx->inst_info->r600_opcode;
1367 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1368 alu.src[j] = r600_src[j];
1369 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1370 }
1371 alu.dst.sel = ctx->temp_reg;
1372 alu.dst.chan = i;
1373 alu.dst.write = 1;
1374 /* handle some special cases */
1375 switch (ctx->inst_info->tgsi_opcode) {
1376 case TGSI_OPCODE_DP2:
1377 if (i > 1) {
1378 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
1379 alu.src[0].chan = alu.src[1].chan = 0;
1380 }
1381 break;
1382 case TGSI_OPCODE_DP3:
1383 if (i > 2) {
1384 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
1385 alu.src[0].chan = alu.src[1].chan = 0;
1386 }
1387 break;
1388 case TGSI_OPCODE_DPH:
1389 if (i == 3) {
1390 alu.src[0].sel = V_SQ_ALU_SRC_1;
1391 alu.src[0].chan = 0;
1392 alu.src[0].neg = 0;
1393 }
1394 break;
1395 default:
1396 break;
1397 }
1398 if (i == 3) {
1399 alu.last = 1;
1400 }
1401 r = r600_bc_add_alu(ctx->bc, &alu);
1402 if (r)
1403 return r;
1404 }
1405 return tgsi_helper_copy(ctx, inst);
1406 }
1407
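/*
 * Texture sampling: for TXP a perspective divide is emitted first
 * (temp.w = 1/src.w, temp.xyz = src.xyz * temp.w, then temp.w = 1.0) and the
 * fetch reads from the temp; any non-temporary coordinate is likewise copied
 * into the temp GPR first, presumably because the fetch can only address a
 * plain GPR.  Coordinates are marked normalized unless the target is
 * TGSI_TEXTURE_RECT.
 */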
1408 static int tgsi_tex(struct r600_shader_ctx *ctx)
1409 {
1410 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1411 struct r600_bc_tex tex;
1412 struct r600_bc_alu alu;
1413 unsigned src_gpr;
1414 int r, i;
1415
1416 src_gpr = ctx->file_offset[inst->Src[0].Register.File] + inst->Src[0].Register.Index;
1417
1418 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1419 /* Add perspective divide */
1420 memset(&alu, 0, sizeof(struct r600_bc_alu));
1421 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
1422 alu.src[0].sel = src_gpr;
1423 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1424 alu.dst.sel = ctx->temp_reg;
1425 alu.dst.chan = 3;
1426 alu.last = 1;
1427 alu.dst.write = 1;
1428 r = r600_bc_add_alu(ctx->bc, &alu);
1429 if (r)
1430 return r;
1431
1432 for (i = 0; i < 3; i++) {
1433 memset(&alu, 0, sizeof(struct r600_bc_alu));
1434 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1435 alu.src[0].sel = ctx->temp_reg;
1436 alu.src[0].chan = 3;
1437 alu.src[1].sel = src_gpr;
1438 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1439 alu.dst.sel = ctx->temp_reg;
1440 alu.dst.chan = i;
1441 alu.dst.write = 1;
1442 r = r600_bc_add_alu(ctx->bc, &alu);
1443 if (r)
1444 return r;
1445 }
1446 memset(&alu, 0, sizeof(struct r600_bc_alu));
1447 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1448 alu.src[0].sel = V_SQ_ALU_SRC_1;
1449 alu.src[0].chan = 0;
1450 alu.dst.sel = ctx->temp_reg;
1451 alu.dst.chan = 3;
1452 alu.last = 1;
1453 alu.dst.write = 1;
1454 r = r600_bc_add_alu(ctx->bc, &alu);
1455 if (r)
1456 return r;
1457 src_gpr = ctx->temp_reg;
1458 } else if (inst->Src[0].Register.File != TGSI_FILE_TEMPORARY) {
1459 for (i = 0; i < 4; i++) {
1460 memset(&alu, 0, sizeof(struct r600_bc_alu));
1461 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1462 alu.src[0].sel = src_gpr;
1463 alu.src[0].chan = i;
1464 alu.dst.sel = ctx->temp_reg;
1465 alu.dst.chan = i;
1466 if (i == 3)
1467 alu.last = 1;
1468 alu.dst.write = 1;
1469 r = r600_bc_add_alu(ctx->bc, &alu);
1470 if (r)
1471 return r;
1472 }
1473 src_gpr = ctx->temp_reg;
1474 }
1475
1476 memset(&tex, 0, sizeof(struct r600_bc_tex));
1477 tex.inst = ctx->inst_info->r600_opcode;
1478 tex.resource_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
1479 tex.sampler_id = tex.resource_id;
1480 tex.src_gpr = src_gpr;
1481 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
1482 tex.dst_sel_x = 0;
1483 tex.dst_sel_y = 1;
1484 tex.dst_sel_z = 2;
1485 tex.dst_sel_w = 3;
1486 tex.src_sel_x = 0;
1487 tex.src_sel_y = 1;
1488 tex.src_sel_z = 2;
1489 tex.src_sel_w = 3;
1490
1491 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
1492 tex.coord_type_x = 1;
1493 tex.coord_type_y = 1;
1494 tex.coord_type_z = 1;
1495 tex.coord_type_w = 1;
1496 }
1497 return r600_bc_add_tex(ctx->bc, &tex);
1498 }
1499
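/*
 * LRP(src0, src1, src2) = src0 * src1 + (1 - src0) * src2, computed in three
 * passes: temp = 1 - src0, temp = temp * src2, then a MULADD of
 * src0 * src1 + temp, with the result copied out through the writemask
 * helper.
 */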
1500 static int tgsi_lrp(struct r600_shader_ctx *ctx)
1501 {
1502 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1503 struct r600_bc_alu_src r600_src[3];
1504 struct r600_bc_alu alu;
1505 unsigned i;
1506 int r;
1507
1508 r = tgsi_split_constant(ctx, r600_src);
1509 if (r)
1510 return r;
1511 /* 1 - src0 */
1512 for (i = 0; i < 4; i++) {
1513 memset(&alu, 0, sizeof(struct r600_bc_alu));
1514 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD;
1515 alu.src[0].sel = V_SQ_ALU_SRC_1;
1516 alu.src[0].chan = 0;
1517 alu.src[1] = r600_src[0];
1518 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1519 alu.src[1].neg = 1;
1520 alu.dst.sel = ctx->temp_reg;
1521 alu.dst.chan = i;
1522 if (i == 3) {
1523 alu.last = 1;
1524 }
1525 alu.dst.write = 1;
1526 r = r600_bc_add_alu(ctx->bc, &alu);
1527 if (r)
1528 return r;
1529 }
1530 r = r600_bc_add_literal(ctx->bc, ctx->value);
1531 if (r)
1532 return r;
1533
1534 /* (1 - src0) * src2 */
1535 for (i = 0; i < 4; i++) {
1536 memset(&alu, 0, sizeof(struct r600_bc_alu));
1537 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1538 alu.src[0].sel = ctx->temp_reg;
1539 alu.src[0].chan = i;
1540 alu.src[1] = r600_src[2];
1541 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1542 alu.dst.sel = ctx->temp_reg;
1543 alu.dst.chan = i;
1544 if (i == 3) {
1545 alu.last = 1;
1546 }
1547 alu.dst.write = 1;
1548 r = r600_bc_add_alu(ctx->bc, &alu);
1549 if (r)
1550 return r;
1551 }
1552 r = r600_bc_add_literal(ctx->bc, ctx->value);
1553 if (r)
1554 return r;
1555
1556 /* src0 * src1 + (1 - src0) * src2 */
1557 for (i = 0; i < 4; i++) {
1558 memset(&alu, 0, sizeof(struct r600_bc_alu));
1559 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1560 alu.is_op3 = 1;
1561 alu.src[0] = r600_src[0];
1562 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1563 alu.src[1] = r600_src[1];
1564 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
1565 alu.src[2].sel = ctx->temp_reg;
1566 alu.src[2].chan = i;
1567 alu.dst.sel = ctx->temp_reg;
1568 alu.dst.chan = i;
1569 if (i == 3) {
1570 alu.last = 1;
1571 }
1572 r = r600_bc_add_alu(ctx->bc, &alu);
1573 if (r)
1574 return r;
1575 }
1576 return tgsi_helper_copy(ctx, inst);
1577 }
1578
1579 static int tgsi_cmp(struct r600_shader_ctx *ctx)
1580 {
1581 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1582 struct r600_bc_alu_src r600_src[3];
1583 struct r600_bc_alu alu;
1584 int use_temp = 0;
1585 int i, r;
1586
1587 r = tgsi_split_constant(ctx, r600_src);
1588 if (r)
1589 return r;
1590
1591 if (inst->Dst[0].Register.WriteMask != 0xf)
1592 use_temp = 1;
1593
1594 for (i = 0; i < 4; i++) {
1595 memset(&alu, 0, sizeof(struct r600_bc_alu));
1596 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE;
1597 alu.src[0] = r600_src[0];
1598 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1599
1600 alu.src[1] = r600_src[2];
1601 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1602
1603 alu.src[2] = r600_src[1];
1604 alu.src[2].chan = tgsi_chan(&inst->Src[1], i);
1605
1606 if (use_temp)
1607 alu.dst.sel = ctx->temp_reg;
1608 else {
1609 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1610 if (r)
1611 return r;
1612 }
1613 alu.dst.chan = i;
1614 alu.dst.write = 1;
1615 alu.is_op3 = 1;
1616 if (i == 3)
1617 alu.last = 1;
1618 r = r600_bc_add_alu(ctx->bc, &alu);
1619 if (r)
1620 return r;
1621 }
1622 if (use_temp)
1623 return tgsi_helper_copy(ctx, inst);
1624 return 0;
1625 }
1626
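/*
 * Cross product: dst.xyz = src0.yzx * src1.zxy - src0.zxy * src1.yzx,
 * emitted as a MUL pass into the temp followed by a MULADD pass that
 * subtracts it.  Lane 3 is computed with both sources forced to 0, so dst.w
 * ends up 0 here even though TGSI documents XPD.w as 1.0.
 */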
1627 static int tgsi_xpd(struct r600_shader_ctx *ctx)
1628 {
1629 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1630 struct r600_bc_alu_src r600_src[3];
1631 struct r600_bc_alu alu;
1632 uint32_t use_temp = 0;
1633 int i, r;
1634
1635 if (inst->Dst[0].Register.WriteMask != 0xf)
1636 use_temp = 1;
1637
1638 r = tgsi_split_constant(ctx, r600_src);
1639 if (r)
1640 return r;
1641
1642 for (i = 0; i < 4; i++) {
1643 memset(&alu, 0, sizeof(struct r600_bc_alu));
1644 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1645
1646 alu.src[0] = r600_src[0];
1647 switch (i) {
1648 case 0:
1649 alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
1650 break;
1651 case 1:
1652 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1653 break;
1654 case 2:
1655 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1656 break;
1657 case 3:
1658 alu.src[0].sel = V_SQ_ALU_SRC_0;
1659 alu.src[0].chan = i;
1660 }
1661
1662 alu.src[1] = r600_src[1];
1663 switch (i) {
1664 case 0:
1665 alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
1666 break;
1667 case 1:
1668 alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
1669 break;
1670 case 2:
1671 alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
1672 break;
1673 case 3:
1674 alu.src[1].sel = V_SQ_ALU_SRC_0;
1675 alu.src[1].chan = i;
1676 }
1677
1678 alu.dst.sel = ctx->temp_reg;
1679 alu.dst.chan = i;
1680 alu.dst.write = 1;
1681
1682 if (i == 3)
1683 alu.last = 1;
1684 r = r600_bc_add_alu(ctx->bc, &alu);
1685 if (r)
1686 return r;
1687 }
1688
1689 for (i = 0; i < 4; i++) {
1690 memset(&alu, 0, sizeof(struct r600_bc_alu));
1691 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1692
1693 alu.src[0] = r600_src[0];
1694 switch (i) {
1695 case 0:
1696 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1697 break;
1698 case 1:
1699 alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
1700 break;
1701 case 2:
1702 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1703 break;
1704 case 3:
1705 alu.src[0].sel = V_SQ_ALU_SRC_0;
1706 alu.src[0].chan = i;
1707 }
1708
1709 alu.src[1] = r600_src[1];
1710 switch (i) {
1711 case 0:
1712 alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
1713 break;
1714 case 1:
1715 alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
1716 break;
1717 case 2:
1718 alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
1719 break;
1720 case 3:
1721 alu.src[1].sel = V_SQ_ALU_SRC_0;
1722 alu.src[1].chan = i;
1723 }
1724
1725 alu.src[2].sel = ctx->temp_reg;
1726 alu.src[2].neg = 1;
1727 alu.src[2].chan = i;
1728
1729 if (use_temp)
1730 alu.dst.sel = ctx->temp_reg;
1731 else {
1732 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1733 if (r)
1734 return r;
1735 }
1736 alu.dst.chan = i;
1737 alu.dst.write = 1;
1738 alu.is_op3 = 1;
1739 if (i == 3)
1740 alu.last = 1;
1741 r = r600_bc_add_alu(ctx->bc, &alu);
1742 if (r)
1743 return r;
1744 }
1745 if (use_temp)
1746 return tgsi_helper_copy(ctx, inst);
1747 return 0;
1748 }
1749
1750 static int tgsi_exp(struct r600_shader_ctx *ctx)
1751 {
1752 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1753 struct r600_bc_alu_src r600_src[3];
1754 struct r600_bc_alu alu;
1755 int r;
1756
1757 /* result.x = 2^floor(src); */
1758 if (inst->Dst[0].Register.WriteMask & 1) {
1759 memset(&alu, 0, sizeof(struct r600_bc_alu));
1760
1761 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR;
1762 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1763 if (r)
1764 return r;
1765
1766 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1767
1768 alu.dst.sel = ctx->temp_reg;
1769 alu.dst.chan = 0;
1770 alu.dst.write = 1;
1771 alu.last = 1;
1772 r = r600_bc_add_alu(ctx->bc, &alu);
1773 if (r)
1774 return r;
1775
1776 r = r600_bc_add_literal(ctx->bc, ctx->value);
1777 if (r)
1778 return r;
1779
1780 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1781 alu.src[0].sel = ctx->temp_reg;
1782 alu.src[0].chan = 0;
1783
1784 alu.dst.sel = ctx->temp_reg;
1785 alu.dst.chan = 0;
1786 alu.dst.write = 1;
1787 alu.last = 1;
1788 r = r600_bc_add_alu(ctx->bc, &alu);
1789 if (r)
1790 return r;
1791
1792 r = r600_bc_add_literal(ctx->bc, ctx->value);
1793 if (r)
1794 return r;
1795 }
1796
1797 /* result.y = tmp - floor(tmp); */
1798 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
1799 memset(&alu, 0, sizeof(struct r600_bc_alu));
1800
1801 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;
1802 alu.src[0] = r600_src[0];
1803 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1804 if (r)
1805 return r;
1806 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1807
1808 alu.dst.sel = ctx->temp_reg;
1809 // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1810 // if (r)
1811 // return r;
1812 alu.dst.write = 1;
1813 alu.dst.chan = 1;
1814
1815 alu.last = 1;
1816
1817 r = r600_bc_add_alu(ctx->bc, &alu);
1818 if (r)
1819 return r;
1820 r = r600_bc_add_literal(ctx->bc, ctx->value);
1821 if (r)
1822 return r;
1823 }
1824
1825 /* result.z = RoughApprox2ToX(tmp); */
1826 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
1827 memset(&alu, 0, sizeof(struct r600_bc_alu));
1828 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1829 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1830 if (r)
1831 return r;
1832 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1833
1834 alu.dst.sel = ctx->temp_reg;
1835 alu.dst.write = 1;
1836 alu.dst.chan = 2;
1837
1838 alu.last = 1;
1839
1840 r = r600_bc_add_alu(ctx->bc, &alu);
1841 if (r)
1842 return r;
1843 r = r600_bc_add_literal(ctx->bc, ctx->value);
1844 if (r)
1845 return r;
1846 }
1847
1848 /* result.w = 1.0; */
1849 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
1850 memset(&alu, 0, sizeof(struct r600_bc_alu));
1851
1852 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1853 alu.src[0].sel = V_SQ_ALU_SRC_1;
1854 alu.src[0].chan = 0;
1855
1856 alu.dst.sel = ctx->temp_reg;
1857 alu.dst.chan = 3;
1858 alu.dst.write = 1;
1859 alu.last = 1;
1860 r = r600_bc_add_alu(ctx->bc, &alu);
1861 if (r)
1862 return r;
1863 r = r600_bc_add_literal(ctx->bc, ctx->value);
1864 if (r)
1865 return r;
1866 }
1867 return tgsi_helper_copy(ctx, inst);
1868 }
1869
1870 static int tgsi_arl(struct r600_shader_ctx *ctx)
1871 {
1872 /* TODO: from r600c, AR values don't persist between clauses */
1873 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1874 struct r600_bc_alu alu;
1875 int r;
1876 memset(&alu, 0, sizeof(struct r600_bc_alu));
1877
1878 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR;
1879
1880 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1881 if (r)
1882 return r;
1883 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1884
1885 alu.last = 1;
1886
1887 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU);
1888 if (r)
1889 return r;
1890 return 0;
1891 }
1892
1893 static int tgsi_opdst(struct r600_shader_ctx *ctx)
1894 {
1895 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1896 struct r600_bc_alu alu;
1897 int i, r = 0;
1898
1899 for (i = 0; i < 4; i++) {
1900 memset(&alu, 0, sizeof(struct r600_bc_alu));
1901
1902 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1903 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1904 if (r)
1905 return r;
1906
1907 if (i == 0 || i == 3) {
1908 alu.src[0].sel = V_SQ_ALU_SRC_1;
1909 } else {
1910 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1911 if (r)
1912 return r;
1913 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1914 }
1915
1916 if (i == 0 || i == 2) {
1917 alu.src[1].sel = V_SQ_ALU_SRC_1;
1918 } else {
1919 r = tgsi_src(ctx, &inst->Src[1], &alu.src[1]);
1920 if (r)
1921 return r;
1922 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
1923 }
1924 if (i == 3)
1925 alu.last = 1;
1926 r = r600_bc_add_alu(ctx->bc, &alu);
1927 if (r)
1928 return r;
1929 }
1930 return 0;
1931 }
1932
1933 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
1934 {
1935 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1936 struct r600_bc_alu alu;
1937 int r;
1938
1939 memset(&alu, 0, sizeof(struct r600_bc_alu));
1940 alu.inst = opcode;
1941 alu.predicate = 1;
1942
1943 alu.dst.sel = ctx->temp_reg;
1944 alu.dst.write = 1;
1945 alu.dst.chan = 0;
1946
1947 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1948 if (r)
1949 return r;
1950 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1951 alu.src[1].sel = V_SQ_ALU_SRC_0;
1952 alu.src[1].chan = 0;
1953
1954 alu.last = 1;
1955
1956 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE);
1957 if (r)
1958 return r;
1959 return 0;
1960 }
1961
1962 static int pops(struct r600_shader_ctx *ctx, int pops)
1963 {
1964 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_POP);
1965 ctx->bc->cf_last->pop_count = pops;
1966 return 0;
1967 }
1968
1969 static inline void callstack_decrease_current(struct r600_shader_ctx *ctx, unsigned reason)
1970 {
1971 switch(reason) {
1972 case FC_PUSH_VPM:
1973 ctx->bc->callstack[ctx->bc->call_sp].current--;
1974 break;
1975 case FC_PUSH_WQM:
1976 case FC_LOOP:
1977 ctx->bc->callstack[ctx->bc->call_sp].current -= 4;
1978 break;
1979 case FC_REP:
1980 /* TODO: for a 16 vp asic this should be -= 2; */
1981 ctx->bc->callstack[ctx->bc->call_sp].current--;
1982 break;
1983 }
1984 }
1985
1986 static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only)
1987 {
1988 if (check_max_only) {
1989 int diff = 0;
1990 switch (reason) {
1991 case FC_PUSH_VPM:
1992 diff = 1;
1993 break;
1994 case FC_PUSH_WQM:
1995 diff = 4;
1996 break;
1997 }
1998 if ((ctx->bc->callstack[ctx->bc->call_sp].current + diff) >
1999 ctx->bc->callstack[ctx->bc->call_sp].max) {
2000 ctx->bc->callstack[ctx->bc->call_sp].max =
2001 ctx->bc->callstack[ctx->bc->call_sp].current + diff;
2002 }
2003 return;
2004 }
2005 switch (reason) {
2006 case FC_PUSH_VPM:
2007 ctx->bc->callstack[ctx->bc->call_sp].current++;
2008 break;
2009 case FC_PUSH_WQM:
2010 case FC_LOOP:
2011 ctx->bc->callstack[ctx->bc->call_sp].current += 4;
2012 break;
2013 case FC_REP:
2014 ctx->bc->callstack[ctx->bc->call_sp].current++;
2015 break;
2016 }
2017
2018 if ((ctx->bc->callstack[ctx->bc->call_sp].current) >
2019 ctx->bc->callstack[ctx->bc->call_sp].max) {
2020 ctx->bc->callstack[ctx->bc->call_sp].max =
2021 ctx->bc->callstack[ctx->bc->call_sp].current;
2022 }
2023 }
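/*
 * The two helpers above track the per-call-level stack depth: FC_PUSH_VPM
 * costs one entry, FC_PUSH_WQM and FC_LOOP are counted as four, and ->max
 * only ever grows, so it ends up holding the high-water mark for the whole
 * shader.  A rough sketch of the expected pairing, as used by the IF
 * handlers further down (not additional API):
 *
 *     callstack_check_depth(ctx, FC_PUSH_VPM, 0);    (on IF: reserve)
 *     ...
 *     callstack_decrease_current(ctx, FC_PUSH_VPM);  (on ENDIF: release)
 */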
2024
2025 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
2026 {
2027 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
2028
2029 sp->mid = (struct r600_bc_cf **)realloc((void *)sp->mid,
2030 sizeof(struct r600_bc_cf *) * (sp->num_mid + 1));
2031 sp->mid[sp->num_mid] = ctx->bc->cf_last;
2032 sp->num_mid++;
2033 }
2034
2035 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
2036 {
2037 ctx->bc->fc_sp++;
2038 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
2039 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
2040 }
2041
2042 static void fc_poplevel(struct r600_shader_ctx *ctx)
2043 {
2044 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
2045 if (sp->mid) {
2046 free(sp->mid);
2047 sp->mid = NULL;
2048 }
2049 sp->num_mid = 0;
2050 sp->start = NULL;
2051 sp->type = 0;
2052 ctx->bc->fc_sp--;
2053 }
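/*
 * fc_stack keeps one entry per open flow-control construct: ->start is the
 * CF instruction that opened it (JUMP or LOOP_START) and ->mid collects the
 * CF instructions in the middle (ELSE, BREAK, CONTINUE) whose jump targets
 * can only be patched once the closing ENDIF/ENDLOOP address is known; see
 * tgsi_endif() and tgsi_endloop() below for the fixups.
 */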
2054
2055 #if 0
2056 static int emit_return(struct r600_shader_ctx *ctx)
2057 {
2058 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_RETURN);
2059 return 0;
2060 }
2061
2062 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
2063 {
2064
2065 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
2066 ctx->bc->cf_last->pop_count = pops;
2067 /* TODO work out offset */
2068 return 0;
2069 }
2070
2071 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
2072 {
2073 return 0;
2074 }
2075
2076 static void emit_testflag(struct r600_shader_ctx *ctx)
2077 {
2078
2079 }
2080
2081 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
2082 {
2083 emit_testflag(ctx);
2084 emit_jump_to_offset(ctx, 1, 4);
2085 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
2086 pops(ctx, ifidx + 1);
2087 emit_return(ctx);
2088 }
2089
2090 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
2091 {
2092 emit_testflag(ctx);
2093
2094 r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
2095 ctx->bc->cf_last->pop_count = 1;
2096
2097 fc_set_mid(ctx, fc_sp);
2098
2099 pops(ctx, 1);
2100 }
2101 #endif
2102
2103 static int tgsi_if(struct r600_shader_ctx *ctx)
2104 {
2105 emit_logic_pred(ctx, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE);
2106
2107 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
2108
2109 fc_pushlevel(ctx, FC_IF);
2110
2111 callstack_check_depth(ctx, FC_PUSH_VPM, 0);
2112 return 0;
2113 }
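/*
 * The JUMP emitted above is left without a target; its cf_addr is patched
 * later by tgsi_else() or tgsi_endif(), once the address of the matching
 * ELSE/ENDIF CF instruction is known, through the ->start pointer saved by
 * fc_pushlevel().
 */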
2114
2115 static int tgsi_else(struct r600_shader_ctx *ctx)
2116 {
2117 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_ELSE);
2118 ctx->bc->cf_last->pop_count = 1;
2119
2120 fc_set_mid(ctx, ctx->bc->fc_sp);
2121 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
2122 return 0;
2123 }
2124
2125 static int tgsi_endif(struct r600_shader_ctx *ctx)
2126 {
2127 pops(ctx, 1);
2128 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
2129 R600_ERR("if/endif unbalanced in shader\n");
2130 return -1;
2131 }
2132
2133 if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
2134 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
2135 ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
2136 } else {
2137 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
2138 }
2139 fc_poplevel(ctx);
2140
2141 callstack_decrease_current(ctx, FC_PUSH_VPM);
2142 return 0;
2143 }
2144
2145 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
2146 {
2147 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL);
2148
2149 fc_pushlevel(ctx, FC_LOOP);
2150
2151 /* check stack depth */
2152 callstack_check_depth(ctx, FC_LOOP, 0);
2153 return 0;
2154 }
2155
2156 static int tgsi_endloop(struct r600_shader_ctx *ctx)
2157 {
2158 int i;
2159
2160 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END);
2161
2162 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
2163 R600_ERR("loop/endloop in shader code are not paired.\n");
2164 return -EINVAL;
2165 }
2166
2167 /* fixup loop pointers - from r600isa
2168 LOOP END points to CF after LOOP START,
2169 LOOP START points to CF after LOOP END,
2170 BRK/CONT point to LOOP END CF
2171 */
2172 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
2173
2174 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
2175
2176 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
2177 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
2178 }
2179 /* TODO add LOOPRET support */
2180 fc_poplevel(ctx);
2181 callstack_decrease_current(ctx, FC_LOOP);
2182 return 0;
2183 }
2184
2185 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
2186 {
2187 unsigned int fscp;
2188
2189 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
2190 {
2191 if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
2192 break;
2193 }
2194
2195 if (fscp == 0) {
2196 R600_ERR("Break not inside loop/endloop pair\n");
2197 return -EINVAL;
2198 }
2199
2200 r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
2201 ctx->bc->cf_last->pop_count = 1;
2202
2203 fc_set_mid(ctx, fscp);
2204
2205 pops(ctx, 1);
2206 callstack_check_depth(ctx, FC_PUSH_VPM, 1);
2207 return 0;
2208 }
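/*
 * BRK and CONT share this handler: the actual CF opcode (LOOP_BREAK or
 * LOOP_CONTINUE) comes from the instruction table via
 * ctx->inst_info->r600_opcode, and the fc_stack walk finds the innermost
 * enclosing FC_LOOP so a break inside nested IFs still targets the right
 * LOOP_END once tgsi_endloop() patches the ->mid entries.
 */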
2209
2210 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
2211 {TGSI_OPCODE_ARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_arl},
2212 {TGSI_OPCODE_MOV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
2213 {TGSI_OPCODE_LIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},
2214 {TGSI_OPCODE_RCP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE, tgsi_trans_srcx_replicate},
2215 {TGSI_OPCODE_RSQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE, tgsi_trans_srcx_replicate},
2216 {TGSI_OPCODE_EXP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_exp},
2217 {TGSI_OPCODE_LOG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2218 {TGSI_OPCODE_MUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
2219 {TGSI_OPCODE_ADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
2220 {TGSI_OPCODE_DP3, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2221 {TGSI_OPCODE_DP4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2222 {TGSI_OPCODE_DST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_opdst},
2223 {TGSI_OPCODE_MIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
2224 {TGSI_OPCODE_MAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
2225 {TGSI_OPCODE_SLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2_swap},
2226 {TGSI_OPCODE_SGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2},
2227 {TGSI_OPCODE_MAD, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
2228 {TGSI_OPCODE_SUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
2229 {TGSI_OPCODE_LRP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
2230 {TGSI_OPCODE_CND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2231 /* gap */
2232 {20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2233 {TGSI_OPCODE_DP2A, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2234 /* gap */
2235 {22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2236 {23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2237 {TGSI_OPCODE_FRC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT, tgsi_op2},
2238 {TGSI_OPCODE_CLAMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2239 {TGSI_OPCODE_FLR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR, tgsi_op2},
2240 {TGSI_OPCODE_ROUND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2241 {TGSI_OPCODE_EX2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
2242 {TGSI_OPCODE_LG2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE, tgsi_trans_srcx_replicate},
2243 {TGSI_OPCODE_POW, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_pow},
2244 {TGSI_OPCODE_XPD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_xpd},
2245 /* gap */
2246 {32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2247 {TGSI_OPCODE_ABS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
2248 {TGSI_OPCODE_RCC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2249 {TGSI_OPCODE_DPH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2250 {TGSI_OPCODE_COS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS, tgsi_trig},
2251 {TGSI_OPCODE_DDX, 0, SQ_TEX_INST_GET_GRADIENTS_H, tgsi_tex},
2252 {TGSI_OPCODE_DDY, 0, SQ_TEX_INST_GET_GRADIENTS_V, tgsi_tex},
2253 {TGSI_OPCODE_KILP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill}, /* predicated kill */
2254 {TGSI_OPCODE_PK2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2255 {TGSI_OPCODE_PK2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2256 {TGSI_OPCODE_PK4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2257 {TGSI_OPCODE_PK4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2258 {TGSI_OPCODE_RFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2259 {TGSI_OPCODE_SEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE, tgsi_op2},
2260 {TGSI_OPCODE_SFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2261 {TGSI_OPCODE_SGT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2},
2262 {TGSI_OPCODE_SIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN, tgsi_trig},
2263 {TGSI_OPCODE_SLE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2_swap},
2264 {TGSI_OPCODE_SNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE, tgsi_op2},
2265 {TGSI_OPCODE_STR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2266 {TGSI_OPCODE_TEX, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
2267 {TGSI_OPCODE_TXD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2268 {TGSI_OPCODE_TXP, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
2269 {TGSI_OPCODE_UP2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2270 {TGSI_OPCODE_UP2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2271 {TGSI_OPCODE_UP4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2272 {TGSI_OPCODE_UP4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2273 {TGSI_OPCODE_X2D, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2274 {TGSI_OPCODE_ARA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2275 {TGSI_OPCODE_ARR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2276 {TGSI_OPCODE_BRA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2277 {TGSI_OPCODE_CAL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2278 {TGSI_OPCODE_RET, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2279 {TGSI_OPCODE_SSG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_ssg},
2280 {TGSI_OPCODE_CMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_cmp},
2281 {TGSI_OPCODE_SCS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_scs},
2282 {TGSI_OPCODE_TXB, 0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
2283 {TGSI_OPCODE_NRM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2284 {TGSI_OPCODE_DIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2285 {TGSI_OPCODE_DP2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2286 {TGSI_OPCODE_TXL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2287 {TGSI_OPCODE_BRK, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK, tgsi_loop_brk_cont},
2288 {TGSI_OPCODE_IF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_if},
2289 /* gap */
2290 {75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2291 {76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2292 {TGSI_OPCODE_ELSE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_else},
2293 {TGSI_OPCODE_ENDIF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endif},
2294 /* gap */
2295 {79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2296 {80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2297 {TGSI_OPCODE_PUSHA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2298 {TGSI_OPCODE_POPA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2299 {TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2300 {TGSI_OPCODE_I2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2301 {TGSI_OPCODE_NOT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2302 {TGSI_OPCODE_TRUNC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_trans_srcx_replicate},
2303 {TGSI_OPCODE_SHL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2304 /* gap */
2305 {88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2306 {TGSI_OPCODE_AND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2307 {TGSI_OPCODE_OR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2308 {TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2309 {TGSI_OPCODE_XOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2310 {TGSI_OPCODE_SAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2311 {TGSI_OPCODE_TXF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2312 {TGSI_OPCODE_TXQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2313 {TGSI_OPCODE_CONT, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE, tgsi_loop_brk_cont},
2314 {TGSI_OPCODE_EMIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2315 {TGSI_OPCODE_ENDPRIM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2316 {TGSI_OPCODE_BGNLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_bgnloop},
2317 {TGSI_OPCODE_BGNSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2318 {TGSI_OPCODE_ENDLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
2319 {TGSI_OPCODE_ENDSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2320 /* gap */
2321 {103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2322 {104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2323 {105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2324 {106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2325 {TGSI_OPCODE_NOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2326 /* gap */
2327 {108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2328 {109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2329 {110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2330 {111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2331 {TGSI_OPCODE_NRM4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2332 {TGSI_OPCODE_CALLNZ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2333 {TGSI_OPCODE_IFC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2334 {TGSI_OPCODE_BREAKC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2335 {TGSI_OPCODE_KIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill}, /* conditional kill */
2336 {TGSI_OPCODE_END, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end}, /* aka HALT */
2337 /* gap */
2338 {118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2339 {TGSI_OPCODE_F2I, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2340 {TGSI_OPCODE_IDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2341 {TGSI_OPCODE_IMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2342 {TGSI_OPCODE_IMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2343 {TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2344 {TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2345 {TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2346 {TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2347 {TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2348 {TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2349 {TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2350 {TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2351 {TGSI_OPCODE_UMAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2352 {TGSI_OPCODE_UMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2353 {TGSI_OPCODE_UMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2354 {TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2355 {TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2356 {TGSI_OPCODE_USEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2357 {TGSI_OPCODE_USGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2358 {TGSI_OPCODE_USHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2359 {TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2360 {TGSI_OPCODE_USNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2361 {TGSI_OPCODE_SWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2362 {TGSI_OPCODE_CASE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2363 {TGSI_OPCODE_DEFAULT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2364 {TGSI_OPCODE_ENDSWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2365 {TGSI_OPCODE_LAST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2366 };
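/*
 * The numbered "gap" rows above keep the array index equal to the TGSI
 * opcode value, so instruction dispatch can be a direct table lookup.
 * Roughly (a sketch of the calling convention used by the TGSI walker, not
 * code that needs adding here):
 *
 *     unsigned opcode = ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
 *     ctx->inst_info = &r600_shader_tgsi_instruction[opcode];
 *     r = ctx->inst_info->process(ctx);
 */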