r600g: fix LIT tests
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "pipe/p_shader_tokens.h"
24 #include "tgsi/tgsi_parse.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_dump.h"
27 #include "util/u_format.h"
28 #include "r600_screen.h"
29 #include "r600_context.h"
30 #include "r600_shader.h"
31 #include "r600_asm.h"
32 #include "r600_sq.h"
33 #include "r600d.h"
34 #include <stdio.h>
35 #include <errno.h>
36
37
38 struct r600_shader_tgsi_instruction;
39
/* Transient state used while translating one TGSI shader into r600
 * bytecode. Lives on the stack of r600_shader_from_tgsi(). */
struct r600_shader_ctx {
	struct tgsi_shader_info			info;       /* results of tgsi_scan_shader() */
	struct tgsi_parse_context		parse;      /* token stream iterator */
	const struct tgsi_token			*tokens;    /* raw TGSI tokens being translated */
	unsigned				type;       /* TGSI_PROCESSOR_* of this shader */
	unsigned				file_offset[TGSI_FILE_COUNT]; /* per-register-file base selector */
	unsigned				temp_reg;   /* first GPR free for scratch use */
	struct r600_shader_tgsi_instruction	*inst_info; /* dispatch entry for the opcode being emitted */
	struct r600_bc				*bc;        /* bytecode under construction (== &shader->bc) */
	struct r600_shader			*shader;    /* shader description being filled in */
	u32					value[4];   /* literal values staged by tgsi_src() for the current instruction */
	u32					*literals;  /* immediates gathered from the token stream, 4 u32 each */
	u32					nliterals;  /* number of 4-component immediates in literals[] */
};
54
/* Dispatch-table entry mapping one TGSI opcode to its r600 emitter. */
struct r600_shader_tgsi_instruction {
	unsigned	tgsi_opcode;   /* TGSI_OPCODE_* this entry handles */
	unsigned	is_op3;        /* non-zero if the ALU instruction takes three sources */
	unsigned	r600_opcode;   /* matching SQ OP2/OP3 ALU instruction */
	int (*process)(struct r600_shader_ctx *ctx); /* emitter callback; returns 0 or -errno */
};
61
62 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
63 static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
64
/* Patch the destination swizzles of every vertex-fetch instruction in a
 * vertex shader to match the formats of the currently bound vertex
 * elements, then rebuild the bytecode.  Non-vertex shaders need no
 * patching and return 0 immediately.
 * Returns 0 on success, -EINVAL on an unknown element format.
 */
static int r600_shader_update(struct pipe_context *ctx, struct r600_shader *shader)
{
	struct r600_context *rctx = r600_context(ctx);
	const struct util_format_description *desc;
	enum pipe_format resource_format[160];
	unsigned i, nresources = 0;
	struct r600_bc *bc = &shader->bc;
	struct r600_bc_cf *cf;
	struct r600_bc_vtx *vtx;

	if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
		return 0;
	/* gather the source format of each bound vertex element */
	for (i = 0; i < rctx->vertex_elements->count; i++) {
		resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
	}
	/* walk every fetch clause and rewrite the destination swizzle
	 * from the element's format description */
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		switch (cf->inst) {
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				/* NOTE(review): vtx->buffer_id is not bounds-checked
				 * against nresources — assumes shader inputs never
				 * exceed the bound vertex elements; confirm */
				desc = util_format_description(resource_format[vtx->buffer_id]);
				if (desc == NULL) {
					R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
					return -EINVAL;
				}
				vtx->dst_sel_x = desc->swizzle[0];
				vtx->dst_sel_y = desc->swizzle[1];
				vtx->dst_sel_z = desc->swizzle[2];
				vtx->dst_sel_w = desc->swizzle[3];
			}
			break;
		default:
			break;
		}
	}
	return r600_bc_build(&shader->bc);
}
102
103 int r600_pipe_shader_create(struct pipe_context *ctx,
104 struct r600_context_state *rpshader,
105 const struct tgsi_token *tokens)
106 {
107 struct r600_screen *rscreen = r600_screen(ctx->screen);
108 int r;
109
110 //fprintf(stderr, "--------------------------------------------------------------\n");
111 //tgsi_dump(tokens, 0);
112 if (rpshader == NULL)
113 return -ENOMEM;
114 rpshader->shader.family = radeon_get_family(rscreen->rw);
115 r = r600_shader_from_tgsi(tokens, &rpshader->shader);
116 if (r) {
117 R600_ERR("translation from TGSI failed !\n");
118 return r;
119 }
120 r = r600_bc_build(&rpshader->shader.bc);
121 if (r) {
122 R600_ERR("building bytecode failed !\n");
123 return r;
124 }
125 //fprintf(stderr, "______________________________________________________________\n");
126 return 0;
127 }
128
/* Build the radeon state object (SPI/SQ registers plus shader buffer
 * references) for a vertex shader.
 * Returns 0 on success or a negative errno.
 */
static int r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_shader *rshader = &rpshader->shader;
	struct radeon_state *state;
	unsigned i, tmp;

	rpshader->rstate = radeon_state_decref(rpshader->rstate);
	state = radeon_state_shader(rscreen->rw, R600_STATE_SHADER, 0, R600_SHADER_VS);
	if (state == NULL)
		return -ENOMEM;
	for (i = 0; i < 10; i++) {
		state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i] = 0;
	}
	/* so far never got proper semantic id from tgsi */
	/* pack semantic ids 0..31 into the SPI_VS_OUT_ID registers,
	 * four ids per register, one byte each */
	for (i = 0; i < 32; i++) {
		tmp = i << ((i & 3) * 8);
		state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i / 4] |= tmp;
	}
	/* NOTE(review): "- 2" presumably discounts position and point
	 * size from the parameter export count — confirm every VS
	 * declares both outputs */
	state->states[R600_VS_SHADER__SPI_VS_OUT_CONFIG] = S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2);
	state->states[R600_VS_SHADER__SQ_PGM_RESOURCES_VS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
		S_028868_STACK_SIZE(rshader->bc.nstack);
	rpshader->rstate = state;
	/* both buffer slots reference the same bytecode BO */
	rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->bo[1] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->nbo = 2;
	rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
	rpshader->rstate->placement[2] = RADEON_GEM_DOMAIN_GTT;
	return radeon_state_pm4(state);
}
159
/* Build the radeon state object for a fragment shader: per-input SPI
 * interpolation controls, the pixel-export mask and SQ resources.
 * Returns 0 on success or a negative errno.
 */
static int r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	const struct pipe_rasterizer_state *rasterizer;
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_shader *rshader = &rpshader->shader;
	struct r600_context *rctx = r600_context(ctx);
	struct radeon_state *state;
	unsigned i, tmp, exports_ps, num_cout;

	rasterizer = &rctx->rasterizer->state.rasterizer;
	rpshader->rstate = radeon_state_decref(rpshader->rstate);
	state = radeon_state_shader(rscreen->rw, R600_STATE_SHADER, 0, R600_SHADER_PS);
	if (state == NULL)
		return -ENOMEM;
	/* one SPI_PS_INPUT_CNTL register per interpolated input */
	for (i = 0; i < rshader->ninput; i++) {
		tmp = S_028644_SEMANTIC(i);
		tmp |= S_028644_SEL_CENTROID(1);
		/* colors honor the flat-shade rasterizer setting */
		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_BCOLOR) {
			tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
		}
		if (rasterizer->sprite_coord_enable & (1 << i)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}
		state->states[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0 + i] = tmp;
	}

	/* build the export mask: bit 0 for depth (POSITION output),
	 * successive bits for each color output */
	exports_ps = 0;
	num_cout = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			exports_ps |= 1;
		else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
			exports_ps |= (1 << (num_cout+1));
			num_cout++;
		}
	}
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}
	state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] = S_0286CC_NUM_INTERP(rshader->ninput) |
		S_0286CC_PERSP_GRADIENT_ENA(1);
	state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_1] = 0x00000000;
	state->states[R600_PS_SHADER__SQ_PGM_RESOURCES_PS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
		S_028868_STACK_SIZE(rshader->bc.nstack);
	state->states[R600_PS_SHADER__SQ_PGM_EXPORTS_PS] = exports_ps;
	rpshader->rstate = state;
	rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->nbo = 1;
	rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
	return radeon_state_pm4(state);
}
213
214 static int r600_pipe_shader(struct pipe_context *ctx, struct r600_context_state *rpshader)
215 {
216 struct r600_screen *rscreen = r600_screen(ctx->screen);
217 struct r600_context *rctx = r600_context(ctx);
218 struct r600_shader *rshader = &rpshader->shader;
219 int r;
220
221 /* copy new shader */
222 radeon_bo_decref(rscreen->rw, rpshader->bo);
223 rpshader->bo = NULL;
224 rpshader->bo = radeon_bo(rscreen->rw, 0, rshader->bc.ndw * 4,
225 4096, NULL);
226 if (rpshader->bo == NULL) {
227 return -ENOMEM;
228 }
229 radeon_bo_map(rscreen->rw, rpshader->bo);
230 memcpy(rpshader->bo->data, rshader->bc.bytecode, rshader->bc.ndw * 4);
231 radeon_bo_unmap(rscreen->rw, rpshader->bo);
232 /* build state */
233 rshader->flat_shade = rctx->flat_shade;
234 switch (rshader->processor_type) {
235 case TGSI_PROCESSOR_VERTEX:
236 r = r600_pipe_shader_vs(ctx, rpshader);
237 break;
238 case TGSI_PROCESSOR_FRAGMENT:
239 r = r600_pipe_shader_ps(ctx, rpshader);
240 break;
241 default:
242 r = -EINVAL;
243 break;
244 }
245 return r;
246 }
247
248 int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_context_state *rpshader)
249 {
250 struct r600_context *rctx = r600_context(ctx);
251 int r;
252
253 if (rpshader == NULL)
254 return -EINVAL;
255 /* there should be enough input */
256 if (rctx->vertex_elements->count < rpshader->shader.bc.nresource) {
257 R600_ERR("%d resources provided, expecting %d\n",
258 rctx->vertex_elements->count, rpshader->shader.bc.nresource);
259 return -EINVAL;
260 }
261 r = r600_shader_update(ctx, &rpshader->shader);
262 if (r)
263 return r;
264 return r600_pipe_shader(ctx, rpshader);
265 }
266
267 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
268 {
269 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
270 int j;
271
272 if (i->Instruction.NumDstRegs > 1) {
273 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
274 return -EINVAL;
275 }
276 if (i->Instruction.Predicate) {
277 R600_ERR("predicate unsupported\n");
278 return -EINVAL;
279 }
280 #if 0
281 if (i->Instruction.Label) {
282 R600_ERR("label unsupported\n");
283 return -EINVAL;
284 }
285 #endif
286 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
287 if (i->Src[j].Register.Dimension ||
288 i->Src[j].Register.Absolute) {
289 R600_ERR("unsupported src %d (dimension %d|absolute %d)\n", j,
290 i->Src[j].Register.Dimension,
291 i->Src[j].Register.Absolute);
292 return -EINVAL;
293 }
294 }
295 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
296 if (i->Dst[j].Register.Dimension) {
297 R600_ERR("unsupported dst (dimension)\n");
298 return -EINVAL;
299 }
300 }
301 return 0;
302 }
303
/* Process one TGSI declaration token: record inputs/outputs in the
 * shader description and, for vertex shader inputs, emit the matching
 * vertex fetch instruction.  Returns 0 or a negative errno.
 */
static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
	struct r600_bc_vtx vtx;
	unsigned i;
	int r;

	switch (d->Declaration.File) {
	case TGSI_FILE_INPUT:
		i = ctx->shader->ninput++;
		ctx->shader->input[i].name = d->Semantic.Name;
		ctx->shader->input[i].sid = d->Semantic.Index;
		ctx->shader->input[i].interpolate = d->Declaration.Interpolate;
		ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + i;
		if (ctx->type == TGSI_PROCESSOR_VERTEX) {
			/* turn input into fetch */
			memset(&vtx, 0, sizeof(struct r600_bc_vtx));
			vtx.inst = 0;
			vtx.fetch_type = 0;
			vtx.buffer_id = i;
			/* register containing the index into the buffer */
			vtx.src_gpr = 0;
			vtx.src_sel_x = 0;
			/* NOTE(review): 0x1F appears to be the maximum
			 * mega-fetch count — confirm against the ISA docs */
			vtx.mega_fetch_count = 0x1F;
			vtx.dst_gpr = ctx->shader->input[i].gpr;
			/* identity swizzle; patched later from the bound
			 * vertex element format by r600_shader_update() */
			vtx.dst_sel_x = 0;
			vtx.dst_sel_y = 1;
			vtx.dst_sel_z = 2;
			vtx.dst_sel_w = 3;
			r = r600_bc_add_vtx(ctx->bc, &vtx);
			if (r)
				return r;
		}
		break;
	case TGSI_FILE_OUTPUT:
		i = ctx->shader->noutput++;
		ctx->shader->output[i].name = d->Semantic.Name;
		ctx->shader->output[i].sid = d->Semantic.Index;
		ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + i;
		ctx->shader->output[i].interpolate = d->Declaration.Interpolate;
		break;
	case TGSI_FILE_CONSTANT:
	case TGSI_FILE_TEMPORARY:
	case TGSI_FILE_SAMPLER:
	case TGSI_FILE_ADDRESS:
		/* nothing to emit; register ranges were laid out up front */
		break;
	default:
		R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
		return -EINVAL;
	}
	return 0;
}
356
357 int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
358 {
359 struct tgsi_full_immediate *immediate;
360 struct r600_shader_ctx ctx;
361 struct r600_bc_output output[32];
362 unsigned output_done, noutput;
363 unsigned opcode;
364 int i, r = 0, pos0;
365
366 ctx.bc = &shader->bc;
367 ctx.shader = shader;
368 r = r600_bc_init(ctx.bc, shader->family);
369 if (r)
370 return r;
371 ctx.tokens = tokens;
372 tgsi_scan_shader(tokens, &ctx.info);
373 tgsi_parse_init(&ctx.parse, tokens);
374 ctx.type = ctx.parse.FullHeader.Processor.Processor;
375 shader->processor_type = ctx.type;
376
377 /* register allocations */
378 /* Values [0,127] correspond to GPR[0..127].
379 * Values [128,159] correspond to constant buffer bank 0
380 * Values [160,191] correspond to constant buffer bank 1
381 * Values [256,511] correspond to cfile constants c[0..255].
382 * Other special values are shown in the list below.
383 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
384 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
385 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
386 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
387 * 248 SQ_ALU_SRC_0: special constant 0.0.
388 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
389 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
390 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
391 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
392 * 253 SQ_ALU_SRC_LITERAL: literal constant.
393 * 254 SQ_ALU_SRC_PV: previous vector result.
394 * 255 SQ_ALU_SRC_PS: previous scalar result.
395 */
396 for (i = 0; i < TGSI_FILE_COUNT; i++) {
397 ctx.file_offset[i] = 0;
398 }
399 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
400 ctx.file_offset[TGSI_FILE_INPUT] = 1;
401 }
402 ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] +
403 ctx.info.file_count[TGSI_FILE_INPUT];
404 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
405 ctx.info.file_count[TGSI_FILE_OUTPUT];
406 ctx.file_offset[TGSI_FILE_CONSTANT] = 256;
407 ctx.file_offset[TGSI_FILE_IMMEDIATE] = 253;
408 ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
409 ctx.info.file_count[TGSI_FILE_TEMPORARY];
410
411 ctx.nliterals = 0;
412 ctx.literals = NULL;
413
414 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
415 tgsi_parse_token(&ctx.parse);
416 switch (ctx.parse.FullToken.Token.Type) {
417 case TGSI_TOKEN_TYPE_IMMEDIATE:
418 immediate = &ctx.parse.FullToken.FullImmediate;
419 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
420 if(ctx.literals == NULL) {
421 r = -ENOMEM;
422 goto out_err;
423 }
424 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
425 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
426 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
427 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
428 ctx.nliterals++;
429 break;
430 case TGSI_TOKEN_TYPE_DECLARATION:
431 r = tgsi_declaration(&ctx);
432 if (r)
433 goto out_err;
434 break;
435 case TGSI_TOKEN_TYPE_INSTRUCTION:
436 r = tgsi_is_supported(&ctx);
437 if (r)
438 goto out_err;
439 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
440 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
441 r = ctx.inst_info->process(&ctx);
442 if (r)
443 goto out_err;
444 r = r600_bc_add_literal(ctx.bc, ctx.value);
445 if (r)
446 goto out_err;
447 break;
448 default:
449 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
450 r = -EINVAL;
451 goto out_err;
452 }
453 }
454 /* export output */
455 noutput = shader->noutput;
456 for (i = 0, pos0 = 0; i < noutput; i++) {
457 memset(&output[i], 0, sizeof(struct r600_bc_output));
458 output[i].gpr = shader->output[i].gpr;
459 output[i].elem_size = 3;
460 output[i].swizzle_x = 0;
461 output[i].swizzle_y = 1;
462 output[i].swizzle_z = 2;
463 output[i].swizzle_w = 3;
464 output[i].barrier = 1;
465 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
466 output[i].array_base = i - pos0;
467 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
468 switch (ctx.type) {
469 case TGSI_PROCESSOR_VERTEX:
470 if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
471 output[i].array_base = 60;
472 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
473 /* position doesn't count in array_base */
474 pos0++;
475 }
476 if (shader->output[i].name == TGSI_SEMANTIC_PSIZE) {
477 output[i].array_base = 61;
478 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
479 /* position doesn't count in array_base */
480 pos0++;
481 }
482 break;
483 case TGSI_PROCESSOR_FRAGMENT:
484 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
485 output[i].array_base = shader->output[i].sid;
486 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
487 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
488 output[i].array_base = 61;
489 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
490 } else {
491 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
492 r = -EINVAL;
493 goto out_err;
494 }
495 break;
496 default:
497 R600_ERR("unsupported processor type %d\n", ctx.type);
498 r = -EINVAL;
499 goto out_err;
500 }
501 }
502 /* add fake param output for vertex shader if no param is exported */
503 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
504 for (i = 0, pos0 = 0; i < noutput; i++) {
505 if (output[i].type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM) {
506 pos0 = 1;
507 break;
508 }
509 }
510 if (!pos0) {
511 memset(&output[i], 0, sizeof(struct r600_bc_output));
512 output[i].gpr = 0;
513 output[i].elem_size = 3;
514 output[i].swizzle_x = 0;
515 output[i].swizzle_y = 1;
516 output[i].swizzle_z = 2;
517 output[i].swizzle_w = 3;
518 output[i].barrier = 1;
519 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
520 output[i].array_base = 0;
521 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
522 noutput++;
523 }
524 }
525 /* add fake pixel export */
526 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && !noutput) {
527 memset(&output[0], 0, sizeof(struct r600_bc_output));
528 output[0].gpr = 0;
529 output[0].elem_size = 3;
530 output[0].swizzle_x = 7;
531 output[0].swizzle_y = 7;
532 output[0].swizzle_z = 7;
533 output[0].swizzle_w = 7;
534 output[0].barrier = 1;
535 output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
536 output[0].array_base = 0;
537 output[0].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
538 noutput++;
539 }
540 /* set export done on last export of each type */
541 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
542 if (i == (noutput - 1)) {
543 output[i].end_of_program = 1;
544 }
545 if (!(output_done & (1 << output[i].type))) {
546 output_done |= (1 << output[i].type);
547 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE;
548 }
549 }
550 /* add output to bytecode */
551 for (i = 0; i < noutput; i++) {
552 r = r600_bc_add_output(ctx.bc, &output[i]);
553 if (r)
554 goto out_err;
555 }
556 free(ctx.literals);
557 tgsi_parse_free(&ctx.parse);
558 return 0;
559 out_err:
560 free(ctx.literals);
561 tgsi_parse_free(&ctx.parse);
562 return r;
563 }
564
565 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
566 {
567 R600_ERR("%d tgsi opcode unsupported\n", ctx->inst_info->tgsi_opcode);
568 return -EINVAL;
569 }
570
/* TGSI END emits nothing: program termination is encoded by the
 * end_of_program flag set on the final export. */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}
575
/* Convert a TGSI source operand into an r600 ALU source selector.
 * For immediates the four literal values are staged into ctx->value so
 * the caller can attach them with r600_bc_add_literal(); the selector
 * itself becomes SQ_ALU_SRC_LITERAL (253) via the file_offset[] table.
 * Always returns 0.
 */
static int tgsi_src(struct r600_shader_ctx *ctx,
		const struct tgsi_full_src_register *tgsi_src,
		struct r600_bc_alu_src *r600_src)
{
	int index;
	memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
	r600_src->sel = tgsi_src->Register.Index;
	if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
		/* the literal selector comes solely from file_offset[]
		 * below, so drop the pool index from sel */
		r600_src->sel = 0;
		index = tgsi_src->Register.Index;
		ctx->value[0] = ctx->literals[index * 4 + 0];
		ctx->value[1] = ctx->literals[index * 4 + 1];
		ctx->value[2] = ctx->literals[index * 4 + 2];
		ctx->value[3] = ctx->literals[index * 4 + 3];
	}
	if (tgsi_src->Register.Indirect)
		r600_src->rel = V_SQ_REL_RELATIVE;
	r600_src->neg = tgsi_src->Register.Negate;
	/* rebase the register index into the file's selector range */
	r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
	return 0;
}
597
598 static int tgsi_dst(struct r600_shader_ctx *ctx,
599 const struct tgsi_full_dst_register *tgsi_dst,
600 unsigned swizzle,
601 struct r600_bc_alu_dst *r600_dst)
602 {
603 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
604
605 r600_dst->sel = tgsi_dst->Register.Index;
606 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
607 r600_dst->chan = swizzle;
608 r600_dst->write = 1;
609 if (tgsi_dst->Register.Indirect)
610 r600_dst->rel = V_SQ_REL_RELATIVE;
611 if (inst->Instruction.Saturate) {
612 r600_dst->clamp = 1;
613 }
614 return 0;
615 }
616
617 static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
618 {
619 switch (swizzle) {
620 case 0:
621 return tgsi_src->Register.SwizzleX;
622 case 1:
623 return tgsi_src->Register.SwizzleY;
624 case 2:
625 return tgsi_src->Register.SwizzleZ;
626 case 3:
627 return tgsi_src->Register.SwizzleW;
628 default:
629 return 0;
630 }
631 }
632
633 static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
634 {
635 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
636 struct r600_bc_alu alu;
637 int i, j, k, nconst, r;
638
639 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
640 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
641 nconst++;
642 }
643 r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
644 if (r) {
645 return r;
646 }
647 }
648 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
649 if (inst->Src[j].Register.File == TGSI_FILE_CONSTANT && j > 0) {
650 for (k = 0; k < 4; k++) {
651 memset(&alu, 0, sizeof(struct r600_bc_alu));
652 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
653 alu.src[0].sel = r600_src[0].sel;
654 alu.src[0].chan = k;
655 alu.dst.sel = ctx->temp_reg + j;
656 alu.dst.chan = k;
657 alu.dst.write = 1;
658 if (k == 3)
659 alu.last = 1;
660 r = r600_bc_add_alu(ctx->bc, &alu);
661 if (r)
662 return r;
663 }
664 r600_src[0].sel = ctx->temp_reg + j;
665 j--;
666 }
667 }
668 return 0;
669 }
670
/* Generic per-channel translation of one/two-source TGSI ALU opcodes.
 * An r600 ALU instruction is emitted for every channel enabled in the
 * destination write mask, closing the instruction group (alu.last) on
 * the last enabled channel.  When swap is non-zero the two sources are
 * exchanged, for opcodes whose r600 equivalent takes its operands in
 * the opposite order.  Returns 0 or a negative errno.
 */
static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;
	int lasti = 0;

	/* find the last written channel so alu.last can be set on it */
	for (i = 0; i < 4; i++) {
		if (inst->Dst[0].Register.WriteMask & (1 << i)) {
			lasti = i;
		}
	}

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bc_alu));
		r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		if (r)
			return r;

		alu.inst = ctx->inst_info->r600_opcode;
		if (!swap) {
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				alu.src[j] = r600_src[j];
				alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
			}
		} else {
			/* reversed operand order */
			alu.src[0] = r600_src[1];
			alu.src[0].chan = tgsi_chan(&inst->Src[1], i);

			alu.src[1] = r600_src[0];
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		}
		/* handle some special cases */
		switch (ctx->inst_info->tgsi_opcode) {
		case TGSI_OPCODE_SUB:
			/* SUB is emitted as an ADD with src1 negated */
			alu.src[1].neg = 1;
			break;
		case TGSI_OPCODE_ABS:
			alu.src[0].abs = 1;
			break;
		default:
			break;
		}
		if (i == lasti) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
730
/* Standard two-source ALU opcode, operands in natural order. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0);
}
735
/* Two-source ALU opcode whose r600 form takes operands reversed. */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 1);
}
740
/*
 * r600 - trunc to -PI..PI range
 * r700 - normalize by dividing by 2PI
 * see fdo bug 27901
 */
/* Range-reduce the trig argument and leave the result in
 * ctx->temp_reg channel 0.  Also translates the instruction's sources
 * (via tgsi_split_constant) into r600_src[].
 * Sequence: tmp = fract(src * 1/2PI + 0.5), then rescaled per chip
 * revision as described in the comment above.
 * Returns 0 or a negative errno.
 */
static int tgsi_setup_trig(struct r600_shader_ctx *ctx,
			struct r600_bc_alu_src r600_src[3])
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	int r;
	uint32_t lit_vals[4];
	struct r600_bc_alu alu;

	memset(lit_vals, 0, 4*4);
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	lit_vals[0] = fui(1.0 /(3.1415926535 * 2));
	lit_vals[1] = fui(0.5f);

	/* tmp = src * (1/2PI) + 0.5 */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
	alu.is_op3 = 1;

	alu.dst.chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;

	alu.src[0] = r600_src[0];
	alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);

	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].chan = 0;
	alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[2].chan = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	r = r600_bc_add_literal(ctx->bc, lit_vals);
	if (r)
		return r;

	/* tmp = fract(tmp), i.e. keep only the fractional period */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;

	alu.dst.chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;

	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 0;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* pick the rescale constants for the chip generation */
	if (ctx->bc->chiprev == 0) {
		/* r600: back to a -PI..PI radian range */
		lit_vals[0] = fui(3.1415926535897f * 2.0f);
		lit_vals[1] = fui(-3.1415926535897f);
	} else {
		/* r700+: hardware wants the argument normalized to 2PI */
		lit_vals[0] = fui(1.0f);
		lit_vals[1] = fui(-0.5f);
	}

	/* tmp = tmp * scale + offset */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
	alu.is_op3 = 1;

	alu.dst.chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;

	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 0;

	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].chan = 0;
	alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[2].chan = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	r = r600_bc_add_literal(ctx->bc, lit_vals);
	if (r)
		return r;
	return 0;
}
830
831 static int tgsi_trig(struct r600_shader_ctx *ctx)
832 {
833 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
834 struct r600_bc_alu_src r600_src[3];
835 struct r600_bc_alu alu;
836 int i, r;
837
838 r = tgsi_split_constant(ctx, r600_src);
839 if (r)
840 return r;
841
842 r = tgsi_setup_trig(ctx, r600_src);
843 if (r)
844 return r;
845
846 memset(&alu, 0, sizeof(struct r600_bc_alu));
847 alu.inst = ctx->inst_info->r600_opcode;
848 alu.dst.chan = 0;
849 alu.dst.sel = ctx->temp_reg;
850 alu.dst.write = 1;
851
852 alu.src[0].sel = ctx->temp_reg;
853 alu.src[0].chan = 0;
854 alu.last = 1;
855 r = r600_bc_add_alu(ctx->bc, &alu);
856 if (r)
857 return r;
858
859 /* replicate result */
860 for (i = 0; i < 4; i++) {
861 memset(&alu, 0, sizeof(struct r600_bc_alu));
862 alu.src[0].sel = ctx->temp_reg;
863 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
864 alu.dst.chan = i;
865 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
866 if (r)
867 return r;
868 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
869 if (i == 3)
870 alu.last = 1;
871 r = r600_bc_add_alu(ctx->bc, &alu);
872 if (r)
873 return r;
874 }
875 return 0;
876 }
877
878 static int tgsi_scs(struct r600_shader_ctx *ctx)
879 {
880 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
881 struct r600_bc_alu_src r600_src[3];
882 struct r600_bc_alu alu;
883 int r;
884
885 r = tgsi_split_constant(ctx, r600_src);
886 if (r)
887 return r;
888
889 r = tgsi_setup_trig(ctx, r600_src);
890 if (r)
891 return r;
892
893
894 /* dst.x = COS */
895 memset(&alu, 0, sizeof(struct r600_bc_alu));
896 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS;
897 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
898 if (r)
899 return r;
900
901 alu.src[0].sel = ctx->temp_reg;
902 alu.src[0].chan = 0;
903 alu.last = 1;
904 r = r600_bc_add_alu(ctx->bc, &alu);
905 if (r)
906 return r;
907
908 /* dst.y = SIN */
909 memset(&alu, 0, sizeof(struct r600_bc_alu));
910 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN;
911 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
912 if (r)
913 return r;
914
915 alu.src[0].sel = ctx->temp_reg;
916 alu.src[0].chan = 0;
917 alu.last = 1;
918 r = r600_bc_add_alu(ctx->bc, &alu);
919 if (r)
920 return r;
921 return 0;
922 }
923
/* Translate KIL/KILP into per-channel conditional-kill ALU ops.
 * For KILP the comparison is constant-true (0 vs -1); for KIL the
 * second operand comes from the instruction's source register.
 * Returns 0 or a negative errno.
 */
static int tgsi_kill(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, r;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		/* kill opcode supplied by the dispatch table */
		alu.inst = ctx->inst_info->r600_opcode;

		alu.dst.chan = i;

		alu.src[0].sel = V_SQ_ALU_SRC_0;

		if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_KILP) {
			/* unconditional kill: compare 0 against -1 */
			alu.src[1].sel = V_SQ_ALU_SRC_1;
			alu.src[1].neg = 1;
		} else {
			r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
			if (r)
				return r;
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	/* flush literals possibly staged by tgsi_src() above */
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* kill must be last in ALU */
	ctx->bc->force_add_cf = 1;
	ctx->shader->uses_kill = TRUE;
	return 0;
}
963
964 static int tgsi_lit(struct r600_shader_ctx *ctx)
965 {
966 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
967 struct r600_bc_alu alu;
968 int r;
969
970 /* dst.x, <- 1.0 */
971 memset(&alu, 0, sizeof(struct r600_bc_alu));
972 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
973 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
974 alu.src[0].chan = 0;
975 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
976 if (r)
977 return r;
978 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
979 r = r600_bc_add_alu(ctx->bc, &alu);
980 if (r)
981 return r;
982
983 /* dst.y = max(src.x, 0.0) */
984 memset(&alu, 0, sizeof(struct r600_bc_alu));
985 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX;
986 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
987 if (r)
988 return r;
989 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
990 alu.src[1].chan = 0;
991 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
992 if (r)
993 return r;
994 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
995 r = r600_bc_add_alu(ctx->bc, &alu);
996 if (r)
997 return r;
998
999 /* dst.z = NOP - fill Z slot */
1000 memset(&alu, 0, sizeof(struct r600_bc_alu));
1001 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
1002 alu.dst.chan = 2;
1003 r = r600_bc_add_alu(ctx->bc, &alu);
1004 if (r)
1005 return r;
1006
1007 /* dst.w, <- 1.0 */
1008 memset(&alu, 0, sizeof(struct r600_bc_alu));
1009 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1010 alu.src[0].sel = V_SQ_ALU_SRC_1;
1011 alu.src[0].chan = 0;
1012 r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
1013 if (r)
1014 return r;
1015 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
1016 alu.last = 1;
1017 r = r600_bc_add_alu(ctx->bc, &alu);
1018 if (r)
1019 return r;
1020
1021 r = r600_bc_add_literal(ctx->bc, ctx->value);
1022 if (r)
1023 return r;
1024
1025 if (inst->Dst[0].Register.WriteMask & (1 << 2))
1026 {
1027 int chan;
1028 int sel;
1029
1030 /* dst.z = log(src.y) */
1031 memset(&alu, 0, sizeof(struct r600_bc_alu));
1032 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED;
1033 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1034 if (r)
1035 return r;
1036 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1037 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
1038 if (r)
1039 return r;
1040 alu.last = 1;
1041 r = r600_bc_add_alu(ctx->bc, &alu);
1042 if (r)
1043 return r;
1044
1045 r = r600_bc_add_literal(ctx->bc, ctx->value);
1046 if (r)
1047 return r;
1048
1049 chan = alu.dst.chan;
1050 sel = alu.dst.sel;
1051
1052 /* tmp.x = amd MUL_LIT(src.w, dst.z, src.x ) */
1053 memset(&alu, 0, sizeof(struct r600_bc_alu));
1054 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
1055 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1056 if (r)
1057 return r;
1058 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1059 alu.src[1].sel = sel;
1060 alu.src[1].chan = chan;
1061 r = tgsi_src(ctx, &inst->Src[0], &alu.src[2]);
1062 if (r)
1063 return r;
1064 alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
1065 alu.dst.sel = ctx->temp_reg;
1066 alu.dst.chan = 0;
1067 alu.dst.write = 1;
1068 alu.is_op3 = 1;
1069 alu.last = 1;
1070 r = r600_bc_add_alu(ctx->bc, &alu);
1071 if (r)
1072 return r;
1073
1074 r = r600_bc_add_literal(ctx->bc, ctx->value);
1075 if (r)
1076 return r;
1077 /* dst.z = exp(tmp.x) */
1078 memset(&alu, 0, sizeof(struct r600_bc_alu));
1079 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1080 alu.src[0].sel = ctx->temp_reg;
1081 alu.src[0].chan = 0;
1082 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
1083 if (r)
1084 return r;
1085 alu.last = 1;
1086 r = r600_bc_add_alu(ctx->bc, &alu);
1087 if (r)
1088 return r;
1089 }
1090 return 0;
1091 }
1092
1093 static int tgsi_trans(struct r600_shader_ctx *ctx)
1094 {
1095 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1096 struct r600_bc_alu alu;
1097 int i, j, r;
1098
1099 for (i = 0; i < 4; i++) {
1100 memset(&alu, 0, sizeof(struct r600_bc_alu));
1101 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
1102 alu.inst = ctx->inst_info->r600_opcode;
1103 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1104 r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
1105 if (r)
1106 return r;
1107 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1108 }
1109 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1110 if (r)
1111 return r;
1112 alu.last = 1;
1113 r = r600_bc_add_alu(ctx->bc, &alu);
1114 if (r)
1115 return r;
1116 }
1117 }
1118 return 0;
1119 }
1120
1121 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
1122 {
1123 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1124 struct r600_bc_alu alu;
1125 int i, r;
1126
1127 for (i = 0; i < 4; i++) {
1128 memset(&alu, 0, sizeof(struct r600_bc_alu));
1129 alu.src[0].sel = ctx->temp_reg;
1130 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1131 alu.dst.chan = i;
1132 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1133 if (r)
1134 return r;
1135 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
1136 if (i == 3)
1137 alu.last = 1;
1138 r = r600_bc_add_alu(ctx->bc, &alu);
1139 if (r)
1140 return r;
1141 }
1142 return 0;
1143 }
1144
1145 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
1146 {
1147 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1148 struct r600_bc_alu alu;
1149 int i, r;
1150
1151 memset(&alu, 0, sizeof(struct r600_bc_alu));
1152 alu.inst = ctx->inst_info->r600_opcode;
1153 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1154 r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]);
1155 if (r)
1156 return r;
1157 alu.src[i].chan = tgsi_chan(&inst->Src[i], 0);
1158 }
1159 alu.dst.sel = ctx->temp_reg;
1160 alu.dst.write = 1;
1161 alu.last = 1;
1162 r = r600_bc_add_alu(ctx->bc, &alu);
1163 if (r)
1164 return r;
1165 r = r600_bc_add_literal(ctx->bc, ctx->value);
1166 if (r)
1167 return r;
1168 /* replicate result */
1169 return tgsi_helper_tempx_replicate(ctx);
1170 }
1171
1172 static int tgsi_pow(struct r600_shader_ctx *ctx)
1173 {
1174 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1175 struct r600_bc_alu alu;
1176 int r;
1177
1178 /* LOG2(a) */
1179 memset(&alu, 0, sizeof(struct r600_bc_alu));
1180 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE;
1181 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1182 if (r)
1183 return r;
1184 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1185 alu.dst.sel = ctx->temp_reg;
1186 alu.dst.write = 1;
1187 alu.last = 1;
1188 r = r600_bc_add_alu(ctx->bc, &alu);
1189 if (r)
1190 return r;
1191 r = r600_bc_add_literal(ctx->bc,ctx->value);
1192 if (r)
1193 return r;
1194 /* b * LOG2(a) */
1195 memset(&alu, 0, sizeof(struct r600_bc_alu));
1196 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL_IEEE;
1197 r = tgsi_src(ctx, &inst->Src[1], &alu.src[0]);
1198 if (r)
1199 return r;
1200 alu.src[0].chan = tgsi_chan(&inst->Src[1], 0);
1201 alu.src[1].sel = ctx->temp_reg;
1202 alu.dst.sel = ctx->temp_reg;
1203 alu.dst.write = 1;
1204 alu.last = 1;
1205 r = r600_bc_add_alu(ctx->bc, &alu);
1206 if (r)
1207 return r;
1208 r = r600_bc_add_literal(ctx->bc,ctx->value);
1209 if (r)
1210 return r;
1211 /* POW(a,b) = EXP2(b * LOG2(a))*/
1212 memset(&alu, 0, sizeof(struct r600_bc_alu));
1213 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1214 alu.src[0].sel = ctx->temp_reg;
1215 alu.dst.sel = ctx->temp_reg;
1216 alu.dst.write = 1;
1217 alu.last = 1;
1218 r = r600_bc_add_alu(ctx->bc, &alu);
1219 if (r)
1220 return r;
1221 r = r600_bc_add_literal(ctx->bc,ctx->value);
1222 if (r)
1223 return r;
1224 return tgsi_helper_tempx_replicate(ctx);
1225 }
1226
1227 static int tgsi_ssg(struct r600_shader_ctx *ctx)
1228 {
1229 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1230 struct r600_bc_alu alu;
1231 struct r600_bc_alu_src r600_src[3];
1232 int i, r;
1233
1234 r = tgsi_split_constant(ctx, r600_src);
1235 if (r)
1236 return r;
1237
1238 /* tmp = (src > 0 ? 1 : src) */
1239 for (i = 0; i < 4; i++) {
1240 memset(&alu, 0, sizeof(struct r600_bc_alu));
1241 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
1242 alu.is_op3 = 1;
1243
1244 alu.dst.sel = ctx->temp_reg;
1245 alu.dst.chan = i;
1246
1247 alu.src[0] = r600_src[0];
1248 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1249
1250 alu.src[1].sel = V_SQ_ALU_SRC_1;
1251
1252 alu.src[2] = r600_src[0];
1253 alu.src[2].chan = tgsi_chan(&inst->Src[0], i);
1254 if (i == 3)
1255 alu.last = 1;
1256 r = r600_bc_add_alu(ctx->bc, &alu);
1257 if (r)
1258 return r;
1259 }
1260 r = r600_bc_add_literal(ctx->bc, ctx->value);
1261 if (r)
1262 return r;
1263
1264 /* dst = (-tmp > 0 ? -1 : tmp) */
1265 for (i = 0; i < 4; i++) {
1266 memset(&alu, 0, sizeof(struct r600_bc_alu));
1267 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
1268 alu.is_op3 = 1;
1269 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1270 if (r)
1271 return r;
1272
1273 alu.src[0].sel = ctx->temp_reg;
1274 alu.src[0].chan = i;
1275 alu.src[0].neg = 1;
1276
1277 alu.src[1].sel = V_SQ_ALU_SRC_1;
1278 alu.src[1].neg = 1;
1279
1280 alu.src[2].sel = ctx->temp_reg;
1281 alu.src[2].chan = i;
1282
1283 if (i == 3)
1284 alu.last = 1;
1285 r = r600_bc_add_alu(ctx->bc, &alu);
1286 if (r)
1287 return r;
1288 }
1289 return 0;
1290 }
1291
1292 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
1293 {
1294 struct r600_bc_alu alu;
1295 int i, r;
1296
1297 r = r600_bc_add_literal(ctx->bc, ctx->value);
1298 if (r)
1299 return r;
1300 for (i = 0; i < 4; i++) {
1301 memset(&alu, 0, sizeof(struct r600_bc_alu));
1302 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
1303 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
1304 alu.dst.chan = i;
1305 } else {
1306 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1307 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1308 if (r)
1309 return r;
1310 alu.src[0].sel = ctx->temp_reg;
1311 alu.src[0].chan = i;
1312 }
1313 if (i == 3) {
1314 alu.last = 1;
1315 }
1316 r = r600_bc_add_alu(ctx->bc, &alu);
1317 if (r)
1318 return r;
1319 }
1320 return 0;
1321 }
1322
1323 static int tgsi_op3(struct r600_shader_ctx *ctx)
1324 {
1325 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1326 struct r600_bc_alu_src r600_src[3];
1327 struct r600_bc_alu alu;
1328 int i, j, r;
1329
1330 r = tgsi_split_constant(ctx, r600_src);
1331 if (r)
1332 return r;
1333 /* do it in 2 step as op3 doesn't support writemask */
1334 for (i = 0; i < 4; i++) {
1335 memset(&alu, 0, sizeof(struct r600_bc_alu));
1336 alu.inst = ctx->inst_info->r600_opcode;
1337 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1338 alu.src[j] = r600_src[j];
1339 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1340 }
1341 alu.dst.sel = ctx->temp_reg;
1342 alu.dst.chan = i;
1343 alu.dst.write = 1;
1344 alu.is_op3 = 1;
1345 if (i == 3) {
1346 alu.last = 1;
1347 }
1348 r = r600_bc_add_alu(ctx->bc, &alu);
1349 if (r)
1350 return r;
1351 }
1352 return tgsi_helper_copy(ctx, inst);
1353 }
1354
1355 static int tgsi_dp(struct r600_shader_ctx *ctx)
1356 {
1357 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1358 struct r600_bc_alu_src r600_src[3];
1359 struct r600_bc_alu alu;
1360 int i, j, r;
1361
1362 r = tgsi_split_constant(ctx, r600_src);
1363 if (r)
1364 return r;
1365 for (i = 0; i < 4; i++) {
1366 memset(&alu, 0, sizeof(struct r600_bc_alu));
1367 alu.inst = ctx->inst_info->r600_opcode;
1368 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1369 alu.src[j] = r600_src[j];
1370 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1371 }
1372 alu.dst.sel = ctx->temp_reg;
1373 alu.dst.chan = i;
1374 alu.dst.write = 1;
1375 /* handle some special cases */
1376 switch (ctx->inst_info->tgsi_opcode) {
1377 case TGSI_OPCODE_DP2:
1378 if (i > 1) {
1379 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
1380 alu.src[0].chan = alu.src[1].chan = 0;
1381 }
1382 break;
1383 case TGSI_OPCODE_DP3:
1384 if (i > 2) {
1385 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
1386 alu.src[0].chan = alu.src[1].chan = 0;
1387 }
1388 break;
1389 case TGSI_OPCODE_DPH:
1390 if (i == 3) {
1391 alu.src[0].sel = V_SQ_ALU_SRC_1;
1392 alu.src[0].chan = 0;
1393 alu.src[0].neg = 0;
1394 }
1395 break;
1396 default:
1397 break;
1398 }
1399 if (i == 3) {
1400 alu.last = 1;
1401 }
1402 r = r600_bc_add_alu(ctx->bc, &alu);
1403 if (r)
1404 return r;
1405 }
1406 return tgsi_helper_copy(ctx, inst);
1407 }
1408
/* Emit a texture fetch.  Coordinates that the TEX unit cannot source
 * directly (non-temporary registers, or TXP's projected coordinates)
 * are first staged into ctx->temp_reg with ALU instructions. */
static int tgsi_tex(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_tex tex;
	struct r600_bc_alu alu;
	unsigned src_gpr;
	int r, i;

	src_gpr = ctx->file_offset[inst->Src[0].Register.File] + inst->Src[0].Register.Index;

	if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
		/* Add perspective divide */
		/* temp.w = 1 / src.w */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
		alu.src[0].sel = src_gpr;
		alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 3;
		alu.last = 1;
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		/* temp.xyz = src.xyz * temp.w */
		for (i = 0; i < 3; i++) {
			memset(&alu, 0, sizeof(struct r600_bc_alu));
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 3;
			alu.src[1].sel = src_gpr;
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			alu.dst.write = 1;
			r = r600_bc_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		/* temp.w = 1.0 (closes the ALU group started by the MULs) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
		alu.src[0].sel = V_SQ_ALU_SRC_1;
		alu.src[0].chan = 0;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 3;
		alu.last = 1;
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
		src_gpr = ctx->temp_reg;
	} else if (inst->Src[0].Register.File != TGSI_FILE_TEMPORARY) {
		/* Copy non-temporary coordinates (e.g. shader inputs) into
		 * temp_reg so the TEX instruction reads from a GPR. */
		for (i = 0; i < 4; i++) {
			memset(&alu, 0, sizeof(struct r600_bc_alu));
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
			alu.src[0].sel = src_gpr;
			alu.src[0].chan = i;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			if (i == 3)
				alu.last = 1;
			alu.dst.write = 1;
			r = r600_bc_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		src_gpr = ctx->temp_reg;
	}

	/* Src[1] is the sampler; resource and sampler ids are kept equal. */
	memset(&tex, 0, sizeof(struct r600_bc_tex));
	tex.inst = ctx->inst_info->r600_opcode;
	tex.resource_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
	tex.sampler_id = tex.resource_id;
	tex.src_gpr = src_gpr;
	tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
	/* identity swizzles on both source and destination */
	tex.dst_sel_x = 0;
	tex.dst_sel_y = 1;
	tex.dst_sel_z = 2;
	tex.dst_sel_w = 3;
	tex.src_sel_x = 0;
	tex.src_sel_y = 1;
	tex.src_sel_z = 2;
	tex.src_sel_w = 3;

	/* RECT textures use unnormalized coordinates; everything else is
	 * fetched with normalized ones. */
	if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
		tex.coord_type_x = 1;
		tex.coord_type_y = 1;
		tex.coord_type_z = 1;
		tex.coord_type_w = 1;
	}
	return r600_bc_add_tex(ctx->bc, &tex);
}
1500
1501 static int tgsi_lrp(struct r600_shader_ctx *ctx)
1502 {
1503 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1504 struct r600_bc_alu_src r600_src[3];
1505 struct r600_bc_alu alu;
1506 unsigned i;
1507 int r;
1508
1509 r = tgsi_split_constant(ctx, r600_src);
1510 if (r)
1511 return r;
1512 /* 1 - src0 */
1513 for (i = 0; i < 4; i++) {
1514 memset(&alu, 0, sizeof(struct r600_bc_alu));
1515 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD;
1516 alu.src[0].sel = V_SQ_ALU_SRC_1;
1517 alu.src[0].chan = 0;
1518 alu.src[1] = r600_src[0];
1519 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1520 alu.src[1].neg = 1;
1521 alu.dst.sel = ctx->temp_reg;
1522 alu.dst.chan = i;
1523 if (i == 3) {
1524 alu.last = 1;
1525 }
1526 alu.dst.write = 1;
1527 r = r600_bc_add_alu(ctx->bc, &alu);
1528 if (r)
1529 return r;
1530 }
1531 r = r600_bc_add_literal(ctx->bc, ctx->value);
1532 if (r)
1533 return r;
1534
1535 /* (1 - src0) * src2 */
1536 for (i = 0; i < 4; i++) {
1537 memset(&alu, 0, sizeof(struct r600_bc_alu));
1538 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1539 alu.src[0].sel = ctx->temp_reg;
1540 alu.src[0].chan = i;
1541 alu.src[1] = r600_src[2];
1542 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1543 alu.dst.sel = ctx->temp_reg;
1544 alu.dst.chan = i;
1545 if (i == 3) {
1546 alu.last = 1;
1547 }
1548 alu.dst.write = 1;
1549 r = r600_bc_add_alu(ctx->bc, &alu);
1550 if (r)
1551 return r;
1552 }
1553 r = r600_bc_add_literal(ctx->bc, ctx->value);
1554 if (r)
1555 return r;
1556
1557 /* src0 * src1 + (1 - src0) * src2 */
1558 for (i = 0; i < 4; i++) {
1559 memset(&alu, 0, sizeof(struct r600_bc_alu));
1560 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1561 alu.is_op3 = 1;
1562 alu.src[0] = r600_src[0];
1563 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1564 alu.src[1] = r600_src[1];
1565 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
1566 alu.src[2].sel = ctx->temp_reg;
1567 alu.src[2].chan = i;
1568 alu.dst.sel = ctx->temp_reg;
1569 alu.dst.chan = i;
1570 if (i == 3) {
1571 alu.last = 1;
1572 }
1573 r = r600_bc_add_alu(ctx->bc, &alu);
1574 if (r)
1575 return r;
1576 }
1577 return tgsi_helper_copy(ctx, inst);
1578 }
1579
1580 static int tgsi_cmp(struct r600_shader_ctx *ctx)
1581 {
1582 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1583 struct r600_bc_alu_src r600_src[3];
1584 struct r600_bc_alu alu;
1585 int use_temp = 0;
1586 int i, r;
1587
1588 r = tgsi_split_constant(ctx, r600_src);
1589 if (r)
1590 return r;
1591
1592 if (inst->Dst[0].Register.WriteMask != 0xf)
1593 use_temp = 1;
1594
1595 for (i = 0; i < 4; i++) {
1596 memset(&alu, 0, sizeof(struct r600_bc_alu));
1597 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE;
1598 alu.src[0] = r600_src[0];
1599 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1600
1601 alu.src[1] = r600_src[2];
1602 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1603
1604 alu.src[2] = r600_src[1];
1605 alu.src[2].chan = tgsi_chan(&inst->Src[1], i);
1606
1607 if (use_temp)
1608 alu.dst.sel = ctx->temp_reg;
1609 else {
1610 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1611 if (r)
1612 return r;
1613 }
1614 alu.dst.chan = i;
1615 alu.dst.write = 1;
1616 alu.is_op3 = 1;
1617 if (i == 3)
1618 alu.last = 1;
1619 r = r600_bc_add_alu(ctx->bc, &alu);
1620 if (r)
1621 return r;
1622 }
1623 if (use_temp)
1624 return tgsi_helper_copy(ctx, inst);
1625 return 0;
1626 }
1627
1628 static int tgsi_xpd(struct r600_shader_ctx *ctx)
1629 {
1630 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1631 struct r600_bc_alu_src r600_src[3];
1632 struct r600_bc_alu alu;
1633 uint32_t use_temp = 0;
1634 int i, r;
1635
1636 if (inst->Dst[0].Register.WriteMask != 0xf)
1637 use_temp = 1;
1638
1639 r = tgsi_split_constant(ctx, r600_src);
1640 if (r)
1641 return r;
1642
1643 for (i = 0; i < 4; i++) {
1644 memset(&alu, 0, sizeof(struct r600_bc_alu));
1645 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1646
1647 alu.src[0] = r600_src[0];
1648 switch (i) {
1649 case 0:
1650 alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
1651 break;
1652 case 1:
1653 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1654 break;
1655 case 2:
1656 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1657 break;
1658 case 3:
1659 alu.src[0].sel = V_SQ_ALU_SRC_0;
1660 alu.src[0].chan = i;
1661 }
1662
1663 alu.src[1] = r600_src[1];
1664 switch (i) {
1665 case 0:
1666 alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
1667 break;
1668 case 1:
1669 alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
1670 break;
1671 case 2:
1672 alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
1673 break;
1674 case 3:
1675 alu.src[1].sel = V_SQ_ALU_SRC_0;
1676 alu.src[1].chan = i;
1677 }
1678
1679 alu.dst.sel = ctx->temp_reg;
1680 alu.dst.chan = i;
1681 alu.dst.write = 1;
1682
1683 if (i == 3)
1684 alu.last = 1;
1685 r = r600_bc_add_alu(ctx->bc, &alu);
1686 if (r)
1687 return r;
1688 }
1689
1690 for (i = 0; i < 4; i++) {
1691 memset(&alu, 0, sizeof(struct r600_bc_alu));
1692 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1693
1694 alu.src[0] = r600_src[0];
1695 switch (i) {
1696 case 0:
1697 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1698 break;
1699 case 1:
1700 alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
1701 break;
1702 case 2:
1703 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1704 break;
1705 case 3:
1706 alu.src[0].sel = V_SQ_ALU_SRC_0;
1707 alu.src[0].chan = i;
1708 }
1709
1710 alu.src[1] = r600_src[1];
1711 switch (i) {
1712 case 0:
1713 alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
1714 break;
1715 case 1:
1716 alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
1717 break;
1718 case 2:
1719 alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
1720 break;
1721 case 3:
1722 alu.src[1].sel = V_SQ_ALU_SRC_0;
1723 alu.src[1].chan = i;
1724 }
1725
1726 alu.src[2].sel = ctx->temp_reg;
1727 alu.src[2].neg = 1;
1728 alu.src[2].chan = i;
1729
1730 if (use_temp)
1731 alu.dst.sel = ctx->temp_reg;
1732 else {
1733 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1734 if (r)
1735 return r;
1736 }
1737 alu.dst.chan = i;
1738 alu.dst.write = 1;
1739 alu.is_op3 = 1;
1740 if (i == 3)
1741 alu.last = 1;
1742 r = r600_bc_add_alu(ctx->bc, &alu);
1743 if (r)
1744 return r;
1745 }
1746 if (use_temp)
1747 return tgsi_helper_copy(ctx, inst);
1748 return 0;
1749 }
1750
1751 static int tgsi_exp(struct r600_shader_ctx *ctx)
1752 {
1753 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1754 struct r600_bc_alu_src r600_src[3];
1755 struct r600_bc_alu alu;
1756 int r;
1757
1758 /* result.x = 2^floor(src); */
1759 if (inst->Dst[0].Register.WriteMask & 1) {
1760 memset(&alu, 0, sizeof(struct r600_bc_alu));
1761
1762 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR;
1763 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1764 if (r)
1765 return r;
1766
1767 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1768
1769 alu.dst.sel = ctx->temp_reg;
1770 alu.dst.chan = 0;
1771 alu.dst.write = 1;
1772 alu.last = 1;
1773 r = r600_bc_add_alu(ctx->bc, &alu);
1774 if (r)
1775 return r;
1776
1777 r = r600_bc_add_literal(ctx->bc, ctx->value);
1778 if (r)
1779 return r;
1780
1781 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1782 alu.src[0].sel = ctx->temp_reg;
1783 alu.src[0].chan = 0;
1784
1785 alu.dst.sel = ctx->temp_reg;
1786 alu.dst.chan = 0;
1787 alu.dst.write = 1;
1788 alu.last = 1;
1789 r = r600_bc_add_alu(ctx->bc, &alu);
1790 if (r)
1791 return r;
1792
1793 r = r600_bc_add_literal(ctx->bc, ctx->value);
1794 if (r)
1795 return r;
1796 }
1797
1798 /* result.y = tmp - floor(tmp); */
1799 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
1800 memset(&alu, 0, sizeof(struct r600_bc_alu));
1801
1802 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;
1803 alu.src[0] = r600_src[0];
1804 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1805 if (r)
1806 return r;
1807 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1808
1809 alu.dst.sel = ctx->temp_reg;
1810 // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1811 // if (r)
1812 // return r;
1813 alu.dst.write = 1;
1814 alu.dst.chan = 1;
1815
1816 alu.last = 1;
1817
1818 r = r600_bc_add_alu(ctx->bc, &alu);
1819 if (r)
1820 return r;
1821 r = r600_bc_add_literal(ctx->bc, ctx->value);
1822 if (r)
1823 return r;
1824 }
1825
1826 /* result.z = RoughApprox2ToX(tmp);*/
1827 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
1828 memset(&alu, 0, sizeof(struct r600_bc_alu));
1829 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1830 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1831 if (r)
1832 return r;
1833 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1834
1835 alu.dst.sel = ctx->temp_reg;
1836 alu.dst.write = 1;
1837 alu.dst.chan = 2;
1838
1839 alu.last = 1;
1840
1841 r = r600_bc_add_alu(ctx->bc, &alu);
1842 if (r)
1843 return r;
1844 r = r600_bc_add_literal(ctx->bc, ctx->value);
1845 if (r)
1846 return r;
1847 }
1848
1849 /* result.w = 1.0;*/
1850 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
1851 memset(&alu, 0, sizeof(struct r600_bc_alu));
1852
1853 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1854 alu.src[0].sel = V_SQ_ALU_SRC_1;
1855 alu.src[0].chan = 0;
1856
1857 alu.dst.sel = ctx->temp_reg;
1858 alu.dst.chan = 3;
1859 alu.dst.write = 1;
1860 alu.last = 1;
1861 r = r600_bc_add_alu(ctx->bc, &alu);
1862 if (r)
1863 return r;
1864 r = r600_bc_add_literal(ctx->bc, ctx->value);
1865 if (r)
1866 return r;
1867 }
1868 return tgsi_helper_copy(ctx, inst);
1869 }
1870
1871 static int tgsi_arl(struct r600_shader_ctx *ctx)
1872 {
1873 /* TODO from r600c, ar values don't persist between clauses */
1874 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1875 struct r600_bc_alu alu;
1876 int r;
1877 memset(&alu, 0, sizeof(struct r600_bc_alu));
1878
1879 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR;
1880
1881 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1882 if (r)
1883 return r;
1884 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1885
1886 alu.last = 1;
1887
1888 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU);
1889 if (r)
1890 return r;
1891 return 0;
1892 }
1893
1894 static int tgsi_opdst(struct r600_shader_ctx *ctx)
1895 {
1896 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1897 struct r600_bc_alu alu;
1898 int i, r = 0;
1899
1900 for (i = 0; i < 4; i++) {
1901 memset(&alu, 0, sizeof(struct r600_bc_alu));
1902
1903 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1904 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1905 if (r)
1906 return r;
1907
1908 if (i == 0 || i == 3) {
1909 alu.src[0].sel = V_SQ_ALU_SRC_1;
1910 } else {
1911 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1912 if (r)
1913 return r;
1914 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1915 }
1916
1917 if (i == 0 || i == 2) {
1918 alu.src[1].sel = V_SQ_ALU_SRC_1;
1919 } else {
1920 r = tgsi_src(ctx, &inst->Src[1], &alu.src[1]);
1921 if (r)
1922 return r;
1923 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
1924 }
1925 if (i == 3)
1926 alu.last = 1;
1927 r = r600_bc_add_alu(ctx->bc, &alu);
1928 if (r)
1929 return r;
1930 }
1931 return 0;
1932 }
1933
1934 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
1935 {
1936 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1937 struct r600_bc_alu alu;
1938 int r;
1939
1940 memset(&alu, 0, sizeof(struct r600_bc_alu));
1941 alu.inst = opcode;
1942 alu.predicate = 1;
1943
1944 alu.dst.sel = ctx->temp_reg;
1945 alu.dst.write = 1;
1946 alu.dst.chan = 0;
1947
1948 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1949 if (r)
1950 return r;
1951 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1952 alu.src[1].sel = V_SQ_ALU_SRC_0;
1953 alu.src[1].chan = 0;
1954
1955 alu.last = 1;
1956
1957 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE);
1958 if (r)
1959 return r;
1960 return 0;
1961 }
1962
1963 static int pops(struct r600_shader_ctx *ctx, int pops)
1964 {
1965 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_POP);
1966 ctx->bc->cf_last->pop_count = pops;
1967 return 0;
1968 }
1969
1970 static inline void callstack_decrease_current(struct r600_shader_ctx *ctx, unsigned reason)
1971 {
1972 switch(reason) {
1973 case FC_PUSH_VPM:
1974 ctx->bc->callstack[ctx->bc->call_sp].current--;
1975 break;
1976 case FC_PUSH_WQM:
1977 case FC_LOOP:
1978 ctx->bc->callstack[ctx->bc->call_sp].current -= 4;
1979 break;
1980 case FC_REP:
1981 /* TOODO : for 16 vp asic should -= 2; */
1982 ctx->bc->callstack[ctx->bc->call_sp].current --;
1983 break;
1984 }
1985 }
1986
1987 static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only)
1988 {
1989 if (check_max_only) {
1990 int diff;
1991 switch (reason) {
1992 case FC_PUSH_VPM:
1993 diff = 1;
1994 break;
1995 case FC_PUSH_WQM:
1996 diff = 4;
1997 break;
1998 }
1999 if ((ctx->bc->callstack[ctx->bc->call_sp].current + diff) >
2000 ctx->bc->callstack[ctx->bc->call_sp].max) {
2001 ctx->bc->callstack[ctx->bc->call_sp].max =
2002 ctx->bc->callstack[ctx->bc->call_sp].current + diff;
2003 }
2004 return;
2005 }
2006 switch (reason) {
2007 case FC_PUSH_VPM:
2008 ctx->bc->callstack[ctx->bc->call_sp].current++;
2009 break;
2010 case FC_PUSH_WQM:
2011 case FC_LOOP:
2012 ctx->bc->callstack[ctx->bc->call_sp].current += 4;
2013 break;
2014 case FC_REP:
2015 ctx->bc->callstack[ctx->bc->call_sp].current++;
2016 break;
2017 }
2018
2019 if ((ctx->bc->callstack[ctx->bc->call_sp].current) >
2020 ctx->bc->callstack[ctx->bc->call_sp].max) {
2021 ctx->bc->callstack[ctx->bc->call_sp].max =
2022 ctx->bc->callstack[ctx->bc->call_sp].current;
2023 }
2024 }
2025
2026 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
2027 {
2028 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
2029
2030 sp->mid = (struct r600_bc_cf **)realloc((void *)sp->mid,
2031 sizeof(struct r600_bc_cf *) * (sp->num_mid + 1));
2032 sp->mid[sp->num_mid] = ctx->bc->cf_last;
2033 sp->num_mid++;
2034 }
2035
2036 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
2037 {
2038 ctx->bc->fc_sp++;
2039 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
2040 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
2041 }
2042
2043 static void fc_poplevel(struct r600_shader_ctx *ctx)
2044 {
2045 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
2046 if (sp->mid) {
2047 free(sp->mid);
2048 sp->mid = NULL;
2049 }
2050 sp->num_mid = 0;
2051 sp->start = NULL;
2052 sp->type = 0;
2053 ctx->bc->fc_sp--;
2054 }
2055
#if 0
/* Disabled scaffolding for subroutine-return / break-via-flag control
 * flow; kept for reference until LOOPRET-style support is implemented
 * (see the TODO in tgsi_endloop). */
static int emit_return(struct r600_shader_ctx *ctx)
{
	r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_RETURN);
	return 0;
}

static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
{

	r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
	ctx->bc->cf_last->pop_count = pops;
	/* TODO work out offset */
	return 0;
}

static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
{
	return 0;
}

static void emit_testflag(struct r600_shader_ctx *ctx)
{

}

static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
{
	emit_testflag(ctx);
	emit_jump_to_offset(ctx, 1, 4);
	emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
	pops(ctx, ifidx + 1);
	emit_return(ctx);
}

static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
{
	emit_testflag(ctx);

	r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fc_sp);

	pops(ctx, 1);
}
#endif
2103
2104 static int tgsi_if(struct r600_shader_ctx *ctx)
2105 {
2106 emit_logic_pred(ctx, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE);
2107
2108 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
2109
2110 fc_pushlevel(ctx, FC_IF);
2111
2112 callstack_check_depth(ctx, FC_PUSH_VPM, 0);
2113 return 0;
2114 }
2115
2116 static int tgsi_else(struct r600_shader_ctx *ctx)
2117 {
2118 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_ELSE);
2119 ctx->bc->cf_last->pop_count = 1;
2120
2121 fc_set_mid(ctx, ctx->bc->fc_sp);
2122 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
2123 return 0;
2124 }
2125
static int tgsi_endif(struct r600_shader_ctx *ctx)
{
	pops(ctx, 1);
	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
		R600_ERR("if/endif unbalanced in shader\n");
		return -1;
	}

	/* Patch jump targets: without an ELSE, the opening JUMP skips
	 * past the whole IF body (cf_last->id + 2) and also pops the
	 * pushed mask; with an ELSE (recorded as mid[0]), the JUMP was
	 * already pointed at it in tgsi_else, so only the ELSE's own
	 * target needs fixing up here. */
	if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
		ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
		ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
	} else {
		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
	}
	fc_poplevel(ctx);

	callstack_decrease_current(ctx, FC_PUSH_VPM);
	return 0;
}
2145
2146 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
2147 {
2148 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL);
2149
2150 fc_pushlevel(ctx, FC_LOOP);
2151
2152 /* check stack depth */
2153 callstack_check_depth(ctx, FC_LOOP, 0);
2154 return 0;
2155 }
2156
static int tgsi_endloop(struct r600_shader_ctx *ctx)
{
	int i;

	r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END);

	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
		R600_ERR("loop/endloop in shader code are not paired.\n");
		return -EINVAL;
	}

	/* fixup loop pointers - from r600isa
	   LOOP END points to CF after LOOP START,
	   LOOP START point to CF after LOOP END
	   BRK/CONT point to LOOP END CF
	*/
	ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;

	ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;

	/* Patch every BREAK/CONTINUE recorded for this loop (the mid
	 * list) to target the LOOP_END instruction itself. */
	for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
	}
	/* TODO add LOOPRET support */
	fc_poplevel(ctx);
	callstack_decrease_current(ctx, FC_LOOP);
	return 0;
}
2185
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
{
	unsigned int fscp;

	/* Scan down for the innermost enclosing FC_LOOP.  NOTE(review):
	 * the scan stops before index 0, so this assumes fc_stack slot 0
	 * is never a loop entry (fc_pushlevel pre-increments fc_sp) —
	 * confirm against the fc_sp initial value. */
	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
	{
		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
			break;
	}

	if (fscp == 0) {
		R600_ERR("Break not inside loop/endloop pair\n");
		return -EINVAL;
	}

	/* Emit LOOP_BREAK or LOOP_CONTINUE (selected by inst_info); its
	 * target is patched to the LOOP_END CF in tgsi_endloop via the
	 * loop's mid list. */
	r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fscp);

	pops(ctx, 1);
	callstack_check_depth(ctx, FC_PUSH_VPM, 1);
	return 0;
}
2210
/* TGSI opcode dispatch table for the R600 shader translator.
 *
 * The table is indexed positionally by TGSI opcode value, so entry order
 * must match the TGSI_OPCODE_* numbering exactly; reserved/removed opcode
 * numbers are filled with bare-number placeholder rows (the "gap" entries)
 * to keep the indexing intact.  Unimplemented opcodes map to
 * tgsi_unsupported.
 *
 * NOTE(review): the second field is 1 only for the OP3 MULADD entry —
 * presumably an "is op3" flag; confirm against the definition of
 * struct r600_shader_tgsi_instruction.
 */
static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	{TGSI_OPCODE_ARL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_arl},
	{TGSI_OPCODE_MOV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_LIT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},
	{TGSI_OPCODE_RCP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_RSQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_EXP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_exp},
	{TGSI_OPCODE_LOG,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MUL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
	{TGSI_OPCODE_ADD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_DP3,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DP4,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DST,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_opdst},
	{TGSI_OPCODE_MIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
	{TGSI_OPCODE_MAX,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
	/* SLT/SLE are emitted as SETGT/SETGE with swapped operands. */
	{TGSI_OPCODE_SLT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2_swap},
	{TGSI_OPCODE_SGE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2},
	{TGSI_OPCODE_MAD,	1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
	/* NOTE(review): SUB emitted as ADD — presumably tgsi_op2 negates the
	 * second source for SUB; confirm in tgsi_op2. */
	{TGSI_OPCODE_SUB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_LRP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
	{TGSI_OPCODE_CND,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{20,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2A,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{22,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{23,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FRC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT, tgsi_op2},
	{TGSI_OPCODE_CLAMP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FLR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR, tgsi_op2},
	{TGSI_OPCODE_ROUND,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_EX2,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_LG2,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_POW,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_pow},
	{TGSI_OPCODE_XPD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_xpd},
	/* gap */
	{32,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* NOTE(review): ABS emitted as MOV — presumably tgsi_op2 sets the
	 * source-absolute modifier for ABS; confirm in tgsi_op2. */
	{TGSI_OPCODE_ABS,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_RCC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DPH,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_COS,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS, tgsi_trig},
	/* DDX/DDY use texture-unit gradient instructions, not ALU ops. */
	{TGSI_OPCODE_DDX,	0, SQ_TEX_INST_GET_GRADIENTS_H, tgsi_tex},
	{TGSI_OPCODE_DDY,	0, SQ_TEX_INST_GET_GRADIENTS_V, tgsi_tex},
	{TGSI_OPCODE_KILP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill},  /* predicated kill */
	{TGSI_OPCODE_PK2H,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK2US,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4B,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4UB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RFL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SEQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE, tgsi_op2},
	{TGSI_OPCODE_SFL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SGT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2},
	{TGSI_OPCODE_SIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN, tgsi_trig},
	{TGSI_OPCODE_SLE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2_swap},
	{TGSI_OPCODE_SNE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE, tgsi_op2},
	{TGSI_OPCODE_STR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX,	0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXP,	0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_UP2H,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP2US,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4B,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4UB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_X2D,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BRA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CAL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RET,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SSG,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_ssg},
	{TGSI_OPCODE_CMP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_cmp},
	{TGSI_OPCODE_SCS,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_scs},
	{TGSI_OPCODE_TXB,	0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_NRM,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DIV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_TXL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BRK,	0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK, tgsi_loop_brk_cont},
	{TGSI_OPCODE_IF,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_if},
	/* gap */
	{75,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{76,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ELSE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_else},
	{TGSI_OPCODE_ENDIF,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endif},
	/* gap */
	{79,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{80,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PUSHA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POPA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CEIL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_I2F,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TRUNC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_SHL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{88,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_AND,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_OR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MOD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_XOR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SAD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXF,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CONT,	0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE, tgsi_loop_brk_cont},
	{TGSI_OPCODE_EMIT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDPRIM,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNLOOP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_bgnloop},
	{TGSI_OPCODE_BGNSUB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDLOOP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
	{TGSI_OPCODE_ENDSUB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{103,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{104,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{105,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{106,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{108,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{109,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{110,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{111,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM4,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CALLNZ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IFC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BREAKC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KIL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill},  /* conditional kill */
	{TGSI_OPCODE_END,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end},  /* aka HALT */
	/* gap */
	{118,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2I,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IDIV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IMAX,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IMIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_INEG,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISGE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISHR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISLT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2U,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_U2F,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UADD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UDIV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMAD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMAX,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMOD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMUL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USEQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USGE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USHR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USLT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USNE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SWITCH,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CASE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DEFAULT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSWITCH,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LAST,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
};