r600g: add SCS support.
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "pipe/p_shader_tokens.h"
24 #include "tgsi/tgsi_parse.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_dump.h"
27 #include "util/u_format.h"
28 #include "r600_screen.h"
29 #include "r600_context.h"
30 #include "r600_shader.h"
31 #include "r600_asm.h"
32 #include "r600_sq.h"
33 #include "r600d.h"
34 #include <stdio.h>
35 #include <errno.h>
36
37
38 struct r600_shader_tgsi_instruction;
39
/* Per-translation state carried through the TGSI -> r600 conversion. */
struct r600_shader_ctx {
	struct tgsi_shader_info			info;		/* tgsi_scan_shader() summary of the program */
	struct tgsi_parse_context		parse;		/* token stream parser state */
	const struct tgsi_token			*tokens;	/* input TGSI program */
	unsigned				type;		/* TGSI_PROCESSOR_* of the shader */
	unsigned				file_offset[TGSI_FILE_COUNT]; /* base register per TGSI file */
	unsigned				temp_reg;	/* first GPR free for translator scratch use */
	struct r600_shader_tgsi_instruction	*inst_info;	/* table entry for the opcode being processed */
	struct r600_bc				*bc;		/* bytecode being emitted */
	struct r600_shader			*shader;	/* shader being filled in */
	u32					value[4];	/* staged literal words for the current instruction */
	u32					*literals;	/* immediates gathered from the token stream, 4 words each */
	u32					nliterals;	/* number of 4-word immediates in 'literals' */
};
54
/* One row of the TGSI opcode dispatch table. */
struct r600_shader_tgsi_instruction {
	unsigned	tgsi_opcode;	/* TGSI_OPCODE_* this row handles */
	unsigned	is_op3;		/* non-zero if the r600 encoding is a three-source op */
	unsigned	r600_opcode;	/* corresponding V_SQ_ALU_WORD1_* instruction */
	int (*process)(struct r600_shader_ctx *ctx);	/* emitter; returns 0 or -errno */
};
61
62 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
63 static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
64
/* Patch vertex-fetch destination swizzles to match the currently bound
 * vertex element formats, then rebuild the bytecode.
 *
 * Only vertex shaders are affected; other shader types return 0 untouched.
 * Returns 0 on success or a negative errno.
 * NOTE(review): resource_format[] is fixed at 160 entries with no bound
 * check against vertex_elements->count — assumes the count never exceeds
 * 160; confirm against the state tracker limits.
 */
static int r600_shader_update(struct pipe_context *ctx, struct r600_shader *shader)
{
	struct r600_context *rctx = r600_context(ctx);
	const struct util_format_description *desc;
	enum pipe_format resource_format[160];
	unsigned i, nresources = 0;
	struct r600_bc *bc = &shader->bc;
	struct r600_bc_cf *cf;
	struct r600_bc_vtx *vtx;

	if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
		return 0;
	/* snapshot the format of every bound vertex element */
	for (i = 0; i < rctx->vertex_elements->count; i++) {
		resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
	}
	/* walk every fetch clause and rewrite its destination swizzle from
	 * the format description of the buffer it reads */
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		switch (cf->inst) {
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				desc = util_format_description(resource_format[vtx->buffer_id]);
				if (desc == NULL) {
					R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
					return -EINVAL;
				}
				vtx->dst_sel_x = desc->swizzle[0];
				vtx->dst_sel_y = desc->swizzle[1];
				vtx->dst_sel_z = desc->swizzle[2];
				vtx->dst_sel_w = desc->swizzle[3];
			}
			break;
		default:
			break;
		}
	}
	/* swizzles changed, so reassemble the bytecode words */
	return r600_bc_build(&shader->bc);
}
102
103 int r600_pipe_shader_create(struct pipe_context *ctx,
104 struct r600_context_state *rpshader,
105 const struct tgsi_token *tokens)
106 {
107 struct r600_screen *rscreen = r600_screen(ctx->screen);
108 int r;
109
110 //fprintf(stderr, "--------------------------------------------------------------\n");
111 //tgsi_dump(tokens, 0);
112 if (rpshader == NULL)
113 return -ENOMEM;
114 rpshader->shader.family = radeon_get_family(rscreen->rw);
115 r = r600_shader_from_tgsi(tokens, &rpshader->shader);
116 if (r) {
117 R600_ERR("translation from TGSI failed !\n");
118 return r;
119 }
120 r = r600_bc_build(&rpshader->shader.bc);
121 if (r) {
122 R600_ERR("building bytecode failed !\n");
123 return r;
124 }
125 //fprintf(stderr, "______________________________________________________________\n");
126 return 0;
127 }
128
/* Build the radeon_state object for a vertex shader.
 *
 * Fills the SPI_VS_OUT_ID map, output count and SQ_PGM_RESOURCES_VS,
 * then attaches the shader BO. Returns 0 or a negative errno.
 * NOTE(review): VS_EXPORT_COUNT uses noutput - 2 — underflows if the
 * shader has fewer than two outputs; confirm minimum output guarantee.
 */
static int r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_shader *rshader = &rpshader->shader;
	struct radeon_state *state;
	unsigned i, tmp;

	rpshader->rstate = radeon_state_decref(rpshader->rstate);
	state = radeon_state(rscreen->rw, R600_VS_SHADER_TYPE, R600_VS_SHADER);
	if (state == NULL)
		return -ENOMEM;
	for (i = 0; i < 10; i++) {
		state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i] = 0;
	}
	/* so far never got proper semantic id from tgsi */
	/* pack identity semantic ids: four 8-bit ids per SPI_VS_OUT_ID dword */
	for (i = 0; i < 32; i++) {
		tmp = i << ((i & 3) * 8);
		state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i / 4] |= tmp;
	}
	state->states[R600_VS_SHADER__SPI_VS_OUT_CONFIG] = S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2);
	state->states[R600_VS_SHADER__SQ_PGM_RESOURCES_VS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
		S_028868_STACK_SIZE(rshader->bc.nstack);
	rpshader->rstate = state;
	/* the same shader BO is referenced twice (two program pointers);
	 * placement entries appear paired per BO — confirm radeon_state layout */
	rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->bo[1] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->nbo = 2;
	rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
	rpshader->rstate->placement[2] = RADEON_GEM_DOMAIN_GTT;
	return radeon_state_pm4(state);
}
159
/* Build the radeon_state object for a fragment shader.
 *
 * Programs the SPI input controls (flat shading, point sprites),
 * computes the SQ_PGM_EXPORTS_PS mask from the shader outputs and
 * attaches the shader BO. Returns 0 or a negative errno.
 */
static int r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	const struct pipe_rasterizer_state *rasterizer;
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_shader *rshader = &rpshader->shader;
	struct r600_context *rctx = r600_context(ctx);
	struct radeon_state *state;
	unsigned i, tmp, exports_ps, num_cout;

	rasterizer = &rctx->rasterizer->state.rasterizer;
	rpshader->rstate = radeon_state_decref(rpshader->rstate);
	state = radeon_state(rscreen->rw, R600_PS_SHADER_TYPE, R600_PS_SHADER);
	if (state == NULL)
		return -ENOMEM;
	/* one SPI_PS_INPUT_CNTL register per shader input */
	for (i = 0; i < rshader->ninput; i++) {
		tmp = S_028644_SEMANTIC(i);
		tmp |= S_028644_SEL_CENTROID(1);
		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
			rshader->input[i].name == TGSI_SEMANTIC_BCOLOR) {
			tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
		}
		if (rasterizer->sprite_coord_enable & (1 << i)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}
		state->states[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0 + i] = tmp;
	}

	/* export mask: bit 0 = depth (POSITION output), bits 1..N = color outputs */
	exports_ps = 0;
	num_cout = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			exports_ps |= 1;
		else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
			exports_ps |= (1 << (num_cout+1));
			num_cout++;
		}
	}
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}
	state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] = S_0286CC_NUM_INTERP(rshader->ninput) |
		S_0286CC_PERSP_GRADIENT_ENA(1);
	state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_1] = 0x00000000;
	state->states[R600_PS_SHADER__SQ_PGM_RESOURCES_PS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
		S_028868_STACK_SIZE(rshader->bc.nstack);
	state->states[R600_PS_SHADER__SQ_PGM_EXPORTS_PS] = exports_ps;
	rpshader->rstate = state;
	rpshader->rstate->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
	rpshader->rstate->nbo = 1;
	rpshader->rstate->placement[0] = RADEON_GEM_DOMAIN_GTT;
	return radeon_state_pm4(state);
}
213
/* Upload the assembled bytecode into a fresh BO and (re)build the
 * hardware state object for the shader's processor type.
 *
 * Returns 0 on success, -ENOMEM if the BO allocation fails, -EINVAL
 * for an unknown processor type, or the error from state building.
 * NOTE(review): radeon_bo_map()'s return value is ignored — confirm
 * it cannot fail here, otherwise the memcpy writes through NULL.
 */
static int r600_pipe_shader(struct pipe_context *ctx, struct r600_context_state *rpshader)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_context *rctx = r600_context(ctx);
	struct r600_shader *rshader = &rpshader->shader;
	int r;

	/* copy new shader */
	radeon_bo_decref(rscreen->rw, rpshader->bo);
	rpshader->bo = NULL;
	rpshader->bo = radeon_bo(rscreen->rw, 0, rshader->bc.ndw * 4,
				4096, NULL);
	if (rpshader->bo == NULL) {
		return -ENOMEM;
	}
	radeon_bo_map(rscreen->rw, rpshader->bo);
	memcpy(rpshader->bo->data, rshader->bc.bytecode, rshader->bc.ndw * 4);
	radeon_bo_unmap(rscreen->rw, rpshader->bo);
	/* build state */
	rshader->flat_shade = rctx->flat_shade;
	switch (rshader->processor_type) {
	case TGSI_PROCESSOR_VERTEX:
		r = r600_pipe_shader_vs(ctx, rpshader);
		break;
	case TGSI_PROCESSOR_FRAGMENT:
		r = r600_pipe_shader_ps(ctx, rpshader);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
247
248 int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_context_state *rpshader)
249 {
250 struct r600_context *rctx = r600_context(ctx);
251 int r;
252
253 if (rpshader == NULL)
254 return -EINVAL;
255 /* there should be enough input */
256 if (rctx->vertex_elements->count < rpshader->shader.bc.nresource) {
257 R600_ERR("%d resources provided, expecting %d\n",
258 rctx->vertex_elements->count, rpshader->shader.bc.nresource);
259 return -EINVAL;
260 }
261 r = r600_shader_update(ctx, &rpshader->shader);
262 if (r)
263 return r;
264 return r600_pipe_shader(ctx, rpshader);
265 }
266
267 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
268 {
269 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
270 int j;
271
272 if (i->Instruction.NumDstRegs > 1) {
273 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
274 return -EINVAL;
275 }
276 if (i->Instruction.Predicate) {
277 R600_ERR("predicate unsupported\n");
278 return -EINVAL;
279 }
280 #if 0
281 if (i->Instruction.Label) {
282 R600_ERR("label unsupported\n");
283 return -EINVAL;
284 }
285 #endif
286 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
287 if (i->Src[j].Register.Dimension ||
288 i->Src[j].Register.Absolute) {
289 R600_ERR("unsupported src %d (dimension %d|absolute %d)\n", j,
290 i->Src[j].Register.Dimension,
291 i->Src[j].Register.Absolute);
292 return -EINVAL;
293 }
294 }
295 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
296 if (i->Dst[j].Register.Dimension) {
297 R600_ERR("unsupported dst (dimension)\n");
298 return -EINVAL;
299 }
300 }
301 return 0;
302 }
303
/* Process a TGSI declaration token.
 *
 * Inputs and outputs are recorded in the shader's tables with their
 * assigned GPRs; for vertex shaders each input additionally becomes a
 * vertex-fetch instruction. Constant/temporary/sampler/address files
 * need no per-declaration work. Returns 0 or -EINVAL for an
 * unsupported file.
 */
static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
	struct r600_bc_vtx vtx;
	unsigned i;
	int r;

	switch (d->Declaration.File) {
	case TGSI_FILE_INPUT:
		i = ctx->shader->ninput++;
		ctx->shader->input[i].name = d->Semantic.Name;
		ctx->shader->input[i].sid = d->Semantic.Index;
		ctx->shader->input[i].interpolate = d->Declaration.Interpolate;
		ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + i;
		if (ctx->type == TGSI_PROCESSOR_VERTEX) {
			/* turn input into fetch */
			memset(&vtx, 0, sizeof(struct r600_bc_vtx));
			vtx.inst = 0;
			vtx.fetch_type = 0;
			/* one vertex buffer per input, in declaration order */
			vtx.buffer_id = i;
			/* register containing the index into the buffer */
			vtx.src_gpr = 0;
			vtx.src_sel_x = 0;
			vtx.mega_fetch_count = 0x1F;
			vtx.dst_gpr = ctx->shader->input[i].gpr;
			/* identity swizzle; fixed up later from the real
			 * vertex format by r600_shader_update() */
			vtx.dst_sel_x = 0;
			vtx.dst_sel_y = 1;
			vtx.dst_sel_z = 2;
			vtx.dst_sel_w = 3;
			r = r600_bc_add_vtx(ctx->bc, &vtx);
			if (r)
				return r;
		}
		break;
	case TGSI_FILE_OUTPUT:
		i = ctx->shader->noutput++;
		ctx->shader->output[i].name = d->Semantic.Name;
		ctx->shader->output[i].sid = d->Semantic.Index;
		ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + i;
		ctx->shader->output[i].interpolate = d->Declaration.Interpolate;
		break;
	case TGSI_FILE_CONSTANT:
	case TGSI_FILE_TEMPORARY:
	case TGSI_FILE_SAMPLER:
	case TGSI_FILE_ADDRESS:
		/* nothing to do at declaration time */
		break;
	default:
		R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
		return -EINVAL;
	}
	return 0;
}
356
357 int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
358 {
359 struct tgsi_full_immediate *immediate;
360 struct r600_shader_ctx ctx;
361 struct r600_bc_output output[32];
362 unsigned output_done, noutput;
363 unsigned opcode;
364 int i, r = 0, pos0;
365
366 ctx.bc = &shader->bc;
367 ctx.shader = shader;
368 r = r600_bc_init(ctx.bc, shader->family);
369 if (r)
370 return r;
371 ctx.tokens = tokens;
372 tgsi_scan_shader(tokens, &ctx.info);
373 tgsi_parse_init(&ctx.parse, tokens);
374 ctx.type = ctx.parse.FullHeader.Processor.Processor;
375 shader->processor_type = ctx.type;
376
377 /* register allocations */
378 /* Values [0,127] correspond to GPR[0..127].
379 * Values [128,159] correspond to constant buffer bank 0
380 * Values [160,191] correspond to constant buffer bank 1
381 * Values [256,511] correspond to cfile constants c[0..255].
382 * Other special values are shown in the list below.
383 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
384 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
385 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
386 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
387 * 248 SQ_ALU_SRC_0: special constant 0.0.
388 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
389 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
390 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
391 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
392 * 253 SQ_ALU_SRC_LITERAL: literal constant.
393 * 254 SQ_ALU_SRC_PV: previous vector result.
394 * 255 SQ_ALU_SRC_PS: previous scalar result.
395 */
396 for (i = 0; i < TGSI_FILE_COUNT; i++) {
397 ctx.file_offset[i] = 0;
398 }
399 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
400 ctx.file_offset[TGSI_FILE_INPUT] = 1;
401 }
402 ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] +
403 ctx.info.file_count[TGSI_FILE_INPUT];
404 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
405 ctx.info.file_count[TGSI_FILE_OUTPUT];
406 ctx.file_offset[TGSI_FILE_CONSTANT] = 256;
407 ctx.file_offset[TGSI_FILE_IMMEDIATE] = 253;
408 ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
409 ctx.info.file_count[TGSI_FILE_TEMPORARY];
410
411 ctx.nliterals = 0;
412 ctx.literals = NULL;
413
414 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
415 tgsi_parse_token(&ctx.parse);
416 switch (ctx.parse.FullToken.Token.Type) {
417 case TGSI_TOKEN_TYPE_IMMEDIATE:
418 immediate = &ctx.parse.FullToken.FullImmediate;
419 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
420 if(ctx.literals == NULL) {
421 r = -ENOMEM;
422 goto out_err;
423 }
424 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
425 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
426 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
427 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
428 ctx.nliterals++;
429 break;
430 case TGSI_TOKEN_TYPE_DECLARATION:
431 r = tgsi_declaration(&ctx);
432 if (r)
433 goto out_err;
434 break;
435 case TGSI_TOKEN_TYPE_INSTRUCTION:
436 r = tgsi_is_supported(&ctx);
437 if (r)
438 goto out_err;
439 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
440 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
441 r = ctx.inst_info->process(&ctx);
442 if (r)
443 goto out_err;
444 r = r600_bc_add_literal(ctx.bc, ctx.value);
445 if (r)
446 goto out_err;
447 break;
448 default:
449 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
450 r = -EINVAL;
451 goto out_err;
452 }
453 }
454 /* export output */
455 noutput = shader->noutput;
456 for (i = 0, pos0 = 0; i < noutput; i++) {
457 memset(&output[i], 0, sizeof(struct r600_bc_output));
458 output[i].gpr = shader->output[i].gpr;
459 output[i].elem_size = 3;
460 output[i].swizzle_x = 0;
461 output[i].swizzle_y = 1;
462 output[i].swizzle_z = 2;
463 output[i].swizzle_w = 3;
464 output[i].barrier = 1;
465 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
466 output[i].array_base = i - pos0;
467 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
468 switch (ctx.type) {
469 case TGSI_PROCESSOR_VERTEX:
470 if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
471 output[i].array_base = 60;
472 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
473 /* position doesn't count in array_base */
474 pos0++;
475 }
476 if (shader->output[i].name == TGSI_SEMANTIC_PSIZE) {
477 output[i].array_base = 61;
478 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
479 /* position doesn't count in array_base */
480 pos0++;
481 }
482 break;
483 case TGSI_PROCESSOR_FRAGMENT:
484 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
485 output[i].array_base = shader->output[i].sid;
486 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
487 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
488 output[i].array_base = 61;
489 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
490 } else {
491 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
492 r = -EINVAL;
493 goto out_err;
494 }
495 break;
496 default:
497 R600_ERR("unsupported processor type %d\n", ctx.type);
498 r = -EINVAL;
499 goto out_err;
500 }
501 }
502 /* add fake param output for vertex shader if no param is exported */
503 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
504 for (i = 0, pos0 = 0; i < noutput; i++) {
505 if (output[i].type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM) {
506 pos0 = 1;
507 break;
508 }
509 }
510 if (!pos0) {
511 memset(&output[i], 0, sizeof(struct r600_bc_output));
512 output[i].gpr = 0;
513 output[i].elem_size = 3;
514 output[i].swizzle_x = 0;
515 output[i].swizzle_y = 1;
516 output[i].swizzle_z = 2;
517 output[i].swizzle_w = 3;
518 output[i].barrier = 1;
519 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
520 output[i].array_base = 0;
521 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
522 noutput++;
523 }
524 }
525 /* add fake pixel export */
526 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && !noutput) {
527 memset(&output[0], 0, sizeof(struct r600_bc_output));
528 output[0].gpr = 0;
529 output[0].elem_size = 3;
530 output[0].swizzle_x = 7;
531 output[0].swizzle_y = 7;
532 output[0].swizzle_z = 7;
533 output[0].swizzle_w = 7;
534 output[0].barrier = 1;
535 output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
536 output[0].array_base = 0;
537 output[0].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
538 noutput++;
539 }
540 /* set export done on last export of each type */
541 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
542 if (i == (noutput - 1)) {
543 output[i].end_of_program = 1;
544 }
545 if (!(output_done & (1 << output[i].type))) {
546 output_done |= (1 << output[i].type);
547 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE;
548 }
549 }
550 /* add output to bytecode */
551 for (i = 0; i < noutput; i++) {
552 r = r600_bc_add_output(ctx.bc, &output[i]);
553 if (r)
554 goto out_err;
555 }
556 free(ctx.literals);
557 tgsi_parse_free(&ctx.parse);
558 return 0;
559 out_err:
560 free(ctx.literals);
561 tgsi_parse_free(&ctx.parse);
562 return r;
563 }
564
/* Fallback handler for TGSI opcodes without an r600 translation. */
static int tgsi_unsupported(struct r600_shader_ctx *ctx)
{
	R600_ERR("%d tgsi opcode unsupported\n", ctx->inst_info->tgsi_opcode);
	return -EINVAL;
}
570
/* TGSI_OPCODE_END handler: nothing to emit, exports are added later. */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}
575
/* Translate a TGSI source operand into an r600 ALU source.
 *
 * Immediates become inline literals: sel is reset so that adding the
 * IMMEDIATE file offset (253) yields SQ_ALU_SRC_LITERAL, and the four
 * literal words are staged in ctx->value for the caller to emit via
 * r600_bc_add_literal().
 * NOTE(review): ctx->literals is indexed without a bounds check —
 * assumes the stream only references previously declared immediates.
 */
static int tgsi_src(struct r600_shader_ctx *ctx,
			const struct tgsi_full_src_register *tgsi_src,
			struct r600_bc_alu_src *r600_src)
{
	int index;
	memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
	r600_src->sel = tgsi_src->Register.Index;
	if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
		r600_src->sel = 0;
		index = tgsi_src->Register.Index;
		/* stage the literal words for emission after the instruction */
		ctx->value[0] = ctx->literals[index * 4 + 0];
		ctx->value[1] = ctx->literals[index * 4 + 1];
		ctx->value[2] = ctx->literals[index * 4 + 2];
		ctx->value[3] = ctx->literals[index * 4 + 3];
	}
	if (tgsi_src->Register.Indirect)
		r600_src->rel = V_SQ_REL_RELATIVE;
	r600_src->neg = tgsi_src->Register.Negate;
	/* rebase the register index into the file's allocated range */
	r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
	return 0;
}
597
598 static int tgsi_dst(struct r600_shader_ctx *ctx,
599 const struct tgsi_full_dst_register *tgsi_dst,
600 unsigned swizzle,
601 struct r600_bc_alu_dst *r600_dst)
602 {
603 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
604
605 r600_dst->sel = tgsi_dst->Register.Index;
606 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
607 r600_dst->chan = swizzle;
608 r600_dst->write = 1;
609 if (tgsi_dst->Register.Indirect)
610 r600_dst->rel = V_SQ_REL_RELATIVE;
611 if (inst->Instruction.Saturate) {
612 r600_dst->clamp = 1;
613 }
614 return 0;
615 }
616
617 static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
618 {
619 switch (swizzle) {
620 case 0:
621 return tgsi_src->Register.SwizzleX;
622 case 1:
623 return tgsi_src->Register.SwizzleY;
624 case 2:
625 return tgsi_src->Register.SwizzleZ;
626 case 3:
627 return tgsi_src->Register.SwizzleW;
628 default:
629 return 0;
630 }
631 }
632
/* Translate all sources of the current instruction and, when more than
 * one constant-file operand is present, copy constants into scratch
 * temporaries via MOVs (the ALU cannot read multiple constant registers
 * in one instruction group).
 *
 * NOTE(review): the second loop iterates i but tests inst->Src[j], and
 * both the MOV source and the rewritten selector always use
 * r600_src[0] rather than the operand at index j — this mapping looks
 * suspicious and should be confirmed against the intended operand
 * layout before any restyle.
 */
static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, j, k, nconst, r;

	/* first pass: count constants and translate every source */
	for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
			nconst++;
		}
		r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
		if (r) {
			return r;
		}
	}
	/* second pass: emit a 4-channel MOV into a temp for extra constants */
	for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[j].Register.File == TGSI_FILE_CONSTANT && j > 0) {
			for (k = 0; k < 4; k++) {
				memset(&alu, 0, sizeof(struct r600_bc_alu));
				alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
				alu.src[0].sel = r600_src[0].sel;
				alu.src[0].chan = k;
				alu.dst.sel = ctx->temp_reg + j;
				alu.dst.chan = k;
				alu.dst.write = 1;
				/* close the ALU group after the 4th channel */
				if (k == 3)
					alu.last = 1;
				r = r600_bc_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			/* redirect the operand to read the temporary copy */
			r600_src[0].sel = ctx->temp_reg + j;
			j--;
		}
	}
	return 0;
}
670
/* Emit a per-channel two-source ALU instruction for every channel
 * enabled in the write mask.
 *
 * 'swap' exchanges the two source operands (used by *_swap table
 * entries where the r600 opcode takes operands in the opposite order
 * from TGSI). SUB and ABS are implemented on top of the mapped opcode
 * by toggling source modifiers. The instruction emitted for the
 * highest enabled channel closes the ALU group.
 */
static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;
	int lasti = 0;

	/* find the highest enabled channel so 'last' lands on it */
	for (i = 0; i < 4; i++) {
		if (inst->Dst[0].Register.WriteMask & (1 << i)) {
			lasti = i;
		}
	}

	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bc_alu));
		r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		if (r)
			return r;

		alu.inst = ctx->inst_info->r600_opcode;
		if (!swap) {
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				alu.src[j] = r600_src[j];
				alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
			}
		} else {
			alu.src[0] = r600_src[1];
			alu.src[0].chan = tgsi_chan(&inst->Src[1], i);

			alu.src[1] = r600_src[0];
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		}
		/* handle some special cases */
		switch (ctx->inst_info->tgsi_opcode) {
		case TGSI_OPCODE_SUB:
			/* a - b emitted as a + (-b) */
			alu.src[1].neg = 1;
			break;
		case TGSI_OPCODE_ABS:
			alu.src[0].abs = 1;
			break;
		default:
			break;
		}
		if (i == lasti) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
730
/* Two-source ALU op with TGSI operand order. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0);
}
735
/* Two-source ALU op with operands swapped for the r600 encoding. */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 1);
}
740
741 /*
742 * r600 - trunc to -PI..PI range
743 * r700 - normalize by dividing by 2PI
744 * see fdo bug 27901
745 */
/* Range-reduce the trig argument into temp_reg.x for SIN/COS/SCS.
 *
 * Computes t = fract(src * 1/(2*PI) + 0.5), then maps t back to the
 * range the chip's trig unit expects: on r600 (chiprev == 0) to
 * radians in roughly [-PI, PI) via t*2*PI - PI; on r700 to the
 * normalized range [-0.5, 0.5) via t*1.0 - 0.5. Sources are split
 * out of the constant file first. Returns 0 or a negative errno.
 */
static int tgsi_setup_trig(struct r600_shader_ctx *ctx,
			struct r600_bc_alu_src r600_src[3])
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	int r;
	uint32_t lit_vals[4];
	struct r600_bc_alu alu;

	memset(lit_vals, 0, 4*4);
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	/* literal 0: 1/(2*PI), literal 1: 0.5 bias */
	lit_vals[0] = fui(1.0 /(3.1415926535 * 2));
	lit_vals[1] = fui(0.5f);

	/* temp.x = src * 1/(2*PI) + 0.5 */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
	alu.is_op3 = 1;

	alu.dst.chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;

	alu.src[0] = r600_src[0];
	alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);

	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].chan = 0;
	alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[2].chan = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	r = r600_bc_add_literal(ctx->bc, lit_vals);
	if (r)
		return r;

	/* temp.x = fract(temp.x) -- wraps the argument into [0, 1) */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;

	alu.dst.chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;

	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 0;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* pick the scale/bias that undoes the normalization per chip rev */
	if (ctx->bc->chiprev == 0) {
		lit_vals[0] = fui(3.1415926535897f * 2.0f);
		lit_vals[1] = fui(-3.1415926535897f);
	} else {
		lit_vals[0] = fui(1.0f);
		lit_vals[1] = fui(-0.5f);
	}

	/* temp.x = temp.x * scale + bias */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
	alu.is_op3 = 1;

	alu.dst.chan = 0;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;

	alu.src[0].sel = ctx->temp_reg;
	alu.src[0].chan = 0;

	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].chan = 0;
	alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[2].chan = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	r = r600_bc_add_literal(ctx->bc, lit_vals);
	if (r)
		return r;
	return 0;
}
830
831 static int tgsi_trig(struct r600_shader_ctx *ctx)
832 {
833 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
834 struct r600_bc_alu_src r600_src[3];
835 struct r600_bc_alu alu;
836 int i, r;
837
838 r = tgsi_split_constant(ctx, r600_src);
839 if (r)
840 return r;
841
842 r = tgsi_setup_trig(ctx, r600_src);
843 if (r)
844 return r;
845
846 memset(&alu, 0, sizeof(struct r600_bc_alu));
847 alu.inst = ctx->inst_info->r600_opcode;
848 alu.dst.chan = 0;
849 alu.dst.sel = ctx->temp_reg;
850 alu.dst.write = 1;
851
852 alu.src[0].sel = ctx->temp_reg;
853 alu.src[0].chan = 0;
854 alu.last = 1;
855 r = r600_bc_add_alu(ctx->bc, &alu);
856 if (r)
857 return r;
858
859 /* replicate result */
860 for (i = 0; i < 4; i++) {
861 memset(&alu, 0, sizeof(struct r600_bc_alu));
862 alu.src[0].sel = ctx->temp_reg;
863 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
864 alu.dst.chan = i;
865 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
866 if (r)
867 return r;
868 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
869 if (i == 3)
870 alu.last = 1;
871 r = r600_bc_add_alu(ctx->bc, &alu);
872 if (r)
873 return r;
874 }
875 return 0;
876 }
877
878 static int tgsi_scs(struct r600_shader_ctx *ctx)
879 {
880 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
881 struct r600_bc_alu_src r600_src[3];
882 struct r600_bc_alu alu;
883 int r;
884
885 r = tgsi_split_constant(ctx, r600_src);
886 if (r)
887 return r;
888
889 r = tgsi_setup_trig(ctx, r600_src);
890 if (r)
891 return r;
892
893
894 /* dst.x = COS */
895 memset(&alu, 0, sizeof(struct r600_bc_alu));
896 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS;
897 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
898 if (r)
899 return r;
900
901 alu.src[0].sel = ctx->temp_reg;
902 alu.src[0].chan = 0;
903 alu.last = 1;
904 r = r600_bc_add_alu(ctx->bc, &alu);
905 if (r)
906 return r;
907
908 /* dst.y = SIN */
909 memset(&alu, 0, sizeof(struct r600_bc_alu));
910 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN;
911 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
912 if (r)
913 return r;
914
915 alu.src[0].sel = ctx->temp_reg;
916 alu.src[0].chan = 0;
917 alu.last = 1;
918 r = r600_bc_add_alu(ctx->bc, &alu);
919 if (r)
920 return r;
921 return 0;
922 }
923
/* Emit KIL/KILP as a 4-channel kill instruction.
 *
 * For KILP src1 is the constant -1 (1.0 negated) so the condition
 * always triggers (presumably a greater-than kill opcode — confirm
 * against the opcode table); for KIL the shader's source operand is
 * compared per channel. The containing CF clause is forced closed
 * afterwards and the shader is flagged as using kill.
 */
static int tgsi_kill(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int i, r;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = ctx->inst_info->r600_opcode;

		alu.dst.chan = i;

		alu.src[0].sel = V_SQ_ALU_SRC_0;

		if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_KILP) {
			/* unconditional kill: compare against -1.0 */
			alu.src[1].sel = V_SQ_ALU_SRC_1;
			alu.src[1].neg = 1;
		} else {
			r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
			if (r)
				return r;
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	/* flush any literal staged by tgsi_src() above */
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* kill must be last in ALU */
	ctx->bc->force_add_cf = 1;
	ctx->shader->uses_kill = TRUE;
	return 0;
}
963
/* Emit TGSI LIT (lighting coefficients):
 *   dst.x = 1.0
 *   dst.y = max(src.x, 0)
 *   dst.z = src.x > 0 ? pow(src.y, clamp(src.w)) : 0   (via LOG/MUL_LIT/EXP)
 *   dst.w = 1.0
 * The x/y/w channels plus a NOP z-slot filler are emitted as one ALU
 * group; the z computation, when enabled in the write mask, follows as
 * three extra instructions through a scratch temp.
 */
static int tgsi_lit(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int r;

	/* dst.x, <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
	alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
	alu.src[0].chan = 0;
	r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	if (r)
		return r;
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.y = max(src.x, 0.0) */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX;
	r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
	if (r)
		return r;
	alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
	alu.src[1].chan = tgsi_chan(&inst->Src[0], 0);
	r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
	if (r)
		return r;
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.z = NOP - fill Z slot */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
	alu.dst.chan = 2;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.w, <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
	alu.src[0].sel = V_SQ_ALU_SRC_1;
	alu.src[0].chan = 0;
	r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
	if (r)
		return r;
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	if (inst->Dst[0].Register.WriteMask & (1 << 2))
	{
		int chan;
		int sel;

		/* dst.z = log(src.y) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
		if (r)
			return r;
		alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
		r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
		if (r)
			return r;
		alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		/* remember where the log result landed for the next op */
		chan = alu.dst.chan;
		sel = alu.dst.sel;

		/* tmp.x = amd MUL_LIT(src.w, dst.z, src.x ) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
		if (r)
			return r;
		alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
		alu.src[1].sel = sel;
		alu.src[1].chan = chan;
		r = tgsi_src(ctx, &inst->Src[0], &alu.src[2]);
		if (r)
			return r;
		alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 0;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		/* dst.z = exp(tmp.x) */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = 0;
		r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
		if (r)
			return r;
		alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
1081
1082 static int tgsi_trans(struct r600_shader_ctx *ctx)
1083 {
1084 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1085 struct r600_bc_alu alu;
1086 int i, j, r;
1087
1088 for (i = 0; i < 4; i++) {
1089 memset(&alu, 0, sizeof(struct r600_bc_alu));
1090 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
1091 alu.inst = ctx->inst_info->r600_opcode;
1092 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1093 r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
1094 if (r)
1095 return r;
1096 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1097 }
1098 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1099 if (r)
1100 return r;
1101 alu.last = 1;
1102 r = r600_bc_add_alu(ctx->bc, &alu);
1103 if (r)
1104 return r;
1105 }
1106 }
1107 return 0;
1108 }
1109
1110 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
1111 {
1112 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1113 struct r600_bc_alu alu;
1114 int i, r;
1115
1116 for (i = 0; i < 4; i++) {
1117 memset(&alu, 0, sizeof(struct r600_bc_alu));
1118 alu.src[0].sel = ctx->temp_reg;
1119 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1120 alu.dst.chan = i;
1121 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1122 if (r)
1123 return r;
1124 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
1125 if (i == 3)
1126 alu.last = 1;
1127 r = r600_bc_add_alu(ctx->bc, &alu);
1128 if (r)
1129 return r;
1130 }
1131 return 0;
1132 }
1133
1134 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
1135 {
1136 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1137 struct r600_bc_alu alu;
1138 int i, r;
1139
1140 memset(&alu, 0, sizeof(struct r600_bc_alu));
1141 alu.inst = ctx->inst_info->r600_opcode;
1142 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1143 r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]);
1144 if (r)
1145 return r;
1146 alu.src[i].chan = tgsi_chan(&inst->Src[i], 0);
1147 }
1148 alu.dst.sel = ctx->temp_reg;
1149 alu.dst.write = 1;
1150 alu.last = 1;
1151 r = r600_bc_add_alu(ctx->bc, &alu);
1152 if (r)
1153 return r;
1154 /* replicate result */
1155 return tgsi_helper_tempx_replicate(ctx);
1156 }
1157
/* POW(a, b) emitted as EXP2(b * LOG2(a)) into temp_reg, then the scalar
 * result is replicated to all write-enabled destination channels.
 * Transcendentals (LOG/EXP) go alone, so each step closes its ALU group. */
static int tgsi_pow(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	int r;

	/* LOG2(a) */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE;
	r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
	if (r)
		return r;
	alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	/* flush literal constants referenced by the finished group */
	r = r600_bc_add_literal(ctx->bc,ctx->value);
	if (r)
		return r;
	/* b * LOG2(a) */
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL_IEEE;
	r = tgsi_src(ctx, &inst->Src[1], &alu.src[0]);
	if (r)
		return r;
	alu.src[0].chan = tgsi_chan(&inst->Src[1], 0);
	alu.src[1].sel = ctx->temp_reg;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	r = r600_bc_add_literal(ctx->bc,ctx->value);
	if (r)
		return r;
	/* POW(a,b) = EXP2(b * LOG2(a))*/
	memset(&alu, 0, sizeof(struct r600_bc_alu));
	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
	alu.src[0].sel = ctx->temp_reg;
	alu.dst.sel = ctx->temp_reg;
	alu.dst.write = 1;
	alu.last = 1;
	r = r600_bc_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	r = r600_bc_add_literal(ctx->bc,ctx->value);
	if (r)
		return r;
	/* broadcast temp_reg.x to the masked destination channels */
	return tgsi_helper_tempx_replicate(ctx);
}
1212
/* SSG: per-channel sign of the source (-1, 0, +1), built from two
 * conditional moves (CNDGT) since r600 has no native sign instruction. */
static int tgsi_ssg(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu alu;
	struct r600_bc_alu_src r600_src[3];
	int i, r;

	/* rewrite sources so an ALU group doesn't read multiple constants */
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;

	/* tmp = (src > 0 ? 1 : src) */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
		alu.is_op3 = 1;

		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;

		alu.src[0] = r600_src[0];
		alu.src[0].chan = tgsi_chan(&inst->Src[0], i);

		alu.src[1].sel = V_SQ_ALU_SRC_1;

		alu.src[2] = r600_src[0];
		alu.src[2].chan = tgsi_chan(&inst->Src[0], i);
		if (i == 3)
			alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* dst = (-tmp > 0 ? -1 : tmp) */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
		alu.is_op3 = 1;
		r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		if (r)
			return r;

		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;
		alu.src[0].neg = 1;

		alu.src[1].sel = V_SQ_ALU_SRC_1;
		alu.src[1].neg = 1;

		alu.src[2].sel = ctx->temp_reg;
		alu.src[2].chan = i;

		if (i == 3)
			alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
1277
1278 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
1279 {
1280 struct r600_bc_alu alu;
1281 int i, r;
1282
1283 r = r600_bc_add_literal(ctx->bc, ctx->value);
1284 if (r)
1285 return r;
1286 for (i = 0; i < 4; i++) {
1287 memset(&alu, 0, sizeof(struct r600_bc_alu));
1288 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
1289 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
1290 alu.dst.chan = i;
1291 } else {
1292 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1293 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1294 if (r)
1295 return r;
1296 alu.src[0].sel = ctx->temp_reg;
1297 alu.src[0].chan = i;
1298 }
1299 if (i == 3) {
1300 alu.last = 1;
1301 }
1302 r = r600_bc_add_alu(ctx->bc, &alu);
1303 if (r)
1304 return r;
1305 }
1306 return 0;
1307 }
1308
/* Emit a three-source ALU opcode (MULADD, CNDxx, ...).  OP3 instructions
 * have no write mask, so compute all four channels into temp_reg and then
 * copy only the masked channels to the real destination. */
static int tgsi_op3(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;

	/* rewrite sources so an ALU group doesn't read multiple constants */
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	/* do it in 2 step as op3 doesn't support writemask */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = ctx->inst_info->r600_opcode;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			alu.src[j] = r600_src[j];
			alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
		}
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	/* step 2: masked copy out of temp_reg */
	return tgsi_helper_copy(ctx, inst);
}
1340
/* Dot-product family (DP2/DP3/DP4/DPH) via the DOT4 opcode: lanes that the
 * shorter variants don't use are overridden to contribute 0 (or 1.0 for
 * DPH's src0.w); the scalar result is copied out through temp_reg. */
static int tgsi_dp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int i, j, r;

	/* rewrite sources so an ALU group doesn't read multiple constants */
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = ctx->inst_info->r600_opcode;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			alu.src[j] = r600_src[j];
			alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
		}
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		/* handle some special cases */
		switch (ctx->inst_info->tgsi_opcode) {
		case TGSI_OPCODE_DP2:
			/* zero out the z and w lanes */
			if (i > 1) {
				alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DP3:
			/* zero out the w lane */
			if (i > 2) {
				alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DPH:
			/* DPH treats src0.w as 1.0 */
			if (i == 3) {
				alu.src[0].sel = V_SQ_ALU_SRC_1;
				alu.src[0].chan = 0;
				alu.src[0].neg = 0;
			}
			break;
		default:
			break;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	/* masked copy out of temp_reg */
	return tgsi_helper_copy(ctx, inst);
}
1394
/* Translate a TGSI texture instruction (TEX/TXP/DDX/DDY, opcode taken
 * from the instruction table) into an r600 TEX clause instruction,
 * staging the coordinates in temp_reg when needed. */
static int tgsi_tex(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_tex tex;
	struct r600_bc_alu alu;
	unsigned src_gpr;
	int r, i;

	src_gpr = ctx->file_offset[inst->Src[0].Register.File] + inst->Src[0].Register.Index;

	if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
		/* Add perspective divide: tmp.w = 1 / coord.w */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
		alu.src[0].sel = src_gpr;
		alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 3;
		alu.last = 1;
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		/* tmp.xyz = coord.xyz * tmp.w */
		for (i = 0; i < 3; i++) {
			memset(&alu, 0, sizeof(struct r600_bc_alu));
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 3;
			alu.src[1].sel = src_gpr;
			alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			alu.dst.write = 1;
			r = r600_bc_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		/* tmp.w = 1.0 */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
		alu.src[0].sel = V_SQ_ALU_SRC_1;
		alu.src[0].chan = 0;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 3;
		alu.last = 1;
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
		src_gpr = ctx->temp_reg;
	} else if (inst->Src[0].Register.File != TGSI_FILE_TEMPORARY) {
		/* TEX reads a GPR: copy non-temporary coordinate sources
		 * (inputs, constants) into temp_reg first */
		for (i = 0; i < 4; i++) {
			memset(&alu, 0, sizeof(struct r600_bc_alu));
			alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
			alu.src[0].sel = src_gpr;
			alu.src[0].chan = i;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			if (i == 3)
				alu.last = 1;
			alu.dst.write = 1;
			r = r600_bc_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		src_gpr = ctx->temp_reg;
	}

	memset(&tex, 0, sizeof(struct r600_bc_tex));
	tex.inst = ctx->inst_info->r600_opcode;
	/* sampler and resource slots are kept in lockstep */
	tex.resource_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
	tex.sampler_id = tex.resource_id;
	tex.src_gpr = src_gpr;
	tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
	/* identity swizzles on both source and destination */
	tex.dst_sel_x = 0;
	tex.dst_sel_y = 1;
	tex.dst_sel_z = 2;
	tex.dst_sel_w = 3;
	tex.src_sel_x = 0;
	tex.src_sel_y = 1;
	tex.src_sel_z = 2;
	tex.src_sel_w = 3;

	/* normalized coordinates for everything except RECT targets */
	if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
		tex.coord_type_x = 1;
		tex.coord_type_y = 1;
		tex.coord_type_z = 1;
		tex.coord_type_w = 1;
	}
	return r600_bc_add_tex(ctx->bc, &tex);
}
1486
/* LRP: dst = src0 * src1 + (1 - src0) * src2, computed per channel into
 * temp_reg in three passes (ADD, MUL, MULADD) and then copied out with
 * the destination write mask. */
static int tgsi_lrp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	unsigned i;
	int r;

	/* rewrite sources so an ALU group doesn't read multiple constants */
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;
	/* 1 - src0 */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD;
		alu.src[0].sel = V_SQ_ALU_SRC_1;
		alu.src[0].chan = 0;
		alu.src[1] = r600_src[0];
		alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
		alu.src[1].neg = 1;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		if (i == 3) {
			alu.last = 1;
		}
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* (1 - src0) * src2 */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;
		alu.src[1] = r600_src[2];
		alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		if (i == 3) {
			alu.last = 1;
		}
		alu.dst.write = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	r = r600_bc_add_literal(ctx->bc, ctx->value);
	if (r)
		return r;

	/* src0 * src1 + (1 - src0) * src2 */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
		alu.is_op3 = 1;
		alu.src[0] = r600_src[0];
		alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
		alu.src[1] = r600_src[1];
		alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
		alu.src[2].sel = ctx->temp_reg;
		alu.src[2].chan = i;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	/* masked copy out of temp_reg */
	return tgsi_helper_copy(ctx, inst);
}
1565
/* CMP: per channel, dst = (src0 >= 0) ? src2 : src1, via CNDGE.  Writes
 * the destination directly when the full mask is set; otherwise builds
 * the result in temp_reg and copies the masked channels afterwards. */
static int tgsi_cmp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	int use_temp = 0;
	int i, r;

	/* rewrite sources so an ALU group doesn't read multiple constants */
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;

	if (inst->Dst[0].Register.WriteMask != 0xf)
		use_temp = 1;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE;
		alu.src[0] = r600_src[0];
		alu.src[0].chan = tgsi_chan(&inst->Src[0], i);

		/* CNDGE picks src[1] when src[0] >= 0, hence src2 here */
		alu.src[1] = r600_src[2];
		alu.src[1].chan = tgsi_chan(&inst->Src[2], i);

		alu.src[2] = r600_src[1];
		alu.src[2].chan = tgsi_chan(&inst->Src[1], i);

		if (use_temp)
			alu.dst.sel = ctx->temp_reg;
		else {
			r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (r)
				return r;
		}
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		if (i == 3)
			alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	if (use_temp)
		return tgsi_helper_copy(ctx, inst);
	return 0;
}
1613
/* XPD (cross product): dst.xyz = src0.yzx * src1.zxy - src0.zxy * src1.yzx,
 * done as a MUL pass into temp_reg followed by a MULADD pass that subtracts
 * (negated third operand).  The .w lanes use forced-zero sources.
 * NOTE(review): TGSI defines XPD.w = 1.0, but this sequence yields
 * 0*0 - 0 = 0 for .w — confirm against the TGSI spec and fix if needed. */
static int tgsi_xpd(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bc_alu_src r600_src[3];
	struct r600_bc_alu alu;
	uint32_t use_temp = 0;
	int i, r;

	if (inst->Dst[0].Register.WriteMask != 0xf)
		use_temp = 1;

	/* rewrite sources so an ALU group doesn't read multiple constants */
	r = tgsi_split_constant(ctx, r600_src);
	if (r)
		return r;

	/* tmp = src0.yzx * src1.zxy */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;

		alu.src[0] = r600_src[0];
		switch (i) {
		case 0:
			alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
			break;
		case 1:
			alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
			break;
		case 2:
			alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
			break;
		case 3:
			alu.src[0].sel = V_SQ_ALU_SRC_0;
			alu.src[0].chan = i;
		}

		alu.src[1] = r600_src[1];
		switch (i) {
		case 0:
			alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
			break;
		case 1:
			alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
			break;
		case 2:
			alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
			break;
		case 3:
			alu.src[1].sel = V_SQ_ALU_SRC_0;
			alu.src[1].chan = i;
		}

		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;

		if (i == 3)
			alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* dst = src0.zxy * src1.yzx - tmp  (MULADD with negated tmp) */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bc_alu));
		alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;

		alu.src[0] = r600_src[0];
		switch (i) {
		case 0:
			alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
			break;
		case 1:
			alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
			break;
		case 2:
			alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
			break;
		case 3:
			alu.src[0].sel = V_SQ_ALU_SRC_0;
			alu.src[0].chan = i;
		}

		alu.src[1] = r600_src[1];
		switch (i) {
		case 0:
			alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
			break;
		case 1:
			alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
			break;
		case 2:
			alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
			break;
		case 3:
			alu.src[1].sel = V_SQ_ALU_SRC_0;
			alu.src[1].chan = i;
		}

		alu.src[2].sel = ctx->temp_reg;
		alu.src[2].neg = 1;
		alu.src[2].chan = i;

		if (use_temp)
			alu.dst.sel = ctx->temp_reg;
		else {
			r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (r)
				return r;
		}
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		if (i == 3)
			alu.last = 1;
		r = r600_bc_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	if (use_temp)
		return tgsi_helper_copy(ctx, inst);
	return 0;
}
1736
1737 static int tgsi_exp(struct r600_shader_ctx *ctx)
1738 {
1739 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1740 struct r600_bc_alu_src r600_src[3];
1741 struct r600_bc_alu alu;
1742 int r;
1743
1744 /* result.x = 2^floor(src); */
1745 if (inst->Dst[0].Register.WriteMask & 1) {
1746 memset(&alu, 0, sizeof(struct r600_bc_alu));
1747
1748 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR;
1749 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1750 if (r)
1751 return r;
1752
1753 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1754
1755 alu.dst.sel = ctx->temp_reg;
1756 alu.dst.chan = 0;
1757 alu.dst.write = 1;
1758 alu.last = 1;
1759 r = r600_bc_add_alu(ctx->bc, &alu);
1760 if (r)
1761 return r;
1762
1763 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1764 alu.src[0].sel = ctx->temp_reg;
1765 alu.src[0].chan = 0;
1766
1767 alu.dst.sel = ctx->temp_reg;
1768 alu.dst.chan = 0;
1769 alu.dst.write = 1;
1770 alu.last = 1;
1771 r = r600_bc_add_alu(ctx->bc, &alu);
1772 if (r)
1773 return r;
1774 }
1775
1776 /* result.y = tmp - floor(tmp); */
1777 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
1778 memset(&alu, 0, sizeof(struct r600_bc_alu));
1779
1780 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;
1781 alu.src[0] = r600_src[0];
1782 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1783 if (r)
1784 return r;
1785 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1786
1787 alu.dst.sel = ctx->temp_reg;
1788 // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1789 // if (r)
1790 // return r;
1791 alu.dst.write = 1;
1792 alu.dst.chan = 1;
1793
1794 alu.last = 1;
1795
1796 r = r600_bc_add_alu(ctx->bc, &alu);
1797 if (r)
1798 return r;
1799 }
1800
1801 /* result.z = RoughApprox2ToX(tmp);*/
1802 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
1803 memset(&alu, 0, sizeof(struct r600_bc_alu));
1804 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1805 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1806 if (r)
1807 return r;
1808 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1809
1810 alu.dst.sel = ctx->temp_reg;
1811 alu.dst.write = 1;
1812 alu.dst.chan = 2;
1813
1814 alu.last = 1;
1815
1816 r = r600_bc_add_alu(ctx->bc, &alu);
1817 if (r)
1818 return r;
1819
1820 }
1821
1822 /* result.w = 1.0;*/
1823 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
1824 memset(&alu, 0, sizeof(struct r600_bc_alu));
1825
1826 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1827 alu.src[0].sel = V_SQ_ALU_SRC_1;
1828 alu.src[0].chan = 0;
1829
1830 alu.dst.sel = ctx->temp_reg;
1831 alu.dst.chan = 3;
1832 alu.dst.write = 1;
1833 alu.last = 1;
1834 r = r600_bc_add_alu(ctx->bc, &alu);
1835 if (r)
1836 return r;
1837 }
1838 return tgsi_helper_copy(ctx, inst);
1839 }
1840
1841 static int tgsi_arl(struct r600_shader_ctx *ctx)
1842 {
1843 /* TODO from r600c, ar values don't persist between clauses */
1844 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1845 struct r600_bc_alu alu;
1846 int r;
1847 memset(&alu, 0, sizeof(struct r600_bc_alu));
1848
1849 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR;
1850
1851 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1852 if (r)
1853 return r;
1854 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1855
1856 alu.last = 1;
1857
1858 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU);
1859 if (r)
1860 return r;
1861 return 0;
1862 }
1863
1864 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
1865 {
1866 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1867 struct r600_bc_alu alu;
1868 int r;
1869
1870 memset(&alu, 0, sizeof(struct r600_bc_alu));
1871 alu.inst = opcode;
1872 alu.predicate = 1;
1873
1874 alu.dst.sel = ctx->temp_reg;
1875 alu.dst.write = 1;
1876 alu.dst.chan = 0;
1877
1878 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1879 if (r)
1880 return r;
1881 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1882 alu.src[1].sel = V_SQ_ALU_SRC_0;
1883 alu.src[1].chan = 0;
1884
1885 alu.last = 1;
1886
1887 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE);
1888 if (r)
1889 return r;
1890 return 0;
1891 }
1892
1893 static int pops(struct r600_shader_ctx *ctx, int pops)
1894 {
1895 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_POP);
1896 ctx->bc->cf_last->pop_count = pops;
1897 return 0;
1898 }
1899
1900 static inline void callstack_decrease_current(struct r600_shader_ctx *ctx, unsigned reason)
1901 {
1902 switch(reason) {
1903 case FC_PUSH_VPM:
1904 ctx->bc->callstack[ctx->bc->call_sp].current--;
1905 break;
1906 case FC_PUSH_WQM:
1907 case FC_LOOP:
1908 ctx->bc->callstack[ctx->bc->call_sp].current -= 4;
1909 break;
1910 case FC_REP:
1911 /* TOODO : for 16 vp asic should -= 2; */
1912 ctx->bc->callstack[ctx->bc->call_sp].current --;
1913 break;
1914 }
1915 }
1916
1917 static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only)
1918 {
1919 if (check_max_only) {
1920 int diff;
1921 switch (reason) {
1922 case FC_PUSH_VPM:
1923 diff = 1;
1924 break;
1925 case FC_PUSH_WQM:
1926 diff = 4;
1927 break;
1928 }
1929 if ((ctx->bc->callstack[ctx->bc->call_sp].current + diff) >
1930 ctx->bc->callstack[ctx->bc->call_sp].max) {
1931 ctx->bc->callstack[ctx->bc->call_sp].max =
1932 ctx->bc->callstack[ctx->bc->call_sp].current + diff;
1933 }
1934 return;
1935 }
1936 switch (reason) {
1937 case FC_PUSH_VPM:
1938 ctx->bc->callstack[ctx->bc->call_sp].current++;
1939 break;
1940 case FC_PUSH_WQM:
1941 case FC_LOOP:
1942 ctx->bc->callstack[ctx->bc->call_sp].current += 4;
1943 break;
1944 case FC_REP:
1945 ctx->bc->callstack[ctx->bc->call_sp].current++;
1946 break;
1947 }
1948
1949 if ((ctx->bc->callstack[ctx->bc->call_sp].current) >
1950 ctx->bc->callstack[ctx->bc->call_sp].max) {
1951 ctx->bc->callstack[ctx->bc->call_sp].max =
1952 ctx->bc->callstack[ctx->bc->call_sp].current;
1953 }
1954 }
1955
1956 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
1957 {
1958 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
1959
1960 sp->mid = (struct r600_bc_cf **)realloc((void *)sp->mid,
1961 sizeof(struct r600_bc_cf *) * (sp->num_mid + 1));
1962 sp->mid[sp->num_mid] = ctx->bc->cf_last;
1963 sp->num_mid++;
1964 }
1965
1966 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
1967 {
1968 ctx->bc->fc_sp++;
1969 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
1970 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
1971 }
1972
1973 static void fc_poplevel(struct r600_shader_ctx *ctx)
1974 {
1975 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
1976 if (sp->mid) {
1977 free(sp->mid);
1978 sp->mid = NULL;
1979 }
1980 sp->num_mid = 0;
1981 sp->start = NULL;
1982 sp->type = 0;
1983 ctx->bc->fc_sp--;
1984 }
1985
#if 0
/* Disabled scaffolding for subroutine RETURN / break-on-flag support;
 * kept for reference until CF CALL/RETURN handling is wired up. */
static int emit_return(struct r600_shader_ctx *ctx)
{
	r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_RETURN);
	return 0;
}

static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
{

	r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
	ctx->bc->cf_last->pop_count = pops;
	/* TODO work out offset */
	return 0;
}

static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
{
	return 0;
}

static void emit_testflag(struct r600_shader_ctx *ctx)
{

}

static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
{
	emit_testflag(ctx);
	emit_jump_to_offset(ctx, 1, 4);
	emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
	pops(ctx, ifidx + 1);
	emit_return(ctx);
}

static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
{
	emit_testflag(ctx);

	r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fc_sp);

	pops(ctx, 1);
}
#endif
2033
2034 static int tgsi_if(struct r600_shader_ctx *ctx)
2035 {
2036 emit_logic_pred(ctx, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE);
2037
2038 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
2039
2040 fc_pushlevel(ctx, FC_IF);
2041
2042 callstack_check_depth(ctx, FC_PUSH_VPM, 0);
2043 return 0;
2044 }
2045
2046 static int tgsi_else(struct r600_shader_ctx *ctx)
2047 {
2048 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_ELSE);
2049 ctx->bc->cf_last->pop_count = 1;
2050
2051 fc_set_mid(ctx, ctx->bc->fc_sp);
2052 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
2053 return 0;
2054 }
2055
/* ENDIF: pop the state pushed by the IF and patch the pending branch
 * (the IF's JUMP, or the ELSE if one was recorded) to land past here. */
static int tgsi_endif(struct r600_shader_ctx *ctx)
{
	pops(ctx, 1);
	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
		R600_ERR("if/endif unbalanced in shader\n");
		return -1;
	}

	/* +2: target is the CF entry after cf_last (ids appear to advance
	 * in steps of 2 — matches the fixups in tgsi_endloop) */
	if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
		ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
		ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
	} else {
		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
	}
	fc_poplevel(ctx);

	callstack_decrease_current(ctx, FC_PUSH_VPM);
	return 0;
}
2075
2076 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
2077 {
2078 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL);
2079
2080 fc_pushlevel(ctx, FC_LOOP);
2081
2082 /* check stack depth */
2083 callstack_check_depth(ctx, FC_LOOP, 0);
2084 return 0;
2085 }
2086
/* ENDLOOP: emit LOOP_END and fix up all branch targets recorded for this
 * loop level, then pop it. */
static int tgsi_endloop(struct r600_shader_ctx *ctx)
{
	int i;

	r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END);

	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
		R600_ERR("loop/endloop in shader code are not paired.\n");
		return -EINVAL;
	}

	/* fixup loop pointers - from r600isa
	   LOOP END points to CF after LOOP START,
	   LOOP START point to CF after LOOP END
	   BRK/CONT point to LOOP END CF
	*/
	ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;

	ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;

	/* retarget every BRK/CONT recorded as a mid-point to the LOOP END */
	for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
	}
	/* TODO add LOOPRET support */
	fc_poplevel(ctx);
	callstack_decrease_current(ctx, FC_LOOP);
	return 0;
}
2115
/* BRK/CONT (opcode from the instruction table): find the innermost
 * enclosing loop on the flow-control stack, register the new CF
 * instruction as a mid-point so tgsi_endloop can patch its target, and
 * pop one stack level. */
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
{
	unsigned int fscp;

	/* index 0 is treated as a sentinel (fc_pushlevel pre-increments
	 * fc_sp, so real entries start at 1) */
	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
	{
		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
			break;
	}

	if (fscp == 0) {
		R600_ERR("Break not inside loop/endloop pair\n");
		return -EINVAL;
	}

	r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fscp);

	pops(ctx, 1);
	callstack_check_depth(ctx, FC_PUSH_VPM, 1);
	return 0;
}
2140
/*
 * TGSI -> r600 instruction dispatch table.
 *
 * Each entry is { tgsi_opcode, is_op3, r600_opcode, process }:
 *   - tgsi_opcode: the TGSI opcode this entry handles;
 *   - is_op3:      nonzero for three-source ALU ops (here only MULADD) —
 *                  presumably selects the OP3 encoding; confirm against
 *                  struct r600_shader_tgsi_instruction's field names;
 *   - r600_opcode: the hardware ALU/TEX/CF instruction to emit (NOP when
 *                  the handler picks its own opcodes or the op is
 *                  unsupported);
 *   - process:     emit callback (tgsi_unsupported for unimplemented ops).
 *
 * IMPORTANT: the table appears to be indexed positionally by TGSI opcode
 * value — the numbered "gap" placeholder entries ({20,...}, {75,...}, ...)
 * exist only to keep every real entry at the array index equal to its
 * opcode.  When adding an entry, keep this alignment intact.
 */
static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	{TGSI_OPCODE_ARL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_arl},
	{TGSI_OPCODE_MOV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_LIT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},
	/* RCP/RSQ are scalar transcendentals: computed once, replicated to all
	 * written channels by tgsi_trans_srcx_replicate. */
	{TGSI_OPCODE_RCP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_RSQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_EXP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_exp},
	{TGSI_OPCODE_LOG,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MUL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
	{TGSI_OPCODE_ADD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	/* DP3/DP4/DPH/DP2 all use the hardware DOT4; tgsi_dp presumably zeroes
	 * or forces the unused source channels — verify in tgsi_dp. */
	{TGSI_OPCODE_DP3,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DP4,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_DST,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
	{TGSI_OPCODE_MAX,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
	/* SLT (a < b) is emitted as SETGT with swapped sources (b > a). */
	{TGSI_OPCODE_SLT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2_swap},
	{TGSI_OPCODE_SGE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2},
	/* MAD is the only OP3 (three-source) entry in this table. */
	{TGSI_OPCODE_MAD,	1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
	/* SUB maps to ADD — presumably tgsi_op2 negates the second source for
	 * this opcode; confirm in tgsi_op2. */
	{TGSI_OPCODE_SUB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
	{TGSI_OPCODE_LRP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
	{TGSI_OPCODE_CND,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{20,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2A,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{22,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{23,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FRC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT, tgsi_op2},
	{TGSI_OPCODE_CLAMP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FLR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR, tgsi_op2},
	{TGSI_OPCODE_ROUND,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_EX2,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_LG2,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_POW,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_pow},
	{TGSI_OPCODE_XPD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_xpd},
	/* gap */
	{32,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* ABS maps to MOV — presumably tgsi_op2 applies the absolute-value
	 * source modifier for this opcode; confirm in tgsi_op2. */
	{TGSI_OPCODE_ABS,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
	{TGSI_OPCODE_RCC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DPH,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	/* COS/SIN go through tgsi_trig — presumably handles the hardware's
	 * restricted input range/period; verify in tgsi_trig. */
	{TGSI_OPCODE_COS,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS, tgsi_trig},
	/* DDX/DDY are texture-unit instructions (gradient fetch), not ALU. */
	{TGSI_OPCODE_DDX,	0, SQ_TEX_INST_GET_GRADIENTS_H, tgsi_tex},
	{TGSI_OPCODE_DDY,	0, SQ_TEX_INST_GET_GRADIENTS_V, tgsi_tex},
	{TGSI_OPCODE_KILP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill},  /* predicated kill */
	{TGSI_OPCODE_PK2H,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK2US,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4B,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4UB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RFL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SEQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE, tgsi_op2},
	{TGSI_OPCODE_SFL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SGT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2},
	{TGSI_OPCODE_SIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN, tgsi_trig},
	/* SLE (a <= b) is emitted as SETGE with swapped sources (b >= a). */
	{TGSI_OPCODE_SLE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2_swap},
	{TGSI_OPCODE_SNE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE, tgsi_op2},
	{TGSI_OPCODE_STR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX,	0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* TXP shares SAMPLE with TEX — the projective divide is presumably
	 * done by tgsi_tex before sampling; confirm there. */
	{TGSI_OPCODE_TXP,	0, SQ_TEX_INST_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_UP2H,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP2US,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4B,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4UB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_X2D,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BRA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CAL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RET,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SSG,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_ssg},
	{TGSI_OPCODE_CMP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_cmp},
	{TGSI_OPCODE_SCS,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_scs},
	{TGSI_OPCODE_TXB,	0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_NRM,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DIV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
	{TGSI_OPCODE_TXL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* BRK/CONT are control-flow (CF) instructions, not ALU. */
	{TGSI_OPCODE_BRK,	0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK, tgsi_loop_brk_cont},
	{TGSI_OPCODE_IF,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_if},
	/* gap */
	{75,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{76,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ELSE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_else},
	{TGSI_OPCODE_ENDIF,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endif},
	/* gap */
	{79,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{80,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PUSHA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POPA,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CEIL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_I2F,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TRUNC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_SHL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{88,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_AND,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_OR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MOD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_XOR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SAD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXF,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CONT,	0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE, tgsi_loop_brk_cont},
	{TGSI_OPCODE_EMIT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDPRIM,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNLOOP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_bgnloop},
	{TGSI_OPCODE_BGNSUB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDLOOP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
	{TGSI_OPCODE_ENDSUB,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{103,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{104,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{105,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{106,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOP,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	/* gap */
	{108,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{109,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{110,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{111,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM4,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CALLNZ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IFC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BREAKC,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KIL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill},  /* conditional kill */
	{TGSI_OPCODE_END,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end},  /* aka HALT */
	/* gap */
	{118,			0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2I,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IDIV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IMAX,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IMIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_INEG,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISGE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISHR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ISLT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2U,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_U2F,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UADD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UDIV,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMAD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMAX,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMIN,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMOD,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UMUL,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USEQ,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USGE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USHR,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USLT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_USNE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SWITCH,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CASE,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DEFAULT,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSWITCH,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LAST,	0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
};