r600g: avoid dynamic allocation of states
mesa.git: src/gallium/drivers/r600/r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "pipe/p_shader_tokens.h"
24 #include "tgsi/tgsi_parse.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_dump.h"
27 #include "util/u_format.h"
28 #include "r600_screen.h"
29 #include "r600_context.h"
30 #include "r600_shader.h"
31 #include "r600_asm.h"
32 #include "r600_sq.h"
33 #include "r600d.h"
34 #include <stdio.h>
35 #include <errno.h>
36
37
38 struct r600_shader_tgsi_instruction;
39
40 struct r600_shader_ctx {
41 struct tgsi_shader_info info;
42 struct tgsi_parse_context parse;
43 const struct tgsi_token *tokens;
44 unsigned type;
45 unsigned file_offset[TGSI_FILE_COUNT];
46 unsigned temp_reg;
47 struct r600_shader_tgsi_instruction *inst_info;
48 struct r600_bc *bc;
49 struct r600_shader *shader;
50 u32 value[4];
51 u32 *literals;
52 u32 nliterals;
53 u32 max_driver_temp_used;
54 };
55
56 struct r600_shader_tgsi_instruction {
57 unsigned tgsi_opcode;
58 unsigned is_op3;
59 unsigned r600_opcode;
60 int (*process)(struct r600_shader_ctx *ctx);
61 };
62
63 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[];
64 static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
65
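/* Patch the destination swizzles of every vertex-fetch clause so they match
 * the util_format swizzle of the vertex elements currently bound to the
 * context, then rebuild the bytecode; non-vertex shaders are returned
 * unchanged.
 */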
66 static int r600_shader_update(struct pipe_context *ctx, struct r600_shader *shader)
67 {
68 struct r600_context *rctx = r600_context(ctx);
69 const struct util_format_description *desc;
70 enum pipe_format resource_format[160];
71 unsigned i, nresources = 0;
72 struct r600_bc *bc = &shader->bc;
73 struct r600_bc_cf *cf;
74 struct r600_bc_vtx *vtx;
75
76 if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
77 return 0;
78 for (i = 0; i < rctx->vertex_elements->count; i++) {
79 resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
80 }
81 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
82 switch (cf->inst) {
83 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
84 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
85 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
86 desc = util_format_description(resource_format[vtx->buffer_id]);
87 if (desc == NULL) {
88 R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
89 return -EINVAL;
90 }
91 vtx->dst_sel_x = desc->swizzle[0];
92 vtx->dst_sel_y = desc->swizzle[1];
93 vtx->dst_sel_z = desc->swizzle[2];
94 vtx->dst_sel_w = desc->swizzle[3];
95 }
96 break;
97 default:
98 break;
99 }
100 }
101 return r600_bc_build(&shader->bc);
102 }
103
104 int r600_pipe_shader_create(struct pipe_context *ctx,
105 struct r600_context_state *rpshader,
106 const struct tgsi_token *tokens)
107 {
108 struct r600_screen *rscreen = r600_screen(ctx->screen);
109 int r;
110
111 //fprintf(stderr, "--------------------------------------------------------------\n");
112 //tgsi_dump(tokens, 0);
113 if (rpshader == NULL)
114 return -ENOMEM;
115 rpshader->shader.family = radeon_get_family(rscreen->rw);
116 r = r600_shader_from_tgsi(tokens, &rpshader->shader);
117 if (r) {
118 R600_ERR("translation from TGSI failed!\n");
119 return r;
120 }
121 r = r600_bc_build(&rpshader->shader.bc);
122 if (r) {
123 R600_ERR("building bytecode failed!\n");
124 return r;
125 }
126 //fprintf(stderr, "______________________________________________________________\n");
127 return 0;
128 }
129
130 static int r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_context_state *rpshader)
131 {
132 struct r600_screen *rscreen = r600_screen(ctx->screen);
133 struct r600_shader *rshader = &rpshader->shader;
134 struct radeon_state *state;
135 unsigned i, tmp;
136
137 state = &rpshader->rstate;
138 radeon_state_fini(&rpshader->rstate);
139 radeon_state_init(state, rscreen->rw, R600_STATE_SHADER, 0, R600_SHADER_VS);
140 for (i = 0; i < 10; i++) {
141 state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i] = 0;
142 }
143 /* so far we never get a proper semantic id from TGSI */
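/* Each SPI_VS_OUT_ID register packs four 8-bit semantic ids: output i lands
 * in byte (i & 3) of register i / 4, so register 0 ends up as 0x03020100,
 * register 1 as 0x07060504, and so on.
 */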
144 for (i = 0; i < 32; i++) {
145 tmp = i << ((i & 3) * 8);
146 state->states[R600_VS_SHADER__SPI_VS_OUT_ID_0 + i / 4] |= tmp;
147 }
148 state->states[R600_VS_SHADER__SPI_VS_OUT_CONFIG] = S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2);
149 state->states[R600_VS_SHADER__SQ_PGM_RESOURCES_VS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
150 S_028868_STACK_SIZE(rshader->bc.nstack);
151 state->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
152 state->bo[1] = radeon_bo_incref(rscreen->rw, rpshader->bo);
153 state->nbo = 2;
154 state->placement[0] = RADEON_GEM_DOMAIN_GTT;
155 state->placement[2] = RADEON_GEM_DOMAIN_GTT;
156 return radeon_state_pm4(state);
157 }
158
159 static int r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_context_state *rpshader)
160 {
161 const struct pipe_rasterizer_state *rasterizer;
162 struct r600_screen *rscreen = r600_screen(ctx->screen);
163 struct r600_shader *rshader = &rpshader->shader;
164 struct r600_context *rctx = r600_context(ctx);
165 struct radeon_state *state;
166 unsigned i, tmp, exports_ps, num_cout;
167 boolean have_pos = FALSE;
168
169 state = &rpshader->rstate;
170 rasterizer = &rctx->rasterizer->state.rasterizer;
171 radeon_state_fini(state);
172 radeon_state_init(state, rscreen->rw, R600_STATE_SHADER, 0, R600_SHADER_PS);
173 for (i = 0; i < rshader->ninput; i++) {
174 tmp = S_028644_SEMANTIC(i);
175 tmp |= S_028644_SEL_CENTROID(1);
176 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
177 have_pos = TRUE;
178 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
179 rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
180 rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
181 tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
182 }
183 if (rasterizer->sprite_coord_enable & (1 << i)) {
184 tmp |= S_028644_PT_SPRITE_TEX(1);
185 }
186 state->states[R600_PS_SHADER__SPI_PS_INPUT_CNTL_0 + i] = tmp;
187 }
188
189 exports_ps = 0;
190 num_cout = 0;
191 for (i = 0; i < rshader->noutput; i++) {
192 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
193 exports_ps |= 1;
194 else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
195 exports_ps |= (1 << (num_cout+1));
196 num_cout++;
197 }
198 }
199 if (!exports_ps) {
200 /* always at least export 1 component per pixel */
201 exports_ps = 2;
202 }
203 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] = S_0286CC_NUM_INTERP(rshader->ninput) |
204 S_0286CC_PERSP_GRADIENT_ENA(1);
205 if (have_pos) {
206 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_0] |= S_0286CC_POSITION_ENA(1) |
207 S_0286CC_BARYC_SAMPLE_CNTL(1);
208 state->states[R600_PS_SHADER__SPI_INPUT_Z] |= 1;
209 }
210 state->states[R600_PS_SHADER__SPI_PS_IN_CONTROL_1] = 0x00000000;
211 state->states[R600_PS_SHADER__SQ_PGM_RESOURCES_PS] = S_028868_NUM_GPRS(rshader->bc.ngpr) |
212 S_028868_STACK_SIZE(rshader->bc.nstack);
213 state->states[R600_PS_SHADER__SQ_PGM_EXPORTS_PS] = exports_ps;
214 state->bo[0] = radeon_bo_incref(rscreen->rw, rpshader->bo);
215 state->nbo = 1;
216 state->placement[0] = RADEON_GEM_DOMAIN_GTT;
217 return radeon_state_pm4(state);
218 }
219
220 static int r600_pipe_shader(struct pipe_context *ctx, struct r600_context_state *rpshader)
221 {
222 struct r600_screen *rscreen = r600_screen(ctx->screen);
223 struct r600_context *rctx = r600_context(ctx);
224 struct r600_shader *rshader = &rpshader->shader;
225 int r;
226
227 /* copy new shader */
228 radeon_bo_decref(rscreen->rw, rpshader->bo);
229 rpshader->bo = NULL;
230 rpshader->bo = radeon_bo(rscreen->rw, 0, rshader->bc.ndw * 4,
231 4096, NULL);
232 if (rpshader->bo == NULL) {
233 return -ENOMEM;
234 }
235 radeon_bo_map(rscreen->rw, rpshader->bo);
236 memcpy(rpshader->bo->data, rshader->bc.bytecode, rshader->bc.ndw * 4);
237 radeon_bo_unmap(rscreen->rw, rpshader->bo);
238 /* build state */
239 rshader->flat_shade = rctx->flat_shade;
240 switch (rshader->processor_type) {
241 case TGSI_PROCESSOR_VERTEX:
242 r = r600_pipe_shader_vs(ctx, rpshader);
243 break;
244 case TGSI_PROCESSOR_FRAGMENT:
245 r = r600_pipe_shader_ps(ctx, rpshader);
246 break;
247 default:
248 r = -EINVAL;
249 break;
250 }
251 return r;
252 }
253
254 int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_context_state *rpshader)
255 {
256 struct r600_context *rctx = r600_context(ctx);
257 int r;
258
259 if (rpshader == NULL)
260 return -EINVAL;
261 /* there should be enough inputs bound */
262 if (rctx->vertex_elements->count < rpshader->shader.bc.nresource) {
263 R600_ERR("%d resources provided, expecting %d\n",
264 rctx->vertex_elements->count, rpshader->shader.bc.nresource);
265 return -EINVAL;
266 }
267 r = r600_shader_update(ctx, &rpshader->shader);
268 if (r)
269 return r;
270 return r600_pipe_shader(ctx, rpshader);
271 }
272
273 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
274 {
275 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
276 int j;
277
278 if (i->Instruction.NumDstRegs > 1) {
279 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
280 return -EINVAL;
281 }
282 if (i->Instruction.Predicate) {
283 R600_ERR("predicate unsupported\n");
284 return -EINVAL;
285 }
286 #if 0
287 if (i->Instruction.Label) {
288 R600_ERR("label unsupported\n");
289 return -EINVAL;
290 }
291 #endif
292 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
293 if (i->Src[j].Register.Dimension ||
294 i->Src[j].Register.Absolute) {
295 R600_ERR("unsupported src %d (dimension %d|absolute %d)\n", j,
296 i->Src[j].Register.Dimension,
297 i->Src[j].Register.Absolute);
298 return -EINVAL;
299 }
300 }
301 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
302 if (i->Dst[j].Register.Dimension) {
303 R600_ERR("unsupported dst (dimension)\n");
304 return -EINVAL;
305 }
306 }
307 return 0;
308 }
309
310 static int tgsi_declaration(struct r600_shader_ctx *ctx)
311 {
312 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
313 struct r600_bc_vtx vtx;
314 unsigned i;
315 int r;
316
317 switch (d->Declaration.File) {
318 case TGSI_FILE_INPUT:
319 i = ctx->shader->ninput++;
320 ctx->shader->input[i].name = d->Semantic.Name;
321 ctx->shader->input[i].sid = d->Semantic.Index;
322 ctx->shader->input[i].interpolate = d->Declaration.Interpolate;
323 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + i;
324 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
325 /* turn input into fetch */
326 memset(&vtx, 0, sizeof(struct r600_bc_vtx));
327 vtx.inst = 0;
328 vtx.fetch_type = 0;
329 vtx.buffer_id = i;
330 /* register containing the index into the buffer */
331 vtx.src_gpr = 0;
332 vtx.src_sel_x = 0;
333 vtx.mega_fetch_count = 0x1F;
334 vtx.dst_gpr = ctx->shader->input[i].gpr;
335 vtx.dst_sel_x = 0;
336 vtx.dst_sel_y = 1;
337 vtx.dst_sel_z = 2;
338 vtx.dst_sel_w = 3;
339 r = r600_bc_add_vtx(ctx->bc, &vtx);
340 if (r)
341 return r;
342 }
343 break;
344 case TGSI_FILE_OUTPUT:
345 i = ctx->shader->noutput++;
346 ctx->shader->output[i].name = d->Semantic.Name;
347 ctx->shader->output[i].sid = d->Semantic.Index;
348 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + i;
349 ctx->shader->output[i].interpolate = d->Declaration.Interpolate;
350 break;
351 case TGSI_FILE_CONSTANT:
352 case TGSI_FILE_TEMPORARY:
353 case TGSI_FILE_SAMPLER:
354 case TGSI_FILE_ADDRESS:
355 break;
356 default:
357 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
358 return -EINVAL;
359 }
360 return 0;
361 }
362
363 static int r600_get_temp(struct r600_shader_ctx *ctx)
364 {
365 return ctx->temp_reg + ctx->max_driver_temp_used++;
366 }
367
368 int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
369 {
370 struct tgsi_full_immediate *immediate;
371 struct r600_shader_ctx ctx;
372 struct r600_bc_output output[32];
373 unsigned output_done, noutput;
374 unsigned opcode;
375 int i, r = 0, pos0;
376
377 ctx.bc = &shader->bc;
378 ctx.shader = shader;
379 r = r600_bc_init(ctx.bc, shader->family);
380 if (r)
381 return r;
382 ctx.tokens = tokens;
383 tgsi_scan_shader(tokens, &ctx.info);
384 tgsi_parse_init(&ctx.parse, tokens);
385 ctx.type = ctx.parse.FullHeader.Processor.Processor;
386 shader->processor_type = ctx.type;
387
388 /* register allocations */
389 /* Values [0,127] correspond to GPR[0..127].
390 * Values [128,159] correspond to constant buffer bank 0
391 * Values [160,191] correspond to constant buffer bank 1
392 * Values [256,511] correspond to cfile constants c[0..255].
393 * Other special values are shown in the list below.
394 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
395 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
396 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
397 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
398 * 248 SQ_ALU_SRC_0: special constant 0.0.
399 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
400 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
401 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
402 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
403 * 253 SQ_ALU_SRC_LITERAL: literal constant.
404 * 254 SQ_ALU_SRC_PV: previous vector result.
405 * 255 SQ_ALU_SRC_PS: previous scalar result.
406 */
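/* With the offsets chosen below, a vertex shader therefore reads IN[0] from
 * GPR1 (GPR0 holds the fetch index), places OUT[] and TEMP[] in the GPRs
 * that follow, maps CONST[n] to select 256 + n in the constant file, and
 * turns every IMMEDIATE operand into select 253 (SQ_ALU_SRC_LITERAL).
 */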
407 for (i = 0; i < TGSI_FILE_COUNT; i++) {
408 ctx.file_offset[i] = 0;
409 }
410 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
411 ctx.file_offset[TGSI_FILE_INPUT] = 1;
412 }
413 ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] +
414 ctx.info.file_count[TGSI_FILE_INPUT];
415 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
416 ctx.info.file_count[TGSI_FILE_OUTPUT];
417 ctx.file_offset[TGSI_FILE_CONSTANT] = 256;
418 ctx.file_offset[TGSI_FILE_IMMEDIATE] = 253;
419 ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
420 ctx.info.file_count[TGSI_FILE_TEMPORARY];
421
422 ctx.nliterals = 0;
423 ctx.literals = NULL;
424
425 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
426 tgsi_parse_token(&ctx.parse);
427 switch (ctx.parse.FullToken.Token.Type) {
428 case TGSI_TOKEN_TYPE_IMMEDIATE:
429 immediate = &ctx.parse.FullToken.FullImmediate;
430 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
431 if(ctx.literals == NULL) {
432 r = -ENOMEM;
433 goto out_err;
434 }
435 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
436 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
437 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
438 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
439 ctx.nliterals++;
440 break;
441 case TGSI_TOKEN_TYPE_DECLARATION:
442 r = tgsi_declaration(&ctx);
443 if (r)
444 goto out_err;
445 break;
446 case TGSI_TOKEN_TYPE_INSTRUCTION:
447 r = tgsi_is_supported(&ctx);
448 if (r)
449 goto out_err;
450 ctx.max_driver_temp_used = 0;
451 /* reserve first tmp for everyone */
452 r600_get_temp(&ctx);
453 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
454 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
455 r = ctx.inst_info->process(&ctx);
456 if (r)
457 goto out_err;
458 r = r600_bc_add_literal(ctx.bc, ctx.value);
459 if (r)
460 goto out_err;
461 break;
462 default:
463 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
464 r = -EINVAL;
465 goto out_err;
466 }
467 }
468 /* export output */
469 noutput = shader->noutput;
470 for (i = 0, pos0 = 0; i < noutput; i++) {
471 memset(&output[i], 0, sizeof(struct r600_bc_output));
472 output[i].gpr = shader->output[i].gpr;
473 output[i].elem_size = 3;
474 output[i].swizzle_x = 0;
475 output[i].swizzle_y = 1;
476 output[i].swizzle_z = 2;
477 output[i].swizzle_w = 3;
478 output[i].barrier = 1;
479 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
480 output[i].array_base = i - pos0;
481 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
482 switch (ctx.type) {
483 case TGSI_PROCESSOR_VERTEX:
484 if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
485 output[i].array_base = 60;
486 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
487 /* position doesn't count in array_base */
488 pos0++;
489 }
490 if (shader->output[i].name == TGSI_SEMANTIC_PSIZE) {
491 output[i].array_base = 61;
492 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
493 /* position doesn't count in array_base */
494 pos0++;
495 }
496 break;
497 case TGSI_PROCESSOR_FRAGMENT:
498 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
499 output[i].array_base = shader->output[i].sid;
500 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
501 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
502 output[i].array_base = 61;
503 output[i].swizzle_x = 2;
504 output[i].swizzle_y = output[i].swizzle_z = output[i].swizzle_w = 7;
505 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
506 } else {
507 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
508 r = -EINVAL;
509 goto out_err;
510 }
511 break;
512 default:
513 R600_ERR("unsupported processor type %d\n", ctx.type);
514 r = -EINVAL;
515 goto out_err;
516 }
517 }
518 /* add fake param output for vertex shader if no param is exported */
519 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
520 for (i = 0, pos0 = 0; i < noutput; i++) {
521 if (output[i].type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM) {
522 pos0 = 1;
523 break;
524 }
525 }
526 if (!pos0) {
527 memset(&output[i], 0, sizeof(struct r600_bc_output));
528 output[i].gpr = 0;
529 output[i].elem_size = 3;
530 output[i].swizzle_x = 0;
531 output[i].swizzle_y = 1;
532 output[i].swizzle_z = 2;
533 output[i].swizzle_w = 3;
534 output[i].barrier = 1;
535 output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
536 output[i].array_base = 0;
537 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
538 noutput++;
539 }
540 }
541 /* add fake pixel export */
542 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && !noutput) {
543 memset(&output[0], 0, sizeof(struct r600_bc_output));
544 output[0].gpr = 0;
545 output[0].elem_size = 3;
546 output[0].swizzle_x = 7;
547 output[0].swizzle_y = 7;
548 output[0].swizzle_z = 7;
549 output[0].swizzle_w = 7;
550 output[0].barrier = 1;
551 output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
552 output[0].array_base = 0;
553 output[0].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT;
554 noutput++;
555 }
556 /* set export done on last export of each type */
557 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
558 if (i == (noutput - 1)) {
559 output[i].end_of_program = 1;
560 }
561 if (!(output_done & (1 << output[i].type))) {
562 output_done |= (1 << output[i].type);
563 output[i].inst = V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE;
564 }
565 }
566 /* add output to bytecode */
567 for (i = 0; i < noutput; i++) {
568 r = r600_bc_add_output(ctx.bc, &output[i]);
569 if (r)
570 goto out_err;
571 }
572 free(ctx.literals);
573 tgsi_parse_free(&ctx.parse);
574 return 0;
575 out_err:
576 free(ctx.literals);
577 tgsi_parse_free(&ctx.parse);
578 return r;
579 }
580
581 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
582 {
583 R600_ERR("%d tgsi opcode unsupported\n", ctx->inst_info->tgsi_opcode);
584 return -EINVAL;
585 }
586
587 static int tgsi_end(struct r600_shader_ctx *ctx)
588 {
589 return 0;
590 }
591
592 static int tgsi_src(struct r600_shader_ctx *ctx,
593 const struct tgsi_full_src_register *tgsi_src,
594 struct r600_bc_alu_src *r600_src)
595 {
596 int index;
597 memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
598 r600_src->sel = tgsi_src->Register.Index;
599 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
600 r600_src->sel = 0;
601 index = tgsi_src->Register.Index;
602 ctx->value[0] = ctx->literals[index * 4 + 0];
603 ctx->value[1] = ctx->literals[index * 4 + 1];
604 ctx->value[2] = ctx->literals[index * 4 + 2];
605 ctx->value[3] = ctx->literals[index * 4 + 3];
606 }
607 if (tgsi_src->Register.Indirect)
608 r600_src->rel = V_SQ_REL_RELATIVE;
609 r600_src->neg = tgsi_src->Register.Negate;
610 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
611 return 0;
612 }
613
614 static int tgsi_dst(struct r600_shader_ctx *ctx,
615 const struct tgsi_full_dst_register *tgsi_dst,
616 unsigned swizzle,
617 struct r600_bc_alu_dst *r600_dst)
618 {
619 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
620
621 r600_dst->sel = tgsi_dst->Register.Index;
622 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
623 r600_dst->chan = swizzle;
624 r600_dst->write = 1;
625 if (tgsi_dst->Register.Indirect)
626 r600_dst->rel = V_SQ_REL_RELATIVE;
627 if (inst->Instruction.Saturate) {
628 r600_dst->clamp = 1;
629 }
630 return 0;
631 }
632
633 static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
634 {
635 switch (swizzle) {
636 case 0:
637 return tgsi_src->Register.SwizzleX;
638 case 1:
639 return tgsi_src->Register.SwizzleY;
640 case 2:
641 return tgsi_src->Register.SwizzleZ;
642 case 3:
643 return tgsi_src->Register.SwizzleW;
644 default:
645 return 0;
646 }
647 }
648
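/* Copy constant-file operands in source slots other than the first into a
 * temporary GPR, so the emitted ALU instructions read at most one operand
 * straight from the constant file; a hardware limit on constant-file reads
 * per instruction is the presumed reason for this split.
 */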
649 static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
650 {
651 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
652 struct r600_bc_alu alu;
653 int i, j, k, nconst, r;
654
655 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
656 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
657 nconst++;
658 }
659 r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
660 if (r) {
661 return r;
662 }
663 }
664 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
665 if (inst->Src[j].Register.File == TGSI_FILE_CONSTANT && j > 0) {
666 int treg = r600_get_temp(ctx);
667 for (k = 0; k < 4; k++) {
668 memset(&alu, 0, sizeof(struct r600_bc_alu));
669 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
670 alu.src[0].sel = r600_src[j].sel;
671 alu.src[0].chan = k;
672 alu.dst.sel = treg;
673 alu.dst.chan = k;
674 alu.dst.write = 1;
675 if (k == 3)
676 alu.last = 1;
677 r = r600_bc_add_alu(ctx->bc, &alu);
678 if (r)
679 return r;
680 }
681 r600_src[j].sel = treg;
682 j--;
683 }
684 }
685 return 0;
686 }
687
688 /* move any immediate into a temp - the trig functions need the literal slots for their PI constants */
689 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
690 {
691 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
692 struct r600_bc_alu alu;
693 int i, j, k, nliteral, r;
694
695 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
696 if (inst->Src[i].Register.File == TGSI_FILE_IMMEDIATE) {
697 nliteral++;
698 }
699 }
700 for (i = 0, j = 0; i < inst->Instruction.NumSrcRegs; i++) {
701 if (inst->Src[j].Register.File == TGSI_FILE_IMMEDIATE) {
702 int treg = r600_get_temp(ctx);
703 for (k = 0; k < 4; k++) {
704 memset(&alu, 0, sizeof(struct r600_bc_alu));
705 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
706 alu.src[0].sel = r600_src[j].sel;
707 alu.src[0].chan = k;
708 alu.dst.sel = treg;
709 alu.dst.chan = k;
710 alu.dst.write = 1;
711 if (k == 3)
712 alu.last = 1;
713 r = r600_bc_add_alu(ctx->bc, &alu);
714 if (r)
715 return r;
716 }
717 r = r600_bc_add_literal(ctx->bc, ctx->value);
718 if (r)
719 return r;
720 r600_src[j].sel = treg;
721 j++;
722 }
723 }
724 return 0;
725 }
726
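/* Emit one ALU instruction per write-enabled channel of the destination.
 * The swap argument (used through tgsi_op2_swap) exchanges the two source
 * operands, presumably for TGSI opcodes whose operand order mirrors that of
 * the r600 instruction they map to; SUB and ABS are handled below by
 * setting the neg/abs bit on a source instead.
 */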
727 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap)
728 {
729 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
730 struct r600_bc_alu_src r600_src[3];
731 struct r600_bc_alu alu;
732 int i, j, r;
733 int lasti = 0;
734
735 for (i = 0; i < 4; i++) {
736 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
737 lasti = i;
738 }
739 }
740
741 r = tgsi_split_constant(ctx, r600_src);
742 if (r)
743 return r;
744 for (i = 0; i < lasti + 1; i++) {
745 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
746 continue;
747
748 memset(&alu, 0, sizeof(struct r600_bc_alu));
749 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
750 if (r)
751 return r;
752
753 alu.inst = ctx->inst_info->r600_opcode;
754 if (!swap) {
755 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
756 alu.src[j] = r600_src[j];
757 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
758 }
759 } else {
760 alu.src[0] = r600_src[1];
761 alu.src[0].chan = tgsi_chan(&inst->Src[1], i);
762
763 alu.src[1] = r600_src[0];
764 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
765 }
766 /* handle some special cases */
767 switch (ctx->inst_info->tgsi_opcode) {
768 case TGSI_OPCODE_SUB:
769 alu.src[1].neg = 1;
770 break;
771 case TGSI_OPCODE_ABS:
772 alu.src[0].abs = 1;
773 break;
774 default:
775 break;
776 }
777 if (i == lasti) {
778 alu.last = 1;
779 }
780 r = r600_bc_add_alu(ctx->bc, &alu);
781 if (r)
782 return r;
783 }
784 return 0;
785 }
786
787 static int tgsi_op2(struct r600_shader_ctx *ctx)
788 {
789 return tgsi_op2_s(ctx, 0);
790 }
791
792 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
793 {
794 return tgsi_op2_s(ctx, 1);
795 }
796
797 /*
798 * r600 - trunc to -PI..PI range
799 * r700 - normalize by dividing by 2PI
800 * see fdo bug 27901
801 */
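/* Concretely: t = fract(x / (2 * PI) + 0.5) is computed first; r600 then
 * rescales it with t * 2 * PI - PI (range [-PI, PI)), while r700 applies
 * t * 1.0 - 0.5 and feeds SIN/COS the angle in revolutions ([-0.5, 0.5)).
 */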
802 static int tgsi_setup_trig(struct r600_shader_ctx *ctx,
803 struct r600_bc_alu_src r600_src[3])
804 {
805 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
806 int r;
807 uint32_t lit_vals[4];
808 struct r600_bc_alu alu;
809
810 memset(lit_vals, 0, 4*4);
811 r = tgsi_split_constant(ctx, r600_src);
812 if (r)
813 return r;
814
815 r = tgsi_split_literal_constant(ctx, r600_src);
816 if (r)
817 return r;
818
819 lit_vals[0] = fui(1.0 /(3.1415926535 * 2));
820 lit_vals[1] = fui(0.5f);
821
822 memset(&alu, 0, sizeof(struct r600_bc_alu));
823 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
824 alu.is_op3 = 1;
825
826 alu.dst.chan = 0;
827 alu.dst.sel = ctx->temp_reg;
828 alu.dst.write = 1;
829
830 alu.src[0] = r600_src[0];
831 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
832
833 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
834 alu.src[1].chan = 0;
835 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
836 alu.src[2].chan = 1;
837 alu.last = 1;
838 r = r600_bc_add_alu(ctx->bc, &alu);
839 if (r)
840 return r;
841 r = r600_bc_add_literal(ctx->bc, lit_vals);
842 if (r)
843 return r;
844
845 memset(&alu, 0, sizeof(struct r600_bc_alu));
846 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;
847
848 alu.dst.chan = 0;
849 alu.dst.sel = ctx->temp_reg;
850 alu.dst.write = 1;
851
852 alu.src[0].sel = ctx->temp_reg;
853 alu.src[0].chan = 0;
854 alu.last = 1;
855 r = r600_bc_add_alu(ctx->bc, &alu);
856 if (r)
857 return r;
858
859 if (ctx->bc->chiprev == 0) {
860 lit_vals[0] = fui(3.1415926535897f * 2.0f);
861 lit_vals[1] = fui(-3.1415926535897f);
862 } else {
863 lit_vals[0] = fui(1.0f);
864 lit_vals[1] = fui(-0.5f);
865 }
866
867 memset(&alu, 0, sizeof(struct r600_bc_alu));
868 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
869 alu.is_op3 = 1;
870
871 alu.dst.chan = 0;
872 alu.dst.sel = ctx->temp_reg;
873 alu.dst.write = 1;
874
875 alu.src[0].sel = ctx->temp_reg;
876 alu.src[0].chan = 0;
877
878 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
879 alu.src[1].chan = 0;
880 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
881 alu.src[2].chan = 1;
882 alu.last = 1;
883 r = r600_bc_add_alu(ctx->bc, &alu);
884 if (r)
885 return r;
886 r = r600_bc_add_literal(ctx->bc, lit_vals);
887 if (r)
888 return r;
889 return 0;
890 }
891
892 static int tgsi_trig(struct r600_shader_ctx *ctx)
893 {
894 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
895 struct r600_bc_alu_src r600_src[3];
896 struct r600_bc_alu alu;
897 int i, r;
898 int lasti = 0;
899
900 r = tgsi_setup_trig(ctx, r600_src);
901 if (r)
902 return r;
903
904 memset(&alu, 0, sizeof(struct r600_bc_alu));
905 alu.inst = ctx->inst_info->r600_opcode;
906 alu.dst.chan = 0;
907 alu.dst.sel = ctx->temp_reg;
908 alu.dst.write = 1;
909
910 alu.src[0].sel = ctx->temp_reg;
911 alu.src[0].chan = 0;
912 alu.last = 1;
913 r = r600_bc_add_alu(ctx->bc, &alu);
914 if (r)
915 return r;
916
917 /* replicate result */
918 for (i = 0; i < 4; i++) {
919 if (inst->Dst[0].Register.WriteMask & (1 << i))
920 lasti = i;
921 }
922 for (i = 0; i < lasti + 1; i++) {
923 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
924 continue;
925
926 memset(&alu, 0, sizeof(struct r600_bc_alu));
927 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
928
929 alu.src[0].sel = ctx->temp_reg;
930 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
931 if (r)
932 return r;
933 if (i == lasti)
934 alu.last = 1;
935 r = r600_bc_add_alu(ctx->bc, &alu);
936 if (r)
937 return r;
938 }
939 return 0;
940 }
941
942 static int tgsi_scs(struct r600_shader_ctx *ctx)
943 {
944 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
945 struct r600_bc_alu_src r600_src[3];
946 struct r600_bc_alu alu;
947 int r;
948
949 r = tgsi_setup_trig(ctx, r600_src);
950 if (r)
951 return r;
952
953
954 /* dst.x = COS */
955 memset(&alu, 0, sizeof(struct r600_bc_alu));
956 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS;
957 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
958 if (r)
959 return r;
960
961 alu.src[0].sel = ctx->temp_reg;
962 alu.src[0].chan = 0;
963 alu.last = 1;
964 r = r600_bc_add_alu(ctx->bc, &alu);
965 if (r)
966 return r;
967
968 /* dst.y = SIN */
969 memset(&alu, 0, sizeof(struct r600_bc_alu));
970 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN;
971 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
972 if (r)
973 return r;
974
975 alu.src[0].sel = ctx->temp_reg;
976 alu.src[0].chan = 0;
977 alu.last = 1;
978 r = r600_bc_add_alu(ctx->bc, &alu);
979 if (r)
980 return r;
981 return 0;
982 }
983
984 static int tgsi_kill(struct r600_shader_ctx *ctx)
985 {
986 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
987 struct r600_bc_alu alu;
988 int i, r;
989
990 for (i = 0; i < 4; i++) {
991 memset(&alu, 0, sizeof(struct r600_bc_alu));
992 alu.inst = ctx->inst_info->r600_opcode;
993
994 alu.dst.chan = i;
995
996 alu.src[0].sel = V_SQ_ALU_SRC_0;
997
998 if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_KILP) {
999 alu.src[1].sel = V_SQ_ALU_SRC_1;
1000 alu.src[1].neg = 1;
1001 } else {
1002 r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
1003 if (r)
1004 return r;
1005 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1006 }
1007 if (i == 3) {
1008 alu.last = 1;
1009 }
1010 r = r600_bc_add_alu(ctx->bc, &alu);
1011 if (r)
1012 return r;
1013 }
1014 r = r600_bc_add_literal(ctx->bc, ctx->value);
1015 if (r)
1016 return r;
1017
1018 /* kill must be last in ALU */
1019 ctx->bc->force_add_cf = 1;
1020 ctx->shader->uses_kill = TRUE;
1021 return 0;
1022 }
1023
1024 static int tgsi_lit(struct r600_shader_ctx *ctx)
1025 {
1026 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1027 struct r600_bc_alu alu;
1028 struct r600_bc_alu_src r600_src[3];
1029 int r;
1030
1031 r = tgsi_split_constant(ctx, r600_src);
1032 if (r)
1033 return r;
1034 r = tgsi_split_literal_constant(ctx, r600_src);
1035 if (r)
1036 return r;
1037
1038 /* dst.x <- 1.0 */
1039 memset(&alu, 0, sizeof(struct r600_bc_alu));
1040 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1041 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
1042 alu.src[0].chan = 0;
1043 r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
1044 if (r)
1045 return r;
1046 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
1047 r = r600_bc_add_alu(ctx->bc, &alu);
1048 if (r)
1049 return r;
1050
1051 /* dst.y = max(src.x, 0.0) */
1052 memset(&alu, 0, sizeof(struct r600_bc_alu));
1053 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX;
1054 alu.src[0] = r600_src[0];
1055 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
1056 alu.src[1].chan = 0;
1057 r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
1058 if (r)
1059 return r;
1060 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
1061 r = r600_bc_add_alu(ctx->bc, &alu);
1062 if (r)
1063 return r;
1064
1065 /* dst.w <- 1.0 */
1066 memset(&alu, 0, sizeof(struct r600_bc_alu));
1067 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1068 alu.src[0].sel = V_SQ_ALU_SRC_1;
1069 alu.src[0].chan = 0;
1070 r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
1071 if (r)
1072 return r;
1073 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
1074 alu.last = 1;
1075 r = r600_bc_add_alu(ctx->bc, &alu);
1076 if (r)
1077 return r;
1078
1079 r = r600_bc_add_literal(ctx->bc, ctx->value);
1080 if (r)
1081 return r;
1082
1083 if (inst->Dst[0].Register.WriteMask & (1 << 2))
1084 {
1085 int chan;
1086 int sel;
1087
1088 /* dst.z = log(src.y) */
1089 memset(&alu, 0, sizeof(struct r600_bc_alu));
1090 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED;
1091 alu.src[0] = r600_src[0];
1092 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1093 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
1094 if (r)
1095 return r;
1096 alu.last = 1;
1097 r = r600_bc_add_alu(ctx->bc, &alu);
1098 if (r)
1099 return r;
1100
1101 r = r600_bc_add_literal(ctx->bc, ctx->value);
1102 if (r)
1103 return r;
1104
1105 chan = alu.dst.chan;
1106 sel = alu.dst.sel;
1107
1108 /* tmp.x = amd MUL_LIT(src.w, dst.z, src.x) */
1109 memset(&alu, 0, sizeof(struct r600_bc_alu));
1110 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
1111 alu.src[0] = r600_src[0];
1112 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1113 alu.src[1].sel = sel;
1114 alu.src[1].chan = chan;
1115
1116 alu.src[2] = r600_src[0];
1117 alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
1118 alu.dst.sel = ctx->temp_reg;
1119 alu.dst.chan = 0;
1120 alu.dst.write = 1;
1121 alu.is_op3 = 1;
1122 alu.last = 1;
1123 r = r600_bc_add_alu(ctx->bc, &alu);
1124 if (r)
1125 return r;
1126
1127 r = r600_bc_add_literal(ctx->bc, ctx->value);
1128 if (r)
1129 return r;
1130 /* dst.z = exp(tmp.x) */
1131 memset(&alu, 0, sizeof(struct r600_bc_alu));
1132 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1133 alu.src[0].sel = ctx->temp_reg;
1134 alu.src[0].chan = 0;
1135 r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
1136 if (r)
1137 return r;
1138 alu.last = 1;
1139 r = r600_bc_add_alu(ctx->bc, &alu);
1140 if (r)
1141 return r;
1142 }
1143 return 0;
1144 }
1145
1146 static int tgsi_trans(struct r600_shader_ctx *ctx)
1147 {
1148 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1149 struct r600_bc_alu alu;
1150 int i, j, r;
1151
1152 for (i = 0; i < 4; i++) {
1153 memset(&alu, 0, sizeof(struct r600_bc_alu));
1154 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
1155 alu.inst = ctx->inst_info->r600_opcode;
1156 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1157 r = tgsi_src(ctx, &inst->Src[j], &alu.src[j]);
1158 if (r)
1159 return r;
1160 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1161 }
1162 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1163 if (r)
1164 return r;
1165 alu.last = 1;
1166 r = r600_bc_add_alu(ctx->bc, &alu);
1167 if (r)
1168 return r;
1169 }
1170 }
1171 return 0;
1172 }
1173
1174 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
1175 {
1176 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1177 struct r600_bc_alu alu;
1178 int i, r;
1179
1180 for (i = 0; i < 4; i++) {
1181 memset(&alu, 0, sizeof(struct r600_bc_alu));
1182 alu.src[0].sel = ctx->temp_reg;
1183 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1184 alu.dst.chan = i;
1185 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1186 if (r)
1187 return r;
1188 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
1189 if (i == 3)
1190 alu.last = 1;
1191 r = r600_bc_add_alu(ctx->bc, &alu);
1192 if (r)
1193 return r;
1194 }
1195 return 0;
1196 }
1197
1198 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
1199 {
1200 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1201 struct r600_bc_alu alu;
1202 int i, r;
1203
1204 memset(&alu, 0, sizeof(struct r600_bc_alu));
1205 alu.inst = ctx->inst_info->r600_opcode;
1206 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1207 r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]);
1208 if (r)
1209 return r;
1210 alu.src[i].chan = tgsi_chan(&inst->Src[i], 0);
1211 }
1212 alu.dst.sel = ctx->temp_reg;
1213 alu.dst.write = 1;
1214 alu.last = 1;
1215 r = r600_bc_add_alu(ctx->bc, &alu);
1216 if (r)
1217 return r;
1218 r = r600_bc_add_literal(ctx->bc, ctx->value);
1219 if (r)
1220 return r;
1221 /* replicate result */
1222 return tgsi_helper_tempx_replicate(ctx);
1223 }
1224
1225 static int tgsi_pow(struct r600_shader_ctx *ctx)
1226 {
1227 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1228 struct r600_bc_alu alu;
1229 int r;
1230
1231 /* LOG2(a) */
1232 memset(&alu, 0, sizeof(struct r600_bc_alu));
1233 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE;
1234 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1235 if (r)
1236 return r;
1237 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1238 alu.dst.sel = ctx->temp_reg;
1239 alu.dst.write = 1;
1240 alu.last = 1;
1241 r = r600_bc_add_alu(ctx->bc, &alu);
1242 if (r)
1243 return r;
1244 r = r600_bc_add_literal(ctx->bc,ctx->value);
1245 if (r)
1246 return r;
1247 /* b * LOG2(a) */
1248 memset(&alu, 0, sizeof(struct r600_bc_alu));
1249 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL_IEEE;
1250 r = tgsi_src(ctx, &inst->Src[1], &alu.src[0]);
1251 if (r)
1252 return r;
1253 alu.src[0].chan = tgsi_chan(&inst->Src[1], 0);
1254 alu.src[1].sel = ctx->temp_reg;
1255 alu.dst.sel = ctx->temp_reg;
1256 alu.dst.write = 1;
1257 alu.last = 1;
1258 r = r600_bc_add_alu(ctx->bc, &alu);
1259 if (r)
1260 return r;
1261 r = r600_bc_add_literal(ctx->bc,ctx->value);
1262 if (r)
1263 return r;
1264 /* POW(a,b) = EXP2(b * LOG2(a)) */
1265 memset(&alu, 0, sizeof(struct r600_bc_alu));
1266 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1267 alu.src[0].sel = ctx->temp_reg;
1268 alu.dst.sel = ctx->temp_reg;
1269 alu.dst.write = 1;
1270 alu.last = 1;
1271 r = r600_bc_add_alu(ctx->bc, &alu);
1272 if (r)
1273 return r;
1274 r = r600_bc_add_literal(ctx->bc,ctx->value);
1275 if (r)
1276 return r;
1277 return tgsi_helper_tempx_replicate(ctx);
1278 }
1279
1280 static int tgsi_ssg(struct r600_shader_ctx *ctx)
1281 {
1282 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1283 struct r600_bc_alu alu;
1284 struct r600_bc_alu_src r600_src[3];
1285 int i, r;
1286
1287 r = tgsi_split_constant(ctx, r600_src);
1288 if (r)
1289 return r;
1290
1291 /* tmp = (src > 0 ? 1 : src) */
1292 for (i = 0; i < 4; i++) {
1293 memset(&alu, 0, sizeof(struct r600_bc_alu));
1294 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
1295 alu.is_op3 = 1;
1296
1297 alu.dst.sel = ctx->temp_reg;
1298 alu.dst.chan = i;
1299
1300 alu.src[0] = r600_src[0];
1301 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1302
1303 alu.src[1].sel = V_SQ_ALU_SRC_1;
1304
1305 alu.src[2] = r600_src[0];
1306 alu.src[2].chan = tgsi_chan(&inst->Src[0], i);
1307 if (i == 3)
1308 alu.last = 1;
1309 r = r600_bc_add_alu(ctx->bc, &alu);
1310 if (r)
1311 return r;
1312 }
1313 r = r600_bc_add_literal(ctx->bc, ctx->value);
1314 if (r)
1315 return r;
1316
1317 /* dst = (-tmp > 0 ? -1 : tmp) */
1318 for (i = 0; i < 4; i++) {
1319 memset(&alu, 0, sizeof(struct r600_bc_alu));
1320 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT;
1321 alu.is_op3 = 1;
1322 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1323 if (r)
1324 return r;
1325
1326 alu.src[0].sel = ctx->temp_reg;
1327 alu.src[0].chan = i;
1328 alu.src[0].neg = 1;
1329
1330 alu.src[1].sel = V_SQ_ALU_SRC_1;
1331 alu.src[1].neg = 1;
1332
1333 alu.src[2].sel = ctx->temp_reg;
1334 alu.src[2].chan = i;
1335
1336 if (i == 3)
1337 alu.last = 1;
1338 r = r600_bc_add_alu(ctx->bc, &alu);
1339 if (r)
1340 return r;
1341 }
1342 return 0;
1343 }
1344
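/* Flush any pending literal values, then MOV each channel of ctx->temp_reg
 * into the real destination; channels excluded by the writemask get a NOP
 * in their slot instead.
 */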
1345 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
1346 {
1347 struct r600_bc_alu alu;
1348 int i, r;
1349
1350 r = r600_bc_add_literal(ctx->bc, ctx->value);
1351 if (r)
1352 return r;
1353 for (i = 0; i < 4; i++) {
1354 memset(&alu, 0, sizeof(struct r600_bc_alu));
1355 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
1356 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP;
1357 alu.dst.chan = i;
1358 } else {
1359 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1360 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1361 if (r)
1362 return r;
1363 alu.src[0].sel = ctx->temp_reg;
1364 alu.src[0].chan = i;
1365 }
1366 if (i == 3) {
1367 alu.last = 1;
1368 }
1369 r = r600_bc_add_alu(ctx->bc, &alu);
1370 if (r)
1371 return r;
1372 }
1373 return 0;
1374 }
1375
1376 static int tgsi_op3(struct r600_shader_ctx *ctx)
1377 {
1378 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1379 struct r600_bc_alu_src r600_src[3];
1380 struct r600_bc_alu alu;
1381 int i, j, r;
1382
1383 r = tgsi_split_constant(ctx, r600_src);
1384 if (r)
1385 return r;
1386 /* do it in 2 steps as op3 doesn't support a writemask */
1387 for (i = 0; i < 4; i++) {
1388 memset(&alu, 0, sizeof(struct r600_bc_alu));
1389 alu.inst = ctx->inst_info->r600_opcode;
1390 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1391 alu.src[j] = r600_src[j];
1392 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1393 }
1394 alu.dst.sel = ctx->temp_reg;
1395 alu.dst.chan = i;
1396 alu.dst.write = 1;
1397 alu.is_op3 = 1;
1398 if (i == 3) {
1399 alu.last = 1;
1400 }
1401 r = r600_bc_add_alu(ctx->bc, &alu);
1402 if (r)
1403 return r;
1404 }
1405 return tgsi_helper_copy(ctx, inst);
1406 }
1407
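/* Dot products are emitted as one four-slot reduction (presumably DOT4 in
 * the opcode table): DP2 zeroes the z and w products, DP3 zeroes the w
 * product, and DPH replaces src0.w with 1.0 so the last term is just
 * src1.w.
 */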
1408 static int tgsi_dp(struct r600_shader_ctx *ctx)
1409 {
1410 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1411 struct r600_bc_alu_src r600_src[3];
1412 struct r600_bc_alu alu;
1413 int i, j, r;
1414
1415 r = tgsi_split_constant(ctx, r600_src);
1416 if (r)
1417 return r;
1418 for (i = 0; i < 4; i++) {
1419 memset(&alu, 0, sizeof(struct r600_bc_alu));
1420 alu.inst = ctx->inst_info->r600_opcode;
1421 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1422 alu.src[j] = r600_src[j];
1423 alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
1424 }
1425 alu.dst.sel = ctx->temp_reg;
1426 alu.dst.chan = i;
1427 alu.dst.write = 1;
1428 /* handle some special cases */
1429 switch (ctx->inst_info->tgsi_opcode) {
1430 case TGSI_OPCODE_DP2:
1431 if (i > 1) {
1432 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
1433 alu.src[0].chan = alu.src[1].chan = 0;
1434 }
1435 break;
1436 case TGSI_OPCODE_DP3:
1437 if (i > 2) {
1438 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
1439 alu.src[0].chan = alu.src[1].chan = 0;
1440 }
1441 break;
1442 case TGSI_OPCODE_DPH:
1443 if (i == 3) {
1444 alu.src[0].sel = V_SQ_ALU_SRC_1;
1445 alu.src[0].chan = 0;
1446 alu.src[0].neg = 0;
1447 }
1448 break;
1449 default:
1450 break;
1451 }
1452 if (i == 3) {
1453 alu.last = 1;
1454 }
1455 r = r600_bc_add_alu(ctx->bc, &alu);
1456 if (r)
1457 return r;
1458 }
1459 return tgsi_helper_copy(ctx, inst);
1460 }
1461
1462 static int tgsi_tex(struct r600_shader_ctx *ctx)
1463 {
1464 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1465 struct r600_bc_tex tex;
1466 struct r600_bc_alu alu;
1467 unsigned src_gpr;
1468 int r, i;
1469 int opcode;
1470 boolean src_not_temp = inst->Src[0].Register.File != TGSI_FILE_TEMPORARY;
1471 uint32_t lit_vals[4];
1472
1473 src_gpr = ctx->file_offset[inst->Src[0].Register.File] + inst->Src[0].Register.Index;
1474
1475 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1476 /* Add perspective divide */
1477 memset(&alu, 0, sizeof(struct r600_bc_alu));
1478 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
1479 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1480 if (r)
1481 return r;
1482
1483 alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
1484 alu.dst.sel = ctx->temp_reg;
1485 alu.dst.chan = 3;
1486 alu.last = 1;
1487 alu.dst.write = 1;
1488 r = r600_bc_add_alu(ctx->bc, &alu);
1489 if (r)
1490 return r;
1491
1492 for (i = 0; i < 3; i++) {
1493 memset(&alu, 0, sizeof(struct r600_bc_alu));
1494 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1495 alu.src[0].sel = ctx->temp_reg;
1496 alu.src[0].chan = 3;
1497 r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
1498 if (r)
1499 return r;
1500 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1501 alu.dst.sel = ctx->temp_reg;
1502 alu.dst.chan = i;
1503 alu.dst.write = 1;
1504 r = r600_bc_add_alu(ctx->bc, &alu);
1505 if (r)
1506 return r;
1507 }
1508 memset(&alu, 0, sizeof(struct r600_bc_alu));
1509 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1510 alu.src[0].sel = V_SQ_ALU_SRC_1;
1511 alu.src[0].chan = 0;
1512 alu.dst.sel = ctx->temp_reg;
1513 alu.dst.chan = 3;
1514 alu.last = 1;
1515 alu.dst.write = 1;
1516 r = r600_bc_add_alu(ctx->bc, &alu);
1517 if (r)
1518 return r;
1519 src_not_temp = false;
1520 src_gpr = ctx->temp_reg;
1521 }
1522
1523 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE) {
1524 int src_chan, src2_chan;
1525
1526 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
1527 for (i = 0; i < 4; i++) {
1528 memset(&alu, 0, sizeof(struct r600_bc_alu));
1529 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
1530 switch (i) {
1531 case 0:
1532 src_chan = 2;
1533 src2_chan = 1;
1534 break;
1535 case 1:
1536 src_chan = 2;
1537 src2_chan = 0;
1538 break;
1539 case 2:
1540 src_chan = 0;
1541 src2_chan = 2;
1542 break;
1543 case 3:
1544 src_chan = 1;
1545 src2_chan = 2;
1546 break;
1547 }
1548 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1549 if (r)
1550 return r;
1551 alu.src[0].chan = tgsi_chan(&inst->Src[0], src_chan);
1552 r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
1553 if (r)
1554 return r;
1555 alu.src[1].chan = tgsi_chan(&inst->Src[0], src2_chan);
1556 alu.dst.sel = ctx->temp_reg;
1557 alu.dst.chan = i;
1558 if (i == 3)
1559 alu.last = 1;
1560 alu.dst.write = 1;
1561 r = r600_bc_add_alu(ctx->bc, &alu);
1562 if (r)
1563 return r;
1564 }
1565
1566 /* tmp1.z = RCP_e(|tmp1.z|) */
1567 memset(&alu, 0, sizeof(struct r600_bc_alu));
1568 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE;
1569 alu.src[0].sel = ctx->temp_reg;
1570 alu.src[0].chan = 2;
1571 alu.src[0].abs = 1;
1572 alu.dst.sel = ctx->temp_reg;
1573 alu.dst.chan = 2;
1574 alu.dst.write = 1;
1575 alu.last = 1;
1576 r = r600_bc_add_alu(ctx->bc, &alu);
1577 if (r)
1578 return r;
1579
1580 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
1581 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
1582 * muladd has no writemask, have to use another temp
1583 */
1584 memset(&alu, 0, sizeof(struct r600_bc_alu));
1585 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1586 alu.is_op3 = 1;
1587
1588 alu.src[0].sel = ctx->temp_reg;
1589 alu.src[0].chan = 0;
1590 alu.src[1].sel = ctx->temp_reg;
1591 alu.src[1].chan = 2;
1592
1593 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
1594 alu.src[2].chan = 0;
1595
1596 alu.dst.sel = ctx->temp_reg;
1597 alu.dst.chan = 0;
1598 alu.dst.write = 1;
1599
1600 r = r600_bc_add_alu(ctx->bc, &alu);
1601 if (r)
1602 return r;
1603
1604 memset(&alu, 0, sizeof(struct r600_bc_alu));
1605 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1606 alu.is_op3 = 1;
1607
1608 alu.src[0].sel = ctx->temp_reg;
1609 alu.src[0].chan = 1;
1610 alu.src[1].sel = ctx->temp_reg;
1611 alu.src[1].chan = 2;
1612
1613 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
1614 alu.src[2].chan = 0;
1615
1616 alu.dst.sel = ctx->temp_reg;
1617 alu.dst.chan = 1;
1618 alu.dst.write = 1;
1619
1620 alu.last = 1;
1621 r = r600_bc_add_alu(ctx->bc, &alu);
1622 if (r)
1623 return r;
1624
1625 lit_vals[0] = fui(1.5f);
1626
1627 r = r600_bc_add_literal(ctx->bc, lit_vals);
1628 if (r)
1629 return r;
1630 src_not_temp = false;
1631 src_gpr = ctx->temp_reg;
1632 }
1633
1634 if (src_not_temp) {
1635 for (i = 0; i < 4; i++) {
1636 memset(&alu, 0, sizeof(struct r600_bc_alu));
1637 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
1638 alu.src[0].sel = src_gpr;
1639 alu.src[0].chan = i;
1640 alu.dst.sel = ctx->temp_reg;
1641 alu.dst.chan = i;
1642 if (i == 3)
1643 alu.last = 1;
1644 alu.dst.write = 1;
1645 r = r600_bc_add_alu(ctx->bc, &alu);
1646 if (r)
1647 return r;
1648 }
1649 src_gpr = ctx->temp_reg;
1650 }
1651
1652 opcode = ctx->inst_info->r600_opcode;
1653 if (opcode == SQ_TEX_INST_SAMPLE &&
1654 (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D || inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D))
1655 opcode = SQ_TEX_INST_SAMPLE_C;
1656
1657 memset(&tex, 0, sizeof(struct r600_bc_tex));
1658 tex.inst = opcode;
1659 tex.resource_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
1660 tex.sampler_id = tex.resource_id;
1661 tex.src_gpr = src_gpr;
1662 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
1663 tex.dst_sel_x = 0;
1664 tex.dst_sel_y = 1;
1665 tex.dst_sel_z = 2;
1666 tex.dst_sel_w = 3;
1667 tex.src_sel_x = 0;
1668 tex.src_sel_y = 1;
1669 tex.src_sel_z = 2;
1670 tex.src_sel_w = 3;
1671
1672 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE) {
1673 tex.src_sel_x = 1;
1674 tex.src_sel_y = 0;
1675 tex.src_sel_z = 3;
1676 tex.src_sel_w = 1;
1677 }
1678
1679 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
1680 tex.coord_type_x = 1;
1681 tex.coord_type_y = 1;
1682 tex.coord_type_z = 1;
1683 tex.coord_type_w = 1;
1684 }
1685
1686 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D || inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D)
1687 tex.coord_type_w = 2;
1688
1689 r = r600_bc_add_tex(ctx->bc, &tex);
1690 if (r)
1691 return r;
1692
1693 /* add shadow ambient support - gallium doesn't do it yet */
1694 return 0;
1695
1696 }
1697
1698 static int tgsi_lrp(struct r600_shader_ctx *ctx)
1699 {
1700 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1701 struct r600_bc_alu_src r600_src[3];
1702 struct r600_bc_alu alu;
1703 unsigned i;
1704 int r;
1705
1706 r = tgsi_split_constant(ctx, r600_src);
1707 if (r)
1708 return r;
1709 /* 1 - src0 */
1710 for (i = 0; i < 4; i++) {
1711 memset(&alu, 0, sizeof(struct r600_bc_alu));
1712 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD;
1713 alu.src[0].sel = V_SQ_ALU_SRC_1;
1714 alu.src[0].chan = 0;
1715 alu.src[1] = r600_src[0];
1716 alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
1717 alu.src[1].neg = 1;
1718 alu.dst.sel = ctx->temp_reg;
1719 alu.dst.chan = i;
1720 if (i == 3) {
1721 alu.last = 1;
1722 }
1723 alu.dst.write = 1;
1724 r = r600_bc_add_alu(ctx->bc, &alu);
1725 if (r)
1726 return r;
1727 }
1728 r = r600_bc_add_literal(ctx->bc, ctx->value);
1729 if (r)
1730 return r;
1731
1732 /* (1 - src0) * src2 */
1733 for (i = 0; i < 4; i++) {
1734 memset(&alu, 0, sizeof(struct r600_bc_alu));
1735 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1736 alu.src[0].sel = ctx->temp_reg;
1737 alu.src[0].chan = i;
1738 alu.src[1] = r600_src[2];
1739 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1740 alu.dst.sel = ctx->temp_reg;
1741 alu.dst.chan = i;
1742 if (i == 3) {
1743 alu.last = 1;
1744 }
1745 alu.dst.write = 1;
1746 r = r600_bc_add_alu(ctx->bc, &alu);
1747 if (r)
1748 return r;
1749 }
1750 r = r600_bc_add_literal(ctx->bc, ctx->value);
1751 if (r)
1752 return r;
1753
1754 /* src0 * src1 + (1 - src0) * src2 */
1755 for (i = 0; i < 4; i++) {
1756 memset(&alu, 0, sizeof(struct r600_bc_alu));
1757 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1758 alu.is_op3 = 1;
1759 alu.src[0] = r600_src[0];
1760 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1761 alu.src[1] = r600_src[1];
1762 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
1763 alu.src[2].sel = ctx->temp_reg;
1764 alu.src[2].chan = i;
1765 alu.dst.sel = ctx->temp_reg;
1766 alu.dst.chan = i;
1767 if (i == 3) {
1768 alu.last = 1;
1769 }
1770 r = r600_bc_add_alu(ctx->bc, &alu);
1771 if (r)
1772 return r;
1773 }
1774 return tgsi_helper_copy(ctx, inst);
1775 }
1776
1777 static int tgsi_cmp(struct r600_shader_ctx *ctx)
1778 {
1779 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1780 struct r600_bc_alu_src r600_src[3];
1781 struct r600_bc_alu alu;
1782 int use_temp = 0;
1783 int i, r;
1784
1785 r = tgsi_split_constant(ctx, r600_src);
1786 if (r)
1787 return r;
1788
1789 if (inst->Dst[0].Register.WriteMask != 0xf)
1790 use_temp = 1;
1791
1792 for (i = 0; i < 4; i++) {
1793 memset(&alu, 0, sizeof(struct r600_bc_alu));
1794 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE;
1795 alu.src[0] = r600_src[0];
1796 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
1797
1798 alu.src[1] = r600_src[2];
1799 alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
1800
1801 alu.src[2] = r600_src[1];
1802 alu.src[2].chan = tgsi_chan(&inst->Src[1], i);
1803
1804 if (use_temp)
1805 alu.dst.sel = ctx->temp_reg;
1806 else {
1807 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1808 if (r)
1809 return r;
1810 }
1811 alu.dst.chan = i;
1812 alu.dst.write = 1;
1813 alu.is_op3 = 1;
1814 if (i == 3)
1815 alu.last = 1;
1816 r = r600_bc_add_alu(ctx->bc, &alu);
1817 if (r)
1818 return r;
1819 }
1820 if (use_temp)
1821 return tgsi_helper_copy(ctx, inst);
1822 return 0;
1823 }
1824
1825 static int tgsi_xpd(struct r600_shader_ctx *ctx)
1826 {
1827 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1828 struct r600_bc_alu_src r600_src[3];
1829 struct r600_bc_alu alu;
1830 uint32_t use_temp = 0;
1831 int i, r;
1832
1833 if (inst->Dst[0].Register.WriteMask != 0xf)
1834 use_temp = 1;
1835
1836 r = tgsi_split_constant(ctx, r600_src);
1837 if (r)
1838 return r;
1839
1840 for (i = 0; i < 4; i++) {
1841 memset(&alu, 0, sizeof(struct r600_bc_alu));
1842 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
1843
1844 alu.src[0] = r600_src[0];
1845 switch (i) {
1846 case 0:
1847 alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
1848 break;
1849 case 1:
1850 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1851 break;
1852 case 2:
1853 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1854 break;
1855 case 3:
1856 alu.src[0].sel = V_SQ_ALU_SRC_0;
1857 alu.src[0].chan = i;
1858 }
1859
1860 alu.src[1] = r600_src[1];
1861 switch (i) {
1862 case 0:
1863 alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
1864 break;
1865 case 1:
1866 alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
1867 break;
1868 case 2:
1869 alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
1870 break;
1871 case 3:
1872 alu.src[1].sel = V_SQ_ALU_SRC_0;
1873 alu.src[1].chan = i;
1874 }
1875
1876 alu.dst.sel = ctx->temp_reg;
1877 alu.dst.chan = i;
1878 alu.dst.write = 1;
1879
1880 if (i == 3)
1881 alu.last = 1;
1882 r = r600_bc_add_alu(ctx->bc, &alu);
1883 if (r)
1884 return r;
1885 }
1886
1887 for (i = 0; i < 4; i++) {
1888 memset(&alu, 0, sizeof(struct r600_bc_alu));
1889 alu.inst = V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD;
1890
1891 alu.src[0] = r600_src[0];
1892 switch (i) {
1893 case 0:
1894 alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
1895 break;
1896 case 1:
1897 alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
1898 break;
1899 case 2:
1900 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1901 break;
1902 case 3:
1903 alu.src[0].sel = V_SQ_ALU_SRC_0;
1904 alu.src[0].chan = i;
1905 }
1906
1907 alu.src[1] = r600_src[1];
1908 switch (i) {
1909 case 0:
1910 alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
1911 break;
1912 case 1:
1913 alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
1914 break;
1915 case 2:
1916 alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
1917 break;
1918 case 3:
1919 alu.src[1].sel = V_SQ_ALU_SRC_0;
1920 alu.src[1].chan = i;
1921 }
1922
1923 alu.src[2].sel = ctx->temp_reg;
1924 alu.src[2].neg = 1;
1925 alu.src[2].chan = i;
1926
1927 if (use_temp)
1928 alu.dst.sel = ctx->temp_reg;
1929 else {
1930 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1931 if (r)
1932 return r;
1933 }
1934 alu.dst.chan = i;
1935 alu.dst.write = 1;
1936 alu.is_op3 = 1;
1937 if (i == 3)
1938 alu.last = 1;
1939 r = r600_bc_add_alu(ctx->bc, &alu);
1940 if (r)
1941 return r;
1942 }
1943 if (use_temp)
1944 return tgsi_helper_copy(ctx, inst);
1945 return 0;
1946 }
1947
1948 static int tgsi_exp(struct r600_shader_ctx *ctx)
1949 {
1950 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1951 struct r600_bc_alu_src r600_src[3];
1952 struct r600_bc_alu alu;
1953 int r;
1954
1955 /* result.x = 2^floor(src); */
1956 if (inst->Dst[0].Register.WriteMask & 1) {
1957 memset(&alu, 0, sizeof(struct r600_bc_alu));
1958
1959 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR;
1960 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
1961 if (r)
1962 return r;
1963
1964 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
1965
1966 alu.dst.sel = ctx->temp_reg;
1967 alu.dst.chan = 0;
1968 alu.dst.write = 1;
1969 alu.last = 1;
1970 r = r600_bc_add_alu(ctx->bc, &alu);
1971 if (r)
1972 return r;
1973
1974 r = r600_bc_add_literal(ctx->bc, ctx->value);
1975 if (r)
1976 return r;
1977
		/* reuse alu for EXP_IEEE of the floored value; clear it first so no
		 * source modifiers linger from the FLOOR above */
		memset(&alu, 0, sizeof(struct r600_bc_alu));
1978		alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
1979 alu.src[0].sel = ctx->temp_reg;
1980 alu.src[0].chan = 0;
1981
1982 alu.dst.sel = ctx->temp_reg;
1983 alu.dst.chan = 0;
1984 alu.dst.write = 1;
1985 alu.last = 1;
1986 r = r600_bc_add_alu(ctx->bc, &alu);
1987 if (r)
1988 return r;
1989
1990 r = r600_bc_add_literal(ctx->bc, ctx->value);
1991 if (r)
1992 return r;
1993 }
1994
1995	/* result.y = fract(src.x) = src.x - floor(src.x); */
1996 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
1997 memset(&alu, 0, sizeof(struct r600_bc_alu));
1998
1999 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT;
2001 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
2002 if (r)
2003 return r;
2004 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
2005
2006 alu.dst.sel = ctx->temp_reg;
2007 // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2008 // if (r)
2009 // return r;
2010 alu.dst.write = 1;
2011 alu.dst.chan = 1;
2012
2013 alu.last = 1;
2014
2015 r = r600_bc_add_alu(ctx->bc, &alu);
2016 if (r)
2017 return r;
2018 r = r600_bc_add_literal(ctx->bc, ctx->value);
2019 if (r)
2020 return r;
2021 }
2022
2023	/* result.z = RoughApprox2ToX(src.x); */
2024 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
2025 memset(&alu, 0, sizeof(struct r600_bc_alu));
2026 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE;
2027 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
2028 if (r)
2029 return r;
2030 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
2031
2032 alu.dst.sel = ctx->temp_reg;
2033 alu.dst.write = 1;
2034 alu.dst.chan = 2;
2035
2036 alu.last = 1;
2037
2038 r = r600_bc_add_alu(ctx->bc, &alu);
2039 if (r)
2040 return r;
2041 r = r600_bc_add_literal(ctx->bc, ctx->value);
2042 if (r)
2043 return r;
2044 }
2045
2046	/* result.w = 1.0; */
2047 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
2048 memset(&alu, 0, sizeof(struct r600_bc_alu));
2049
2050 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
2051 alu.src[0].sel = V_SQ_ALU_SRC_1;
2052 alu.src[0].chan = 0;
2053
2054 alu.dst.sel = ctx->temp_reg;
2055 alu.dst.chan = 3;
2056 alu.dst.write = 1;
2057 alu.last = 1;
2058 r = r600_bc_add_alu(ctx->bc, &alu);
2059 if (r)
2060 return r;
2061 r = r600_bc_add_literal(ctx->bc, ctx->value);
2062 if (r)
2063 return r;
2064 }
2065 return tgsi_helper_copy(ctx, inst);
2066 }
2067
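/*
 * ARL is lowered to MOVA_FLOOR, which floors src.x into the address
 * register (AR) rather than a GPR, so no ALU dst is set up here; see
 * the TODO below about AR not persisting across clauses.
 */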
2068 static int tgsi_arl(struct r600_shader_ctx *ctx)
2069 {
2070	/* TODO: from r600c, AR values don't persist between clauses */
2071 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2072 struct r600_bc_alu alu;
2073 int r;
2074 memset(&alu, 0, sizeof(struct r600_bc_alu));
2075
2076 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR;
2077
2078 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
2079 if (r)
2080 return r;
2081 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
2082
2083 alu.last = 1;
2084
2085 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU);
2086 if (r)
2087 return r;
2088 return 0;
2089 }
2090
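/*
 * DST (distance vector): a per-channel MUL where channels that must
 * read as 1.0 are fed the ALU_SRC_1 constant, giving
 *   dst.x = 1,  dst.y = src0.y * src1.y,
 *   dst.z = src0.z,  dst.w = src1.w
 */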
2091 static int tgsi_opdst(struct r600_shader_ctx *ctx)
2092 {
2093 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2094 struct r600_bc_alu alu;
2095 int i, r = 0;
2096
2097 for (i = 0; i < 4; i++) {
2098 memset(&alu, 0, sizeof(struct r600_bc_alu));
2099
2100 alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL;
2101 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2102 if (r)
2103 return r;
2104
2105 if (i == 0 || i == 3) {
2106 alu.src[0].sel = V_SQ_ALU_SRC_1;
2107 } else {
2108 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
2109 if (r)
2110 return r;
2111 alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
2112 }
2113
2114 if (i == 0 || i == 2) {
2115 alu.src[1].sel = V_SQ_ALU_SRC_1;
2116 } else {
2117 r = tgsi_src(ctx, &inst->Src[1], &alu.src[1]);
2118 if (r)
2119 return r;
2120 alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
2121 }
2122 if (i == 3)
2123 alu.last = 1;
2124 r = r600_bc_add_alu(ctx->bc, &alu);
2125 if (r)
2126 return r;
2127 }
2128 return 0;
2129 }
2130
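/*
 * Emit a predicate-setting compare (src.x against 0) in an
 * ALU_PUSH_BEFORE clause.  tgsi_if uses this with PRED_SETNE so the
 * JUMP it adds afterwards can skip the IF body when the condition
 * fails; the matching POP is emitted by tgsi_endif.
 */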
2131 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
2132 {
2133 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2134 struct r600_bc_alu alu;
2135 int r;
2136
2137 memset(&alu, 0, sizeof(struct r600_bc_alu));
2138 alu.inst = opcode;
2139 alu.predicate = 1;
2140
2141 alu.dst.sel = ctx->temp_reg;
2142 alu.dst.write = 1;
2143 alu.dst.chan = 0;
2144
2145 r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
2146 if (r)
2147 return r;
2148 alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
2149 alu.src[1].sel = V_SQ_ALU_SRC_0;
2150 alu.src[1].chan = 0;
2151
2152 alu.last = 1;
2153
2154 r = r600_bc_add_alu_type(ctx->bc, &alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE);
2155 if (r)
2156 return r;
2157 return 0;
2158 }
2159
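/* Emit a POP CF instruction that restores "pops" stack levels. */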
2160 static int pops(struct r600_shader_ctx *ctx, int pops)
2161 {
2162 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_POP);
2163 ctx->bc->cf_last->pop_count = pops;
2164 return 0;
2165 }
2166
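/*
 * Call-stack depth bookkeeping for the current subroutine level.
 * Pushes are costed in stack sub-entries (1 for FC_PUSH_VPM, 4 for
 * FC_PUSH_WQM and FC_LOOP; the exact costs appear to be inherited from
 * r600c).  callstack_check_depth() also updates the maximum depth seen,
 * which is presumably used later to size the hardware stack.
 */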
2167 static inline void callstack_decrease_current(struct r600_shader_ctx *ctx, unsigned reason)
2168 {
2169 switch(reason) {
2170 case FC_PUSH_VPM:
2171 ctx->bc->callstack[ctx->bc->call_sp].current--;
2172 break;
2173 case FC_PUSH_WQM:
2174 case FC_LOOP:
2175 ctx->bc->callstack[ctx->bc->call_sp].current -= 4;
2176 break;
2177 case FC_REP:
2178		/* TODO: for 16 vp asics this should be -= 2 */
2179		ctx->bc->callstack[ctx->bc->call_sp].current--;
2180 break;
2181 }
2182 }
2183
2184 static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only)
2185 {
2186 if (check_max_only) {
2187 int diff;
2188 switch (reason) {
2189 case FC_PUSH_VPM:
2190 diff = 1;
2191 break;
2192 case FC_PUSH_WQM:
2193 diff = 4;
2194			break;
		default:
			/* only VPM/WQM pushes are expected here; stay conservative */
			diff = 4;
			break;
2195		}
2196 if ((ctx->bc->callstack[ctx->bc->call_sp].current + diff) >
2197 ctx->bc->callstack[ctx->bc->call_sp].max) {
2198 ctx->bc->callstack[ctx->bc->call_sp].max =
2199 ctx->bc->callstack[ctx->bc->call_sp].current + diff;
2200 }
2201 return;
2202 }
2203 switch (reason) {
2204 case FC_PUSH_VPM:
2205 ctx->bc->callstack[ctx->bc->call_sp].current++;
2206 break;
2207 case FC_PUSH_WQM:
2208 case FC_LOOP:
2209 ctx->bc->callstack[ctx->bc->call_sp].current += 4;
2210 break;
2211 case FC_REP:
2212 ctx->bc->callstack[ctx->bc->call_sp].current++;
2213 break;
2214 }
2215
2216 if ((ctx->bc->callstack[ctx->bc->call_sp].current) >
2217 ctx->bc->callstack[ctx->bc->call_sp].max) {
2218 ctx->bc->callstack[ctx->bc->call_sp].max =
2219 ctx->bc->callstack[ctx->bc->call_sp].current;
2220 }
2221 }
2222
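/*
 * Flow-control stack helpers.  fc_pushlevel()/fc_poplevel() track the
 * CF instruction that opened the current IF or LOOP; fc_set_mid()
 * records "middle" CF instructions (ELSE, BREAK, CONTINUE) so
 * tgsi_endif()/tgsi_endloop() can patch their cf_addr targets later.
 * Note that the realloc() result below is not checked for failure.
 */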
2223 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
2224 {
2225 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
2226
2227 sp->mid = (struct r600_bc_cf **)realloc((void *)sp->mid,
2228 sizeof(struct r600_bc_cf *) * (sp->num_mid + 1));
2229 sp->mid[sp->num_mid] = ctx->bc->cf_last;
2230 sp->num_mid++;
2231 }
2232
2233 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
2234 {
2235 ctx->bc->fc_sp++;
2236 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
2237 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
2238 }
2239
2240 static void fc_poplevel(struct r600_shader_ctx *ctx)
2241 {
2242 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
2243 if (sp->mid) {
2244 free(sp->mid);
2245 sp->mid = NULL;
2246 }
2247 sp->num_mid = 0;
2248 sp->start = NULL;
2249 sp->type = 0;
2250 ctx->bc->fc_sp--;
2251 }
2252
2253 #if 0
2254 static int emit_return(struct r600_shader_ctx *ctx)
2255 {
2256 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_RETURN);
2257 return 0;
2258 }
2259
2260 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
2261 {
2262
2263 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
2264 ctx->bc->cf_last->pop_count = pops;
2265 /* TODO work out offset */
2266 return 0;
2267 }
2268
2269 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
2270 {
2271 return 0;
2272 }
2273
2274 static void emit_testflag(struct r600_shader_ctx *ctx)
2275 {
2276
2277 }
2278
2279 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
2280 {
2281 emit_testflag(ctx);
2282 emit_jump_to_offset(ctx, 1, 4);
2283 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
2284 pops(ctx, ifidx + 1);
2285 emit_return(ctx);
2286 }
2287
2288 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
2289 {
2290 emit_testflag(ctx);
2291
2292 r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
2293 ctx->bc->cf_last->pop_count = 1;
2294
2295 fc_set_mid(ctx, fc_sp);
2296
2297 pops(ctx, 1);
2298 }
2299 #endif
2300
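/*
 * IF/ELSE/ENDIF lowering: tgsi_if emits the predicate compare plus a
 * JUMP whose target is filled in later; tgsi_else records itself as the
 * "mid" CF and points the JUMP at itself; tgsi_endif emits a POP and
 * patches the pending JUMP (or the recorded ELSE) to the CF after it.
 */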
2301 static int tgsi_if(struct r600_shader_ctx *ctx)
2302 {
2303 emit_logic_pred(ctx, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE);
2304
2305 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_JUMP);
2306
2307 fc_pushlevel(ctx, FC_IF);
2308
2309 callstack_check_depth(ctx, FC_PUSH_VPM, 0);
2310 return 0;
2311 }
2312
2313 static int tgsi_else(struct r600_shader_ctx *ctx)
2314 {
2315 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_ELSE);
2316 ctx->bc->cf_last->pop_count = 1;
2317
2318 fc_set_mid(ctx, ctx->bc->fc_sp);
2319 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
2320 return 0;
2321 }
2322
2323 static int tgsi_endif(struct r600_shader_ctx *ctx)
2324 {
2325 pops(ctx, 1);
2326 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
2327 R600_ERR("if/endif unbalanced in shader\n");
2328		return -EINVAL;
2329 }
2330
2331 if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
2332 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
2333 ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
2334 } else {
2335 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
2336 }
2337 fc_poplevel(ctx);
2338
2339 callstack_decrease_current(ctx, FC_PUSH_VPM);
2340 return 0;
2341 }
2342
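/*
 * Loop lowering: BGNLOOP emits LOOP_START_NO_AL and pushes an FC_LOOP
 * level; ENDLOOP emits LOOP_END and then fixes up the loop start/end
 * and BRK/CONT addresses as described in the comment in tgsi_endloop.
 */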
2343 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
2344 {
2345 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL);
2346
2347 fc_pushlevel(ctx, FC_LOOP);
2348
2349 /* check stack depth */
2350 callstack_check_depth(ctx, FC_LOOP, 0);
2351 return 0;
2352 }
2353
2354 static int tgsi_endloop(struct r600_shader_ctx *ctx)
2355 {
2356 int i;
2357
2358 r600_bc_add_cfinst(ctx->bc, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END);
2359
2360 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
2361		R600_ERR("loop/endloop unbalanced in shader\n");
2362 return -EINVAL;
2363 }
2364
2365	/* fixup loop pointers - from r600isa
2366	   LOOP END points to CF after LOOP START,
2367	   LOOP START points to CF after LOOP END,
2368	   BRK/CONT point to LOOP END CF
2369	*/
2370 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
2371
2372 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
2373
2374 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
2375 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
2376 }
2377 /* TODO add LOOPRET support */
2378 fc_poplevel(ctx);
2379 callstack_decrease_current(ctx, FC_LOOP);
2380 return 0;
2381 }
2382
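/*
 * BRK/CONT: find the innermost enclosing FC_LOOP on the flow-control
 * stack, emit LOOP_BREAK or LOOP_CONTINUE (taken from
 * ctx->inst_info->r600_opcode), record it as a "mid" CF so tgsi_endloop
 * can patch its target, and emit a single-level POP.
 */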
2383 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
2384 {
2385 unsigned int fscp;
2386
2387	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--) {
2389		if (ctx->bc->fc_stack[fscp].type == FC_LOOP)
2390			break;
2391	}
2392
2393 if (fscp == 0) {
2394 R600_ERR("Break not inside loop/endloop pair\n");
2395 return -EINVAL;
2396 }
2397
2398 r600_bc_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode);
2399 ctx->bc->cf_last->pop_count = 1;
2400
2401 fc_set_mid(ctx, fscp);
2402
2403 pops(ctx, 1);
2404 callstack_check_depth(ctx, FC_PUSH_VPM, 1);
2405 return 0;
2406 }
2407
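/*
 * Dispatch table indexed by TGSI opcode number: each row is
 * {tgsi_opcode, is_op3, r600 opcode, handler}.  The bare numeric rows
 * marked "gap" keep unused opcode slots aligned with their index and
 * all route to tgsi_unsupported.
 */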
2408 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
2409 {TGSI_OPCODE_ARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_arl},
2410 {TGSI_OPCODE_MOV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
2411 {TGSI_OPCODE_LIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit},
2412 {TGSI_OPCODE_RCP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE, tgsi_trans_srcx_replicate},
2413 {TGSI_OPCODE_RSQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE, tgsi_trans_srcx_replicate},
2414 {TGSI_OPCODE_EXP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_exp},
2415 {TGSI_OPCODE_LOG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2416 {TGSI_OPCODE_MUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL, tgsi_op2},
2417 {TGSI_OPCODE_ADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
2418 {TGSI_OPCODE_DP3, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2419 {TGSI_OPCODE_DP4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2420 {TGSI_OPCODE_DST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_opdst},
2421 {TGSI_OPCODE_MIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN, tgsi_op2},
2422 {TGSI_OPCODE_MAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX, tgsi_op2},
2423 {TGSI_OPCODE_SLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2_swap},
2424 {TGSI_OPCODE_SGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2},
2425 {TGSI_OPCODE_MAD, 1, V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD, tgsi_op3},
2426 {TGSI_OPCODE_SUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD, tgsi_op2},
2427 {TGSI_OPCODE_LRP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lrp},
2428 {TGSI_OPCODE_CND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2429 /* gap */
2430 {20, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2431 {TGSI_OPCODE_DP2A, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2432 /* gap */
2433 {22, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2434 {23, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2435 {TGSI_OPCODE_FRC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT, tgsi_op2},
2436 {TGSI_OPCODE_CLAMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2437 {TGSI_OPCODE_FLR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR, tgsi_op2},
2438 {TGSI_OPCODE_ROUND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2439 {TGSI_OPCODE_EX2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE, tgsi_trans_srcx_replicate},
2440 {TGSI_OPCODE_LG2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE, tgsi_trans_srcx_replicate},
2441 {TGSI_OPCODE_POW, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_pow},
2442 {TGSI_OPCODE_XPD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_xpd},
2443 /* gap */
2444 {32, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2445 {TGSI_OPCODE_ABS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2},
2446 {TGSI_OPCODE_RCC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2447 {TGSI_OPCODE_DPH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2448 {TGSI_OPCODE_COS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS, tgsi_trig},
2449 {TGSI_OPCODE_DDX, 0, SQ_TEX_INST_GET_GRADIENTS_H, tgsi_tex},
2450 {TGSI_OPCODE_DDY, 0, SQ_TEX_INST_GET_GRADIENTS_V, tgsi_tex},
2451 {TGSI_OPCODE_KILP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill}, /* predicated kill */
2452 {TGSI_OPCODE_PK2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2453 {TGSI_OPCODE_PK2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2454 {TGSI_OPCODE_PK4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2455 {TGSI_OPCODE_PK4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2456 {TGSI_OPCODE_RFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2457 {TGSI_OPCODE_SEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE, tgsi_op2},
2458 {TGSI_OPCODE_SFL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2459 {TGSI_OPCODE_SGT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT, tgsi_op2},
2460 {TGSI_OPCODE_SIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN, tgsi_trig},
2461 {TGSI_OPCODE_SLE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE, tgsi_op2_swap},
2462 {TGSI_OPCODE_SNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE, tgsi_op2},
2463 {TGSI_OPCODE_STR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2464 {TGSI_OPCODE_TEX, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
2465 {TGSI_OPCODE_TXD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2466 {TGSI_OPCODE_TXP, 0, SQ_TEX_INST_SAMPLE, tgsi_tex},
2467 {TGSI_OPCODE_UP2H, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2468 {TGSI_OPCODE_UP2US, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2469 {TGSI_OPCODE_UP4B, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2470 {TGSI_OPCODE_UP4UB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2471 {TGSI_OPCODE_X2D, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2472 {TGSI_OPCODE_ARA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2473 {TGSI_OPCODE_ARR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2474 {TGSI_OPCODE_BRA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2475 {TGSI_OPCODE_CAL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2476 {TGSI_OPCODE_RET, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2477 {TGSI_OPCODE_SSG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_ssg},
2478 {TGSI_OPCODE_CMP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_cmp},
2479 {TGSI_OPCODE_SCS, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_scs},
2480 {TGSI_OPCODE_TXB, 0, SQ_TEX_INST_SAMPLE_L, tgsi_tex},
2481 {TGSI_OPCODE_NRM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2482 {TGSI_OPCODE_DIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2483 {TGSI_OPCODE_DP2, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4, tgsi_dp},
2484 {TGSI_OPCODE_TXL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2485 {TGSI_OPCODE_BRK, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK, tgsi_loop_brk_cont},
2486 {TGSI_OPCODE_IF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_if},
2487 /* gap */
2488 {75, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2489 {76, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2490 {TGSI_OPCODE_ELSE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_else},
2491 {TGSI_OPCODE_ENDIF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endif},
2492 /* gap */
2493 {79, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2494 {80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2495 {TGSI_OPCODE_PUSHA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2496 {TGSI_OPCODE_POPA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2497 {TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2498 {TGSI_OPCODE_I2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2499 {TGSI_OPCODE_NOT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2500 {TGSI_OPCODE_TRUNC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_trans_srcx_replicate},
2501 {TGSI_OPCODE_SHL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2502 /* gap */
2503 {88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2504 {TGSI_OPCODE_AND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2505 {TGSI_OPCODE_OR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2506 {TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2507 {TGSI_OPCODE_XOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2508 {TGSI_OPCODE_SAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2509 {TGSI_OPCODE_TXF, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2510 {TGSI_OPCODE_TXQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2511 {TGSI_OPCODE_CONT, 0, V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE, tgsi_loop_brk_cont},
2512 {TGSI_OPCODE_EMIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2513 {TGSI_OPCODE_ENDPRIM, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2514 {TGSI_OPCODE_BGNLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_bgnloop},
2515 {TGSI_OPCODE_BGNSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2516 {TGSI_OPCODE_ENDLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
2517 {TGSI_OPCODE_ENDSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2518 /* gap */
2519 {103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2520 {104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2521 {105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2522 {106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2523 {TGSI_OPCODE_NOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2524 /* gap */
2525 {108, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2526 {109, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2527 {110, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2528 {111, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2529 {TGSI_OPCODE_NRM4, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2530 {TGSI_OPCODE_CALLNZ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2531 {TGSI_OPCODE_IFC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2532 {TGSI_OPCODE_BREAKC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2533 {TGSI_OPCODE_KIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT, tgsi_kill}, /* conditional kill */
2534 {TGSI_OPCODE_END, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end}, /* aka HALT */
2535 /* gap */
2536 {118, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2537 {TGSI_OPCODE_F2I, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2538 {TGSI_OPCODE_IDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2539 {TGSI_OPCODE_IMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2540 {TGSI_OPCODE_IMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2541 {TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2542 {TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2543 {TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2544 {TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2545 {TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2546 {TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2547 {TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2548 {TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2549 {TGSI_OPCODE_UMAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2550 {TGSI_OPCODE_UMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2551 {TGSI_OPCODE_UMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2552 {TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2553 {TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2554 {TGSI_OPCODE_USEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2555 {TGSI_OPCODE_USGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2556 {TGSI_OPCODE_USHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2557 {TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2558 {TGSI_OPCODE_USNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2559 {TGSI_OPCODE_SWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2560 {TGSI_OPCODE_CASE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2561 {TGSI_OPCODE_DEFAULT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2562 {TGSI_OPCODE_ENDSWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2563 {TGSI_OPCODE_LAST, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
2564 };