r600: Fix llvm build since const buffer changes
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_sq.h"
24 #include "r600_llvm.h"
25 #include "r600_formats.h"
26 #include "r600_opcodes.h"
27 #include "r600_shader.h"
28 #include "r600d.h"
29
30 #include "sb/sb_public.h"
31
32 #include "pipe/p_shader_tokens.h"
33 #include "tgsi/tgsi_info.h"
34 #include "tgsi/tgsi_parse.h"
35 #include "tgsi/tgsi_scan.h"
36 #include "tgsi/tgsi_dump.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include <stdio.h>
40 #include <errno.h>
41
42 /* CAYMAN notes
43 Why CAYMAN got loops for lots of instructions is explained here.
44
45 -These 8xx t-slot only ops are implemented in all vector slots.
46 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
47 These 8xx t-slot only opcodes become vector ops, with all four
48 slots expecting the arguments on sources a and b. Result is
49 broadcast to all channels.
50 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
51 These 8xx t-slot only opcodes become vector ops in the z, y, and
52 x slots.
53 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
54 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
55 SQRT_IEEE/_64
56 SIN/COS
57 The w slot may have an independent co-issued operation, or if the
58 result is required to be in the w slot, the opcode above may be
59 issued in the w slot as well.
60 The compiler must issue the source argument to slots z, y, and x
61 */
62
63 #define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
64 static int r600_shader_from_tgsi(struct r600_context *rctx,
65 struct r600_pipe_shader *pipeshader,
66 union r600_shader_key key);
67
68
69 static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
70 int size, unsigned comp_mask) {
71
72 if (!size)
73 return;
74
75 if (ps->num_arrays == ps->max_arrays) {
76 ps->max_arrays += 64;
77 ps->arrays = realloc(ps->arrays, ps->max_arrays *
78 sizeof(struct r600_shader_array));
79 }
80
81 int n = ps->num_arrays;
82 ++ps->num_arrays;
83
84 ps->arrays[n].comp_mask = comp_mask;
85 ps->arrays[n].gpr_start = start_gpr;
86 ps->arrays[n].gpr_count = size;
87 }
88
89 static void r600_dump_streamout(struct pipe_stream_output_info *so)
90 {
91 unsigned i;
92
93 fprintf(stderr, "STREAMOUT\n");
94 for (i = 0; i < so->num_outputs; i++) {
95 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
96 so->output[i].start_component;
97 fprintf(stderr, " %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
98 i,
99 so->output[i].stream,
100 so->output[i].output_buffer,
101 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
102 so->output[i].register_index,
103 mask & 1 ? "x" : "",
104 mask & 2 ? "y" : "",
105 mask & 4 ? "z" : "",
106 mask & 8 ? "w" : "",
107 so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
108 }
109 }
110
111 static int store_shader(struct pipe_context *ctx,
112 struct r600_pipe_shader *shader)
113 {
114 struct r600_context *rctx = (struct r600_context *)ctx;
115 uint32_t *ptr, i;
116
117 if (shader->bo == NULL) {
118 shader->bo = (struct r600_resource*)
119 pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
120 if (shader->bo == NULL) {
121 return -ENOMEM;
122 }
123 ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
124 if (R600_BIG_ENDIAN) {
125 for (i = 0; i < shader->shader.bc.ndw; ++i) {
126 ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
127 }
128 } else {
129 memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
130 }
131 rctx->b.ws->buffer_unmap(shader->bo->cs_buf);
132 }
133
134 return 0;
135 }
136
137 int r600_pipe_shader_create(struct pipe_context *ctx,
138 struct r600_pipe_shader *shader,
139 union r600_shader_key key)
140 {
141 struct r600_context *rctx = (struct r600_context *)ctx;
142 struct r600_pipe_shader_selector *sel = shader->selector;
143 int r;
144 bool dump = r600_can_dump_shader(&rctx->screen->b, sel->tokens);
145 unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
146 unsigned sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
147 unsigned export_shader;
148
149 shader->shader.bc.isa = rctx->isa;
150
151 if (dump) {
152 fprintf(stderr, "--------------------------------------------------------------\n");
153 tgsi_dump(sel->tokens, 0);
154
155 if (sel->so.num_outputs) {
156 r600_dump_streamout(&sel->so);
157 }
158 }
159 r = r600_shader_from_tgsi(rctx, shader, key);
160 if (r) {
161 R600_ERR("translation from TGSI failed !\n");
162 goto error;
163 }
164
165 /* disable SB for geom shaders on R6xx/R7xx due to some mysterious gs piglit regressions with it enabled. */
166 if (rctx->b.chip_class <= R700) {
167 use_sb &= (shader->shader.processor_type != TGSI_PROCESSOR_GEOMETRY);
168 }
169 /* disable SB for shaders using CF_INDEX_0/1 (sampler/ubo array indexing) as it doesn't handle those currently */
170 use_sb &= !shader->shader.uses_index_registers;
171 /* disable SB for shaders using doubles */
172 use_sb &= !shader->shader.uses_doubles;
173
174 /* Check if the bytecode has already been built. When using the llvm
175 * backend, r600_shader_from_tgsi() will take care of building the
176 * bytecode.
177 */
178 if (!shader->shader.bc.bytecode) {
179 r = r600_bytecode_build(&shader->shader.bc);
180 if (r) {
181 R600_ERR("building bytecode failed !\n");
182 goto error;
183 }
184 }
185
186 if (dump && !sb_disasm) {
187 fprintf(stderr, "--------------------------------------------------------------\n");
188 r600_bytecode_disasm(&shader->shader.bc);
189 fprintf(stderr, "______________________________________________________________\n");
190 } else if ((dump && sb_disasm) || use_sb) {
191 r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
192 dump, use_sb);
193 if (r) {
194 R600_ERR("r600_sb_bytecode_process failed !\n");
195 goto error;
196 }
197 }
198
199 if (shader->gs_copy_shader) {
200 if (dump) {
201 // dump copy shader
202 r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
203 &shader->gs_copy_shader->shader, dump, 0);
204 if (r)
205 goto error;
206 }
207
208 if ((r = store_shader(ctx, shader->gs_copy_shader)))
209 goto error;
210 }
211
212 /* Store the shader in a buffer. */
213 if ((r = store_shader(ctx, shader)))
214 goto error;
215
216 /* Build state. */
217 switch (shader->shader.processor_type) {
218 case TGSI_PROCESSOR_GEOMETRY:
219 if (rctx->b.chip_class >= EVERGREEN) {
220 evergreen_update_gs_state(ctx, shader);
221 evergreen_update_vs_state(ctx, shader->gs_copy_shader);
222 } else {
223 r600_update_gs_state(ctx, shader);
224 r600_update_vs_state(ctx, shader->gs_copy_shader);
225 }
226 break;
227 case TGSI_PROCESSOR_VERTEX:
228 export_shader = key.vs.as_es;
229 if (rctx->b.chip_class >= EVERGREEN) {
230 if (export_shader)
231 evergreen_update_es_state(ctx, shader);
232 else
233 evergreen_update_vs_state(ctx, shader);
234 } else {
235 if (export_shader)
236 r600_update_es_state(ctx, shader);
237 else
238 r600_update_vs_state(ctx, shader);
239 }
240 break;
241 case TGSI_PROCESSOR_FRAGMENT:
242 if (rctx->b.chip_class >= EVERGREEN) {
243 evergreen_update_ps_state(ctx, shader);
244 } else {
245 r600_update_ps_state(ctx, shader);
246 }
247 break;
248 default:
249 r = -EINVAL;
250 goto error;
251 }
252 return 0;
253
254 error:
255 r600_pipe_shader_destroy(ctx, shader);
256 return r;
257 }
258
259 void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
260 {
261 pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
262 r600_bytecode_clear(&shader->shader.bc);
263 r600_release_command_buffer(&shader->command_buffer);
264 }
265
266 /*
267 * tgsi -> r600 shader
268 */
269 struct r600_shader_tgsi_instruction;
270
271 struct r600_shader_src {
272 unsigned sel;
273 unsigned swizzle[4];
274 unsigned neg;
275 unsigned abs;
276 unsigned rel;
277 unsigned kc_bank;
278 boolean kc_rel; /* true if cache bank is indexed */
279 uint32_t value[4];
280 };
281
282 struct eg_interp {
283 boolean enabled;
284 unsigned ij_index;
285 };
286
287 struct r600_shader_ctx {
288 struct tgsi_shader_info info;
289 struct tgsi_parse_context parse;
290 const struct tgsi_token *tokens;
291 unsigned type;
292 unsigned file_offset[TGSI_FILE_COUNT];
293 unsigned temp_reg;
294 const struct r600_shader_tgsi_instruction *inst_info;
295 struct r600_bytecode *bc;
296 struct r600_shader *shader;
297 struct r600_shader_src src[4];
298 uint32_t *literals;
299 uint32_t nliterals;
300 uint32_t max_driver_temp_used;
301 boolean use_llvm;
302 /* needed for evergreen interpolation */
303 struct eg_interp eg_interpolators[6]; // indexed by Persp/Linear * 3 + sample/center/centroid
304 /* evergreen/cayman also store sample mask in face register */
305 int face_gpr;
306 /* sample id is .w component stored in fixed point position register */
307 int fixed_pt_position_gpr;
308 int colors_used;
309 boolean clip_vertex_write;
310 unsigned cv_output;
311 unsigned edgeflag_output;
312 int fragcoord_input;
313 int native_integers;
314 int next_ring_offset;
315 int gs_out_ring_offset;
316 int gs_next_vertex;
317 struct r600_shader *gs_for_vs;
318 int gs_export_gpr_tregs[4];
319 const struct pipe_stream_output_info *gs_stream_output_info;
320 unsigned enabled_stream_buffers_mask;
321 };
322
323 struct r600_shader_tgsi_instruction {
324 unsigned op;
325 int (*process)(struct r600_shader_ctx *ctx);
326 };
327
328 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
329 static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
330 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
331 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
332 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
333 static int tgsi_else(struct r600_shader_ctx *ctx);
334 static int tgsi_endif(struct r600_shader_ctx *ctx);
335 static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
336 static int tgsi_endloop(struct r600_shader_ctx *ctx);
337 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
338 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
339 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
340 unsigned int dst_reg);
341 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
342 const struct r600_shader_src *shader_src,
343 unsigned chan);
344
345 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
346 {
347 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
348 int j;
349
350 if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
351 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
352 return -EINVAL;
353 }
354 if (i->Instruction.Predicate) {
355 R600_ERR("predicate unsupported\n");
356 return -EINVAL;
357 }
358 #if 0
359 if (i->Instruction.Label) {
360 R600_ERR("label unsupported\n");
361 return -EINVAL;
362 }
363 #endif
364 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
365 if (i->Src[j].Register.Dimension) {
366 switch (i->Src[j].Register.File) {
367 case TGSI_FILE_CONSTANT:
368 break;
369 case TGSI_FILE_INPUT:
370 if (ctx->type == TGSI_PROCESSOR_GEOMETRY)
371 break;
372 default:
373 R600_ERR("unsupported src %d (dimension %d)\n", j,
374 i->Src[j].Register.Dimension);
375 return -EINVAL;
376 }
377 }
378 }
379 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
380 if (i->Dst[j].Register.Dimension) {
381 R600_ERR("unsupported dst (dimension)\n");
382 return -EINVAL;
383 }
384 }
385 return 0;
386 }
387
388 int eg_get_interpolator_index(unsigned interpolate, unsigned location)
389 {
390 if (interpolate == TGSI_INTERPOLATE_COLOR ||
391 interpolate == TGSI_INTERPOLATE_LINEAR ||
392 interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
393 {
394 int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
395 int loc;
396
397 switch(location) {
398 case TGSI_INTERPOLATE_LOC_CENTER:
399 loc = 1;
400 break;
401 case TGSI_INTERPOLATE_LOC_CENTROID:
402 loc = 2;
403 break;
404 case TGSI_INTERPOLATE_LOC_SAMPLE:
405 default:
406 loc = 0; break;
407 }
408
409 return is_linear * 3 + loc;
410 }
411
412 return -1;
413 }
414
415 static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
416 int input)
417 {
418 int i = eg_get_interpolator_index(
419 ctx->shader->input[input].interpolate,
420 ctx->shader->input[input].interpolate_location);
421 assert(i >= 0);
422 ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
423 }
424
425 static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
426 {
427 int i, r;
428 struct r600_bytecode_alu alu;
429 int gpr = 0, base_chan = 0;
430 int ij_index = ctx->shader->input[input].ij_index;
431
432 /* work out gpr and base_chan from index */
433 gpr = ij_index / 2;
434 base_chan = (2 * (ij_index % 2)) + 1;
435
436 for (i = 0; i < 8; i++) {
437 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
438
439 if (i < 4)
440 alu.op = ALU_OP2_INTERP_ZW;
441 else
442 alu.op = ALU_OP2_INTERP_XY;
443
444 if ((i > 1) && (i < 6)) {
445 alu.dst.sel = ctx->shader->input[input].gpr;
446 alu.dst.write = 1;
447 }
448
449 alu.dst.chan = i % 4;
450
451 alu.src[0].sel = gpr;
452 alu.src[0].chan = (base_chan - (i % 2));
453
454 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
455
456 alu.bank_swizzle_force = SQ_ALU_VEC_210;
457 if ((i % 4) == 3)
458 alu.last = 1;
459 r = r600_bytecode_add_alu(ctx->bc, &alu);
460 if (r)
461 return r;
462 }
463 return 0;
464 }
465
466 static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
467 {
468 int i, r;
469 struct r600_bytecode_alu alu;
470
471 for (i = 0; i < 4; i++) {
472 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
473
474 alu.op = ALU_OP1_INTERP_LOAD_P0;
475
476 alu.dst.sel = ctx->shader->input[input].gpr;
477 alu.dst.write = 1;
478
479 alu.dst.chan = i;
480
481 alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
482 alu.src[0].chan = i;
483
484 if (i == 3)
485 alu.last = 1;
486 r = r600_bytecode_add_alu(ctx->bc, &alu);
487 if (r)
488 return r;
489 }
490 return 0;
491 }
492
493 /*
494 * Special export handling in shaders
495 *
496 * shader export ARRAY_BASE for EXPORT_POS:
497 * 60 is position
498 * 61 is misc vector
499 * 62, 63 are clip distance vectors
500 *
501 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
502 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
503 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
504 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
505 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
506 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
507 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
508 * exclusive from render target index)
509 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
510 *
511 *
512 * shader export ARRAY_BASE for EXPORT_PIXEL:
513 * 0-7 CB targets
514 * 61 computed Z vector
515 *
516 * The use of the values exported in the computed Z vector are controlled
517 * by DB_SHADER_CONTROL:
518 * Z_EXPORT_ENABLE - Z as a float in RED
519 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
520 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
521 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
522 * DB_SOURCE_FORMAT - export control restrictions
523 *
524 */
525
526
527 /* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
528 static int r600_spi_sid(struct r600_shader_io * io)
529 {
530 int index, name = io->name;
531
532 /* These params are handled differently, they don't need
533 * semantic indices, so we'll use 0 for them.
534 */
535 if (name == TGSI_SEMANTIC_POSITION ||
536 name == TGSI_SEMANTIC_PSIZE ||
537 name == TGSI_SEMANTIC_EDGEFLAG ||
538 name == TGSI_SEMANTIC_FACE ||
539 name == TGSI_SEMANTIC_SAMPLEMASK)
540 index = 0;
541 else {
542 if (name == TGSI_SEMANTIC_GENERIC) {
543 /* For generic params simply use sid from tgsi */
544 index = io->sid;
545 } else {
546 /* For non-generic params - pack name and sid into 8 bits */
547 index = 0x80 | (name<<3) | (io->sid);
548 }
549
550 /* Make sure that all really used indices have nonzero value, so
551 * we can just compare it to 0 later instead of comparing the name
552 * with different values to detect special cases. */
553 index++;
554 }
555
556 return index;
557 };
558
559 /* turn input into interpolate on EG */
560 static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
561 {
562 int r = 0;
563
564 if (ctx->shader->input[index].spi_sid) {
565 ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
566 if (ctx->shader->input[index].interpolate > 0) {
567 evergreen_interp_assign_ij_index(ctx, index);
568 if (!ctx->use_llvm)
569 r = evergreen_interp_alu(ctx, index);
570 } else {
571 if (!ctx->use_llvm)
572 r = evergreen_interp_flat(ctx, index);
573 }
574 }
575 return r;
576 }
577
578 static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
579 {
580 struct r600_bytecode_alu alu;
581 int i, r;
582 int gpr_front = ctx->shader->input[front].gpr;
583 int gpr_back = ctx->shader->input[back].gpr;
584
585 for (i = 0; i < 4; i++) {
586 memset(&alu, 0, sizeof(alu));
587 alu.op = ALU_OP3_CNDGT;
588 alu.is_op3 = 1;
589 alu.dst.write = 1;
590 alu.dst.sel = gpr_front;
591 alu.src[0].sel = ctx->face_gpr;
592 alu.src[1].sel = gpr_front;
593 alu.src[2].sel = gpr_back;
594
595 alu.dst.chan = i;
596 alu.src[1].chan = i;
597 alu.src[2].chan = i;
598 alu.last = (i==3);
599
600 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
601 return r;
602 }
603
604 return 0;
605 }
606
607 static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
608 {
609 int i;
610 i = ctx->shader->noutput++;
611 ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
612 ctx->shader->output[i].sid = 0;
613 ctx->shader->output[i].gpr = 0;
614 ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
615 ctx->shader->output[i].write_mask = 0x4;
616 ctx->shader->output[i].spi_sid = prim_id_sid;
617
618 return 0;
619 }
620
621 static int tgsi_declaration(struct r600_shader_ctx *ctx)
622 {
623 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
624 int r, i, j, count = d->Range.Last - d->Range.First + 1;
625
626 switch (d->Declaration.File) {
627 case TGSI_FILE_INPUT:
628 for (j = 0; j < count; j++) {
629 i = ctx->shader->ninput + j;
630 assert(i < Elements(ctx->shader->input));
631 ctx->shader->input[i].name = d->Semantic.Name;
632 ctx->shader->input[i].sid = d->Semantic.Index + j;
633 ctx->shader->input[i].interpolate = d->Interp.Interpolate;
634 ctx->shader->input[i].interpolate_location = d->Interp.Location;
635 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
636 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
637 ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
638 switch (ctx->shader->input[i].name) {
639 case TGSI_SEMANTIC_FACE:
640 if (ctx->face_gpr != -1)
641 ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
642 else
643 ctx->face_gpr = ctx->shader->input[i].gpr;
644 break;
645 case TGSI_SEMANTIC_COLOR:
646 ctx->colors_used++;
647 break;
648 case TGSI_SEMANTIC_POSITION:
649 ctx->fragcoord_input = i;
650 break;
651 case TGSI_SEMANTIC_PRIMID:
652 /* set this for now */
653 ctx->shader->gs_prim_id_input = true;
654 ctx->shader->ps_prim_id_input = i;
655 break;
656 }
657 if (ctx->bc->chip_class >= EVERGREEN) {
658 if ((r = evergreen_interp_input(ctx, i)))
659 return r;
660 }
661 } else if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
662 /* FIXME probably skip inputs if they aren't passed in the ring */
663 ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
664 ctx->next_ring_offset += 16;
665 if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
666 ctx->shader->gs_prim_id_input = true;
667 }
668 }
669 ctx->shader->ninput += count;
670 break;
671 case TGSI_FILE_OUTPUT:
672 for (j = 0; j < count; j++) {
673 i = ctx->shader->noutput + j;
674 assert(i < Elements(ctx->shader->output));
675 ctx->shader->output[i].name = d->Semantic.Name;
676 ctx->shader->output[i].sid = d->Semantic.Index + j;
677 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
678 ctx->shader->output[i].interpolate = d->Interp.Interpolate;
679 ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
680 if (ctx->type == TGSI_PROCESSOR_VERTEX ||
681 ctx->type == TGSI_PROCESSOR_GEOMETRY) {
682 ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
683 switch (d->Semantic.Name) {
684 case TGSI_SEMANTIC_CLIPDIST:
685 ctx->shader->clip_dist_write |= d->Declaration.UsageMask <<
686 ((d->Semantic.Index + j) << 2);
687 break;
688 case TGSI_SEMANTIC_PSIZE:
689 ctx->shader->vs_out_misc_write = 1;
690 ctx->shader->vs_out_point_size = 1;
691 break;
692 case TGSI_SEMANTIC_EDGEFLAG:
693 ctx->shader->vs_out_misc_write = 1;
694 ctx->shader->vs_out_edgeflag = 1;
695 ctx->edgeflag_output = i;
696 break;
697 case TGSI_SEMANTIC_VIEWPORT_INDEX:
698 ctx->shader->vs_out_misc_write = 1;
699 ctx->shader->vs_out_viewport = 1;
700 break;
701 case TGSI_SEMANTIC_LAYER:
702 ctx->shader->vs_out_misc_write = 1;
703 ctx->shader->vs_out_layer = 1;
704 break;
705 case TGSI_SEMANTIC_CLIPVERTEX:
706 ctx->clip_vertex_write = TRUE;
707 ctx->cv_output = i;
708 break;
709 }
710 if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
711 ctx->gs_out_ring_offset += 16;
712 }
713 } else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
714 switch (d->Semantic.Name) {
715 case TGSI_SEMANTIC_COLOR:
716 ctx->shader->nr_ps_max_color_exports++;
717 break;
718 }
719 }
720 }
721 ctx->shader->noutput += count;
722 break;
723 case TGSI_FILE_TEMPORARY:
724 if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
725 if (d->Array.ArrayID) {
726 r600_add_gpr_array(ctx->shader,
727 ctx->file_offset[TGSI_FILE_TEMPORARY] +
728 d->Range.First,
729 d->Range.Last - d->Range.First + 1, 0x0F);
730 }
731 }
732 break;
733
734 case TGSI_FILE_CONSTANT:
735 case TGSI_FILE_SAMPLER:
736 case TGSI_FILE_SAMPLER_VIEW:
737 case TGSI_FILE_ADDRESS:
738 break;
739
740 case TGSI_FILE_SYSTEM_VALUE:
741 if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
742 d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
743 d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
744 break; /* Already handled from allocate_system_value_inputs */
745 } else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
746 if (!ctx->native_integers) {
747 struct r600_bytecode_alu alu;
748 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
749
750 alu.op = ALU_OP1_INT_TO_FLT;
751 alu.src[0].sel = 0;
752 alu.src[0].chan = 3;
753
754 alu.dst.sel = 0;
755 alu.dst.chan = 3;
756 alu.dst.write = 1;
757 alu.last = 1;
758
759 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
760 return r;
761 }
762 break;
763 } else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
764 break;
765 else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
766 break;
767 default:
768 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
769 return -EINVAL;
770 }
771 return 0;
772 }
773
774 static int r600_get_temp(struct r600_shader_ctx *ctx)
775 {
776 return ctx->temp_reg + ctx->max_driver_temp_used++;
777 }
778
779 static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
780 {
781 struct tgsi_parse_context parse;
782 struct {
783 boolean enabled;
784 int *reg;
785 unsigned name, alternate_name;
786 } inputs[2] = {
787 { false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */
788
789 { false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
790 };
791 int i, k, num_regs = 0;
792
793 if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
794 return 0;
795 }
796
797 /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
798 while (!tgsi_parse_end_of_tokens(&parse)) {
799 tgsi_parse_token(&parse);
800
801 if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
802 const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
803 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
804 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
805 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
806 {
807 int interpolate, location, k;
808
809 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
810 location = TGSI_INTERPOLATE_LOC_CENTER;
811 inputs[1].enabled = true; /* needs SAMPLEID */
812 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
813 location = TGSI_INTERPOLATE_LOC_CENTER;
814 /* Needs sample positions, currently those are always available */
815 } else {
816 location = TGSI_INTERPOLATE_LOC_CENTROID;
817 }
818
819 interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
820 k = eg_get_interpolator_index(interpolate, location);
821 ctx->eg_interpolators[k].enabled = true;
822 }
823 } else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
824 struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
825 if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
826 for (k = 0; k < Elements(inputs); k++) {
827 if (d->Semantic.Name == inputs[k].name ||
828 d->Semantic.Name == inputs[k].alternate_name) {
829 inputs[k].enabled = true;
830 }
831 }
832 }
833 }
834 }
835
836 tgsi_parse_free(&parse);
837
838 for (i = 0; i < Elements(inputs); i++) {
839 boolean enabled = inputs[i].enabled;
840 int *reg = inputs[i].reg;
841 unsigned name = inputs[i].name;
842
843 if (enabled) {
844 int gpr = gpr_offset + num_regs++;
845
846 // add to inputs, allocate a gpr
847 k = ctx->shader->ninput ++;
848 ctx->shader->input[k].name = name;
849 ctx->shader->input[k].sid = 0;
850 ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
851 ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
852 *reg = ctx->shader->input[k].gpr = gpr;
853 }
854 }
855
856 return gpr_offset + num_regs;
857 }
858
859 /*
860 * for evergreen we need to scan the shader to find the number of GPRs we need to
861 * reserve for interpolation and system values
862 *
863 * we need to know if we are going to emit
864 * any sample or centroid inputs
865 * if perspective and linear are required
866 */
867 static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
868 {
869 int i;
870 int num_baryc;
871 struct tgsi_parse_context parse;
872
873 memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));
874
875 for (i = 0; i < ctx->info.num_inputs; i++) {
876 int k;
877 /* skip position/face/mask/sampleid */
878 if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
879 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
880 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
881 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
882 continue;
883
884 k = eg_get_interpolator_index(
885 ctx->info.input_interpolate[i],
886 ctx->info.input_interpolate_loc[i]);
887 if (k >= 0)
888 ctx->eg_interpolators[k].enabled = TRUE;
889 }
890
891 if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
892 return 0;
893 }
894
895 /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
896 while (!tgsi_parse_end_of_tokens(&parse)) {
897 tgsi_parse_token(&parse);
898
899 if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
900 const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
901 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
902 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
903 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
904 {
905 int interpolate, location, k;
906
907 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
908 location = TGSI_INTERPOLATE_LOC_CENTER;
909 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
910 location = TGSI_INTERPOLATE_LOC_CENTER;
911 } else {
912 location = TGSI_INTERPOLATE_LOC_CENTROID;
913 }
914
915 interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
916 k = eg_get_interpolator_index(interpolate, location);
917 ctx->eg_interpolators[k].enabled = true;
918 }
919 }
920 }
921
922 tgsi_parse_free(&parse);
923
924 /* assign gpr to each interpolator according to priority */
925 num_baryc = 0;
926 for (i = 0; i < Elements(ctx->eg_interpolators); i++) {
927 if (ctx->eg_interpolators[i].enabled) {
928 ctx->eg_interpolators[i].ij_index = num_baryc;
929 num_baryc ++;
930 }
931 }
932
933 /* XXX PULL MODEL and LINE STIPPLE */
934
935 num_baryc = (num_baryc + 1) >> 1;
936 return allocate_system_value_inputs(ctx, num_baryc);
937 }
938
939 /* sample_id_sel == NULL means fetch for current sample */
940 static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
941 {
942 struct r600_bytecode_vtx vtx;
943 int r, t1;
944
945 assert(ctx->fixed_pt_position_gpr != -1);
946
947 t1 = r600_get_temp(ctx);
948
949 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
950 vtx.op = FETCH_OP_VFETCH;
951 vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
952 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
953 if (sample_id == NULL) {
954 vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
955 vtx.src_sel_x = 3;
956 }
957 else {
958 struct r600_bytecode_alu alu;
959
960 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
961 alu.op = ALU_OP1_MOV;
962 r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
963 alu.dst.sel = t1;
964 alu.dst.write = 1;
965 alu.last = 1;
966 r = r600_bytecode_add_alu(ctx->bc, &alu);
967 if (r)
968 return r;
969
970 vtx.src_gpr = t1;
971 vtx.src_sel_x = 0;
972 }
973 vtx.mega_fetch_count = 16;
974 vtx.dst_gpr = t1;
975 vtx.dst_sel_x = 0;
976 vtx.dst_sel_y = 1;
977 vtx.dst_sel_z = 2;
978 vtx.dst_sel_w = 3;
979 vtx.data_format = FMT_32_32_32_32_FLOAT;
980 vtx.num_format_all = 2;
981 vtx.format_comp_all = 1;
982 vtx.use_const_fields = 0;
983 vtx.offset = 1; // first element is size of buffer
984 vtx.endian = r600_endian_swap(32);
985 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
986
987 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
988 if (r)
989 return r;
990
991 return t1;
992 }
993
994 static void tgsi_src(struct r600_shader_ctx *ctx,
995 const struct tgsi_full_src_register *tgsi_src,
996 struct r600_shader_src *r600_src)
997 {
998 memset(r600_src, 0, sizeof(*r600_src));
999 r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
1000 r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
1001 r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
1002 r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
1003 r600_src->neg = tgsi_src->Register.Negate;
1004 r600_src->abs = tgsi_src->Register.Absolute;
1005
1006 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
1007 int index;
1008 if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
1009 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
1010 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
1011
1012 index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
1013 r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg);
1014 if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
1015 return;
1016 }
1017 index = tgsi_src->Register.Index;
1018 r600_src->sel = V_SQ_ALU_SRC_LITERAL;
1019 memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
1020 } else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
1021 if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
1022 r600_src->swizzle[0] = 2; // Z value
1023 r600_src->swizzle[1] = 2;
1024 r600_src->swizzle[2] = 2;
1025 r600_src->swizzle[3] = 2;
1026 r600_src->sel = ctx->face_gpr;
1027 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
1028 r600_src->swizzle[0] = 3; // W value
1029 r600_src->swizzle[1] = 3;
1030 r600_src->swizzle[2] = 3;
1031 r600_src->swizzle[3] = 3;
1032 r600_src->sel = ctx->fixed_pt_position_gpr;
1033 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
1034 r600_src->swizzle[0] = 0;
1035 r600_src->swizzle[1] = 1;
1036 r600_src->swizzle[2] = 4;
1037 r600_src->swizzle[3] = 4;
1038 r600_src->sel = load_sample_position(ctx, NULL, -1);
1039 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
1040 r600_src->swizzle[0] = 3;
1041 r600_src->swizzle[1] = 3;
1042 r600_src->swizzle[2] = 3;
1043 r600_src->swizzle[3] = 3;
1044 r600_src->sel = 0;
1045 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
1046 r600_src->swizzle[0] = 0;
1047 r600_src->swizzle[1] = 0;
1048 r600_src->swizzle[2] = 0;
1049 r600_src->swizzle[3] = 0;
1050 r600_src->sel = 0;
1051 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
1052 r600_src->swizzle[0] = 3;
1053 r600_src->swizzle[1] = 3;
1054 r600_src->swizzle[2] = 3;
1055 r600_src->swizzle[3] = 3;
1056 r600_src->sel = 1;
1057 }
1058 } else {
1059 if (tgsi_src->Register.Indirect)
1060 r600_src->rel = V_SQ_REL_RELATIVE;
1061 r600_src->sel = tgsi_src->Register.Index;
1062 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
1063 }
1064 if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
1065 if (tgsi_src->Register.Dimension) {
1066 r600_src->kc_bank = tgsi_src->Dimension.Index;
1067 if (tgsi_src->Dimension.Indirect) {
1068 r600_src->kc_rel = 1;
1069 }
1070 }
1071 }
1072 }
1073
1074 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
1075 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
1076 unsigned int dst_reg)
1077 {
1078 struct r600_bytecode_vtx vtx;
1079 unsigned int ar_reg;
1080 int r;
1081
1082 if (offset) {
1083 struct r600_bytecode_alu alu;
1084
1085 memset(&alu, 0, sizeof(alu));
1086
1087 alu.op = ALU_OP2_ADD_INT;
1088 alu.src[0].sel = ctx->bc->ar_reg;
1089 alu.src[0].chan = ar_chan;
1090
1091 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1092 alu.src[1].value = offset;
1093
1094 alu.dst.sel = dst_reg;
1095 alu.dst.chan = ar_chan;
1096 alu.dst.write = 1;
1097 alu.last = 1;
1098
1099 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1100 return r;
1101
1102 ar_reg = dst_reg;
1103 } else {
1104 ar_reg = ctx->bc->ar_reg;
1105 }
1106
1107 memset(&vtx, 0, sizeof(vtx));
1108 vtx.buffer_id = cb_idx;
1109 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1110 vtx.src_gpr = ar_reg;
1111 vtx.src_sel_x = ar_chan;
1112 vtx.mega_fetch_count = 16;
1113 vtx.dst_gpr = dst_reg;
1114 vtx.dst_sel_x = 0; /* SEL_X */
1115 vtx.dst_sel_y = 1; /* SEL_Y */
1116 vtx.dst_sel_z = 2; /* SEL_Z */
1117 vtx.dst_sel_w = 3; /* SEL_W */
1118 vtx.data_format = FMT_32_32_32_32_FLOAT;
1119 vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
1120 vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
1121 vtx.endian = r600_endian_swap(32);
1122 vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;
1123
1124 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1125 return r;
1126
1127 return 0;
1128 }
1129
1130 static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1131 {
1132 struct r600_bytecode_vtx vtx;
1133 int r;
1134 unsigned index = src->Register.Index;
1135 unsigned vtx_id = src->Dimension.Index;
1136 int offset_reg = vtx_id / 3;
1137 int offset_chan = vtx_id % 3;
1138
1139 /* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
1140 * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */
1141
1142 if (offset_reg == 0 && offset_chan == 2)
1143 offset_chan = 3;
1144
1145 if (src->Dimension.Indirect) {
1146 int treg[3];
1147 int t2;
1148 struct r600_bytecode_alu alu;
1149 int r, i;
1150
1151 /* you have got to be shitting me -
1152 we have to put the R0.x/y/w into Rt.x Rt+1.x Rt+2.x then index reg from Rt.
1153 at least this is what fglrx seems to do. */
1154 for (i = 0; i < 3; i++) {
1155 treg[i] = r600_get_temp(ctx);
1156 }
1157 r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);
1158
1159 t2 = r600_get_temp(ctx);
1160 for (i = 0; i < 3; i++) {
1161 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1162 alu.op = ALU_OP1_MOV;
1163 alu.src[0].sel = 0;
1164 alu.src[0].chan = i == 2 ? 3 : i;
1165 alu.dst.sel = treg[i];
1166 alu.dst.chan = 0;
1167 alu.dst.write = 1;
1168 alu.last = 1;
1169 r = r600_bytecode_add_alu(ctx->bc, &alu);
1170 if (r)
1171 return r;
1172 }
1173 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1174 alu.op = ALU_OP1_MOV;
1175 alu.src[0].sel = treg[0];
1176 alu.src[0].rel = 1;
1177 alu.dst.sel = t2;
1178 alu.dst.write = 1;
1179 alu.last = 1;
1180 r = r600_bytecode_add_alu(ctx->bc, &alu);
1181 if (r)
1182 return r;
1183 offset_reg = t2;
1184 }
1185
1186
1187 memset(&vtx, 0, sizeof(vtx));
1188 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
1189 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1190 vtx.src_gpr = offset_reg;
1191 vtx.src_sel_x = offset_chan;
1192 vtx.offset = index * 16; /*bytes*/
1193 vtx.mega_fetch_count = 16;
1194 vtx.dst_gpr = dst_reg;
1195 vtx.dst_sel_x = 0; /* SEL_X */
1196 vtx.dst_sel_y = 1; /* SEL_Y */
1197 vtx.dst_sel_z = 2; /* SEL_Z */
1198 vtx.dst_sel_w = 3; /* SEL_W */
1199 if (ctx->bc->chip_class >= EVERGREEN) {
1200 vtx.use_const_fields = 1;
1201 } else {
1202 vtx.data_format = FMT_32_32_32_32_FLOAT;
1203 }
1204
1205 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1206 return r;
1207
1208 return 0;
1209 }
1210
1211 static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
1212 {
1213 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1214 int i;
1215
1216 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1217 struct tgsi_full_src_register *src = &inst->Src[i];
1218
1219 if (src->Register.File == TGSI_FILE_INPUT) {
1220 if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
1221 /* primitive id is in R0.z */
1222 ctx->src[i].sel = 0;
1223 ctx->src[i].swizzle[0] = 2;
1224 }
1225 }
1226 if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
1227 int treg = r600_get_temp(ctx);
1228
1229 fetch_gs_input(ctx, src, treg);
1230 ctx->src[i].sel = treg;
1231 }
1232 }
1233 return 0;
1234 }
1235
1236 static int tgsi_split_constant(struct r600_shader_ctx *ctx)
1237 {
1238 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1239 struct r600_bytecode_alu alu;
1240 int i, j, k, nconst, r;
1241
1242 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
1243 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
1244 nconst++;
1245 }
1246 tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
1247 }
1248 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
1249 if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
1250 continue;
1251 }
1252
1253 if (ctx->src[i].kc_rel)
1254 ctx->shader->uses_index_registers = true;
1255
1256 if (ctx->src[i].rel) {
1257 int chan = inst->Src[i].Indirect.Swizzle;
1258 int treg = r600_get_temp(ctx);
1259 if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
1260 return r;
1261
1262 ctx->src[i].kc_bank = 0;
1263 ctx->src[i].kc_rel = 0;
1264 ctx->src[i].sel = treg;
1265 ctx->src[i].rel = 0;
1266 j--;
1267 } else if (j > 0) {
1268 int treg = r600_get_temp(ctx);
1269 for (k = 0; k < 4; k++) {
1270 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1271 alu.op = ALU_OP1_MOV;
1272 alu.src[0].sel = ctx->src[i].sel;
1273 alu.src[0].chan = k;
1274 alu.src[0].rel = ctx->src[i].rel;
1275 alu.src[0].kc_bank = ctx->src[i].kc_bank;
1276 alu.src[0].kc_rel = ctx->src[i].kc_rel;
1277 alu.dst.sel = treg;
1278 alu.dst.chan = k;
1279 alu.dst.write = 1;
1280 if (k == 3)
1281 alu.last = 1;
1282 r = r600_bytecode_add_alu(ctx->bc, &alu);
1283 if (r)
1284 return r;
1285 }
1286 ctx->src[i].sel = treg;
1287 ctx->src[i].rel =0;
1288 j--;
1289 }
1290 }
1291 return 0;
1292 }
1293
1294 /* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
1295 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
1296 {
1297 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1298 struct r600_bytecode_alu alu;
1299 int i, j, k, nliteral, r;
1300
1301 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
1302 if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
1303 nliteral++;
1304 }
1305 }
1306 for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
1307 if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
1308 int treg = r600_get_temp(ctx);
1309 for (k = 0; k < 4; k++) {
1310 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1311 alu.op = ALU_OP1_MOV;
1312 alu.src[0].sel = ctx->src[i].sel;
1313 alu.src[0].chan = k;
1314 alu.src[0].value = ctx->src[i].value[k];
1315 alu.dst.sel = treg;
1316 alu.dst.chan = k;
1317 alu.dst.write = 1;
1318 if (k == 3)
1319 alu.last = 1;
1320 r = r600_bytecode_add_alu(ctx->bc, &alu);
1321 if (r)
1322 return r;
1323 }
1324 ctx->src[i].sel = treg;
1325 j--;
1326 }
1327 }
1328 return 0;
1329 }
1330
1331 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
1332 {
1333 int i, r, count = ctx->shader->ninput;
1334
1335 for (i = 0; i < count; i++) {
1336 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
1337 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
1338 if (r)
1339 return r;
1340 }
1341 }
1342 return 0;
1343 }
1344
1345 static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
1346 int stream, unsigned *stream_item_size)
1347 {
1348 unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
1349 unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
1350 int i, j, r;
1351
1352 /* Sanity checking. */
1353 if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
1354 R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
1355 r = -EINVAL;
1356 goto out_err;
1357 }
1358 for (i = 0; i < so->num_outputs; i++) {
1359 if (so->output[i].output_buffer >= 4) {
1360 R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
1361 so->output[i].output_buffer);
1362 r = -EINVAL;
1363 goto out_err;
1364 }
1365 }
1366
1367 /* Initialize locations where the outputs are stored. */
1368 for (i = 0; i < so->num_outputs; i++) {
1369
1370 so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
1371 start_comp[i] = so->output[i].start_component;
1372 /* Lower outputs with dst_offset < start_component.
1373 *
1374 * We can only output 4D vectors with a write mask, e.g. we can
1375 * only output the W component at offset 3, etc. If we want
1376 * to store Y, Z, or W at buffer offset 0, we need to use MOV
1377 * to move it to X and output X. */
1378 if (so->output[i].dst_offset < so->output[i].start_component) {
1379 unsigned tmp = r600_get_temp(ctx);
1380
1381 for (j = 0; j < so->output[i].num_components; j++) {
1382 struct r600_bytecode_alu alu;
1383 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1384 alu.op = ALU_OP1_MOV;
1385 alu.src[0].sel = so_gpr[i];
1386 alu.src[0].chan = so->output[i].start_component + j;
1387
1388 alu.dst.sel = tmp;
1389 alu.dst.chan = j;
1390 alu.dst.write = 1;
1391 if (j == so->output[i].num_components - 1)
1392 alu.last = 1;
1393 r = r600_bytecode_add_alu(ctx->bc, &alu);
1394 if (r)
1395 return r;
1396 }
1397 start_comp[i] = 0;
1398 so_gpr[i] = tmp;
1399 }
1400 }
1401
1402 /* Write outputs to buffers. */
1403 for (i = 0; i < so->num_outputs; i++) {
1404 struct r600_bytecode_output output;
1405
1406 if (stream != -1 && stream != so->output[i].output_buffer)
1407 continue;
1408
1409 memset(&output, 0, sizeof(struct r600_bytecode_output));
1410 output.gpr = so_gpr[i];
1411 output.elem_size = so->output[i].num_components - 1;
1412 if (output.elem_size == 2)
1413 output.elem_size = 3; // 3 not supported, write 4 with junk at end
1414 output.array_base = so->output[i].dst_offset - start_comp[i];
1415 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
1416 output.burst_count = 1;
1417 /* array_size is an upper limit for the burst_count
1418 * with MEM_STREAM instructions */
1419 output.array_size = 0xFFF;
1420 output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];
1421
1422 if (ctx->bc->chip_class >= EVERGREEN) {
1423 switch (so->output[i].output_buffer) {
1424 case 0:
1425 output.op = CF_OP_MEM_STREAM0_BUF0;
1426 break;
1427 case 1:
1428 output.op = CF_OP_MEM_STREAM0_BUF1;
1429 break;
1430 case 2:
1431 output.op = CF_OP_MEM_STREAM0_BUF2;
1432 break;
1433 case 3:
1434 output.op = CF_OP_MEM_STREAM0_BUF3;
1435 break;
1436 }
1437 output.op += so->output[i].stream * 4;
1438 assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
1439 ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
1440 } else {
1441 switch (so->output[i].output_buffer) {
1442 case 0:
1443 output.op = CF_OP_MEM_STREAM0;
1444 break;
1445 case 1:
1446 output.op = CF_OP_MEM_STREAM1;
1447 break;
1448 case 2:
1449 output.op = CF_OP_MEM_STREAM2;
1450 break;
1451 case 3:
1452 output.op = CF_OP_MEM_STREAM3;
1453 break;
1454 }
1455 ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
1456 }
1457 r = r600_bytecode_add_output(ctx->bc, &output);
1458 if (r)
1459 goto out_err;
1460 }
1461 return 0;
1462 out_err:
1463 return r;
1464 }
1465
1466 static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
1467 {
1468 struct r600_bytecode_alu alu;
1469 unsigned reg;
1470
1471 if (!ctx->shader->vs_out_edgeflag)
1472 return;
1473
1474 reg = ctx->shader->output[ctx->edgeflag_output].gpr;
1475
1476 /* clamp(x, 0, 1) */
1477 memset(&alu, 0, sizeof(alu));
1478 alu.op = ALU_OP1_MOV;
1479 alu.src[0].sel = reg;
1480 alu.dst.sel = reg;
1481 alu.dst.write = 1;
1482 alu.dst.clamp = 1;
1483 alu.last = 1;
1484 r600_bytecode_add_alu(ctx->bc, &alu);
1485
1486 memset(&alu, 0, sizeof(alu));
1487 alu.op = ALU_OP1_FLT_TO_INT;
1488 alu.src[0].sel = reg;
1489 alu.dst.sel = reg;
1490 alu.dst.write = 1;
1491 alu.last = 1;
1492 r600_bytecode_add_alu(ctx->bc, &alu);
1493 }
1494
1495 static int generate_gs_copy_shader(struct r600_context *rctx,
1496 struct r600_pipe_shader *gs,
1497 struct pipe_stream_output_info *so)
1498 {
1499 struct r600_shader_ctx ctx = {};
1500 struct r600_shader *gs_shader = &gs->shader;
1501 struct r600_pipe_shader *cshader;
1502 int ocnt = gs_shader->noutput;
1503 struct r600_bytecode_alu alu;
1504 struct r600_bytecode_vtx vtx;
1505 struct r600_bytecode_output output;
1506 struct r600_bytecode_cf *cf_jump, *cf_pop,
1507 *last_exp_pos = NULL, *last_exp_param = NULL;
1508 int i, j, next_clip_pos = 61, next_param = 0;
1509 int ring;
1510
1511 cshader = calloc(1, sizeof(struct r600_pipe_shader));
1512 if (!cshader)
1513 return 0;
1514
1515 memcpy(cshader->shader.output, gs_shader->output, ocnt *
1516 sizeof(struct r600_shader_io));
1517
1518 cshader->shader.noutput = ocnt;
1519
1520 ctx.shader = &cshader->shader;
1521 ctx.bc = &ctx.shader->bc;
1522 ctx.type = ctx.bc->type = TGSI_PROCESSOR_VERTEX;
1523
1524 r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
1525 rctx->screen->has_compressed_msaa_texturing);
1526
1527 ctx.bc->isa = rctx->isa;
1528
1529 cf_jump = NULL;
1530 memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));
1531
1532 /* R0.x = R0.x & 0x3fffffff */
1533 memset(&alu, 0, sizeof(alu));
1534 alu.op = ALU_OP2_AND_INT;
1535 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1536 alu.src[1].value = 0x3fffffff;
1537 alu.dst.write = 1;
1538 r600_bytecode_add_alu(ctx.bc, &alu);
1539
1540 /* R0.y = R0.x >> 30 */
1541 memset(&alu, 0, sizeof(alu));
1542 alu.op = ALU_OP2_LSHR_INT;
1543 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1544 alu.src[1].value = 0x1e;
1545 alu.dst.chan = 1;
1546 alu.dst.write = 1;
1547 alu.last = 1;
1548 r600_bytecode_add_alu(ctx.bc, &alu);
1549
1550 /* fetch vertex data from GSVS ring */
1551 for (i = 0; i < ocnt; ++i) {
1552 struct r600_shader_io *out = &ctx.shader->output[i];
1553
1554 out->gpr = i + 1;
1555 out->ring_offset = i * 16;
1556
1557 memset(&vtx, 0, sizeof(vtx));
1558 vtx.op = FETCH_OP_VFETCH;
1559 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
1560 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1561 vtx.offset = out->ring_offset;
1562 vtx.dst_gpr = out->gpr;
1563 vtx.src_gpr = 0;
1564 vtx.dst_sel_x = 0;
1565 vtx.dst_sel_y = 1;
1566 vtx.dst_sel_z = 2;
1567 vtx.dst_sel_w = 3;
1568 if (rctx->b.chip_class >= EVERGREEN) {
1569 vtx.use_const_fields = 1;
1570 } else {
1571 vtx.data_format = FMT_32_32_32_32_FLOAT;
1572 }
1573
1574 r600_bytecode_add_vtx(ctx.bc, &vtx);
1575 }
1576 ctx.temp_reg = i + 1;
1577 for (ring = 3; ring >= 0; --ring) {
1578 bool enabled = false;
1579 for (i = 0; i < so->num_outputs; i++) {
1580 if (so->output[i].stream == ring) {
1581 enabled = true;
1582 break;
1583 }
1584 }
1585 if (ring != 0 && !enabled) {
1586 cshader->shader.ring_item_sizes[ring] = 0;
1587 continue;
1588 }
1589
1590 if (cf_jump) {
1591 // Patch up jump label
1592 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
1593 cf_pop = ctx.bc->cf_last;
1594
1595 cf_jump->cf_addr = cf_pop->id + 2;
1596 cf_jump->pop_count = 1;
1597 cf_pop->cf_addr = cf_pop->id + 2;
1598 cf_pop->pop_count = 1;
1599 }
1600
1601 /* PRED_SETE_INT __, R0.y, ring */
1602 memset(&alu, 0, sizeof(alu));
1603 alu.op = ALU_OP2_PRED_SETE_INT;
1604 alu.src[0].chan = 1;
1605 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1606 alu.src[1].value = ring;
1607 alu.execute_mask = 1;
1608 alu.update_pred = 1;
1609 alu.last = 1;
1610 r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);
1611
1612 r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
1613 cf_jump = ctx.bc->cf_last;
1614
1615 if (enabled)
1616 emit_streamout(&ctx, so, ring, &cshader->shader.ring_item_sizes[ring]);
1617 cshader->shader.ring_item_sizes[ring] = ocnt * 16;
1618 }
1619
1620 /* export vertex data */
1621 /* XXX factor out common code with r600_shader_from_tgsi ? */
1622 for (i = 0; i < ocnt; ++i) {
1623 struct r600_shader_io *out = &ctx.shader->output[i];
1624 bool instream0 = true;
1625 if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
1626 continue;
1627
1628 for (j = 0; j < so->num_outputs; j++) {
1629 if (so->output[j].register_index == i) {
1630 if (so->output[j].stream == 0)
1631 break;
1632 if (so->output[j].stream > 0)
1633 instream0 = false;
1634 }
1635 }
1636 if (!instream0)
1637 continue;
1638 memset(&output, 0, sizeof(output));
1639 output.gpr = out->gpr;
1640 output.elem_size = 3;
1641 output.swizzle_x = 0;
1642 output.swizzle_y = 1;
1643 output.swizzle_z = 2;
1644 output.swizzle_w = 3;
1645 output.burst_count = 1;
1646 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
1647 output.op = CF_OP_EXPORT;
1648 switch (out->name) {
1649 case TGSI_SEMANTIC_POSITION:
1650 output.array_base = 60;
1651 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
1652 break;
1653
1654 case TGSI_SEMANTIC_PSIZE:
1655 output.array_base = 61;
1656 if (next_clip_pos == 61)
1657 next_clip_pos = 62;
1658 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
1659 output.swizzle_y = 7;
1660 output.swizzle_z = 7;
1661 output.swizzle_w = 7;
1662 ctx.shader->vs_out_misc_write = 1;
1663 ctx.shader->vs_out_point_size = 1;
1664 break;
1665 case TGSI_SEMANTIC_LAYER:
1666 if (out->spi_sid) {
1667 /* duplicate it as PARAM to pass to the pixel shader */
1668 output.array_base = next_param++;
1669 r600_bytecode_add_output(ctx.bc, &output);
1670 last_exp_param = ctx.bc->cf_last;
1671 }
1672 output.array_base = 61;
1673 if (next_clip_pos == 61)
1674 next_clip_pos = 62;
1675 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
1676 output.swizzle_x = 7;
1677 output.swizzle_y = 7;
1678 output.swizzle_z = 0;
1679 output.swizzle_w = 7;
1680 ctx.shader->vs_out_misc_write = 1;
1681 ctx.shader->vs_out_layer = 1;
1682 break;
1683 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1684 if (out->spi_sid) {
1685 /* duplicate it as PARAM to pass to the pixel shader */
1686 output.array_base = next_param++;
1687 r600_bytecode_add_output(ctx.bc, &output);
1688 last_exp_param = ctx.bc->cf_last;
1689 }
1690 output.array_base = 61;
1691 if (next_clip_pos == 61)
1692 next_clip_pos = 62;
1693 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
1694 ctx.shader->vs_out_misc_write = 1;
1695 ctx.shader->vs_out_viewport = 1;
1696 output.swizzle_x = 7;
1697 output.swizzle_y = 7;
1698 output.swizzle_z = 7;
1699 output.swizzle_w = 0;
1700 break;
1701 case TGSI_SEMANTIC_CLIPDIST:
1702 /* spi_sid is 0 for clipdistance outputs that were generated
1703 * for clipvertex - we don't need to pass them to PS */
1704 ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
1705 if (out->spi_sid) {
1706 /* duplicate it as PARAM to pass to the pixel shader */
1707 output.array_base = next_param++;
1708 r600_bytecode_add_output(ctx.bc, &output);
1709 last_exp_param = ctx.bc->cf_last;
1710 }
1711 output.array_base = next_clip_pos++;
1712 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
1713 break;
1714 case TGSI_SEMANTIC_FOG:
1715 output.swizzle_y = 4; /* 0 */
1716 output.swizzle_z = 4; /* 0 */
1717 output.swizzle_w = 5; /* 1 */
1718 break;
1719 default:
1720 output.array_base = next_param++;
1721 break;
1722 }
1723 r600_bytecode_add_output(ctx.bc, &output);
1724 if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
1725 last_exp_param = ctx.bc->cf_last;
1726 else
1727 last_exp_pos = ctx.bc->cf_last;
1728 }
1729
1730 if (!last_exp_pos) {
1731 memset(&output, 0, sizeof(output));
1732 output.gpr = 0;
1733 output.elem_size = 3;
1734 output.swizzle_x = 7;
1735 output.swizzle_y = 7;
1736 output.swizzle_z = 7;
1737 output.swizzle_w = 7;
1738 output.burst_count = 1;
1739 output.type = 2;
1740 output.op = CF_OP_EXPORT;
1741 output.array_base = 60;
1742 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
1743 r600_bytecode_add_output(ctx.bc, &output);
1744 last_exp_pos = ctx.bc->cf_last;
1745 }
1746
1747 if (!last_exp_param) {
1748 memset(&output, 0, sizeof(output));
1749 output.gpr = 0;
1750 output.elem_size = 3;
1751 output.swizzle_x = 7;
1752 output.swizzle_y = 7;
1753 output.swizzle_z = 7;
1754 output.swizzle_w = 7;
1755 output.burst_count = 1;
1756 output.type = 2;
1757 output.op = CF_OP_EXPORT;
1758 output.array_base = next_param++;
1759 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
1760 r600_bytecode_add_output(ctx.bc, &output);
1761 last_exp_param = ctx.bc->cf_last;
1762 }
1763
1764 last_exp_pos->op = CF_OP_EXPORT_DONE;
1765 last_exp_param->op = CF_OP_EXPORT_DONE;
1766
1767 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
1768 cf_pop = ctx.bc->cf_last;
1769
1770 cf_jump->cf_addr = cf_pop->id + 2;
1771 cf_jump->pop_count = 1;
1772 cf_pop->cf_addr = cf_pop->id + 2;
1773 cf_pop->pop_count = 1;
1774
1775 if (ctx.bc->chip_class == CAYMAN)
1776 cm_bytecode_add_cf_end(ctx.bc);
1777 else {
1778 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
1779 ctx.bc->cf_last->end_of_program = 1;
1780 }
1781
1782 gs->gs_copy_shader = cshader;
1783 cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
1784
1785 ctx.bc->nstack = 1;
1786
1787 return r600_bytecode_build(ctx.bc);
1788 }
1789
1790 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind)
1791 {
1792 struct r600_bytecode_output output;
1793 int i, k, ring_offset;
1794 int effective_stream = stream == -1 ? 0 : stream;
1795 int idx = 0;
1796
1797 for (i = 0; i < ctx->shader->noutput; i++) {
1798 if (ctx->gs_for_vs) {
1799 /* for ES we need to look up the corresponding ring offset expected by GS
1800 * (map this output to the GS input with the same name and sid) */
1801 /* FIXME precompute offsets */
1802 ring_offset = -1;
1803 for (k = 0; k < ctx->gs_for_vs->ninput; ++k) {
1804 struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
1805 struct r600_shader_io *out = &ctx->shader->output[i];
1806 if (in->name == out->name && in->sid == out->sid)
1807 ring_offset = in->ring_offset;
1808 }
1809
1810 if (ring_offset == -1)
1811 continue;
1812 } else {
1813 ring_offset = idx * 16;
1814 idx++;
1815 }
1816
1817 if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
1818 continue;
1819 /* after parsing the input decls, next_ring_offset holds the total size of
1820 * a single vertex's data; gs_next_vertex is the current vertex index */
1821 if (!ind)
1822 ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;
1823
1824 memset(&output, 0, sizeof(struct r600_bytecode_output));
1825 output.gpr = ctx->shader->output[i].gpr;
1826 output.elem_size = 3;
1827 output.comp_mask = 0xF;
1828 output.burst_count = 1;
1829
1830 if (ind)
1831 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
1832 else
1833 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
1834
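/* each of the four GS streams is written through its own MEM_RING CF op */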
1835 switch (stream) {
1836 default:
1837 case 0:
1838 output.op = CF_OP_MEM_RING; break;
1839 case 1:
1840 output.op = CF_OP_MEM_RING1; break;
1841 case 2:
1842 output.op = CF_OP_MEM_RING2; break;
1843 case 3:
1844 output.op = CF_OP_MEM_RING3; break;
1845 }
1846
1847 if (ind) {
1848 output.array_base = ring_offset >> 2; /* in dwords */
1849 output.array_size = 0xfff;
1850 output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
1851 } else
1852 output.array_base = ring_offset >> 2; /* in dwords */
1853 r600_bytecode_add_output(ctx->bc, &output);
1854 }
1855
1856 if (ind) {
1857 /* advance the per-stream ring write pointer past the vertex just emitted */
1858 struct r600_bytecode_alu alu;
1859 int r;
1860
1861 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1862 alu.op = ALU_OP2_ADD_INT;
1863 alu.src[0].sel = ctx->gs_export_gpr_tregs[effective_stream];
1864 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1865 alu.src[1].value = ctx->gs_out_ring_offset >> 4;
1866 alu.dst.sel = ctx->gs_export_gpr_tregs[effective_stream];
1867 alu.dst.write = 1;
1868 alu.last = 1;
1869 r = r600_bytecode_add_alu(ctx->bc, &alu);
1870 if (r)
1871 return r;
1872 }
1873 ++ctx->gs_next_vertex;
1874 return 0;
1875 }
1876
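/* Main TGSI -> r600 bytecode translation. The token stream is walked twice:
 * a first pass gathers immediates and declarations and lays out the register
 * files, a second pass lowers each instruction through the per-chip tables
 * (r600/eg/cm). Exports, streamout and the GS copy shader come afterwards. */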
1877 static int r600_shader_from_tgsi(struct r600_context *rctx,
1878 struct r600_pipe_shader *pipeshader,
1879 union r600_shader_key key)
1880 {
1881 struct r600_screen *rscreen = rctx->screen;
1882 struct r600_shader *shader = &pipeshader->shader;
1883 struct tgsi_token *tokens = pipeshader->selector->tokens;
1884 struct pipe_stream_output_info so = pipeshader->selector->so;
1885 struct tgsi_full_immediate *immediate;
1886 struct r600_shader_ctx ctx;
1887 struct r600_bytecode_output output[32];
1888 unsigned output_done, noutput;
1889 unsigned opcode;
1890 int i, j, k, r = 0;
1891 int next_param_base = 0, next_clip_base;
1892 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
1893 /* Declarations used by llvm code */
1894 bool use_llvm = false;
1895 bool indirect_gprs;
1896 bool ring_outputs = false;
1897 bool pos_emitted = false;
1898
1899 #ifdef R600_USE_LLVM
1900 use_llvm = rscreen->b.debug_flags & DBG_LLVM;
1901 #endif
1902 ctx.bc = &shader->bc;
1903 ctx.shader = shader;
1904 ctx.native_integers = true;
1905
1906
1907 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
1908 rscreen->has_compressed_msaa_texturing);
1909 ctx.tokens = tokens;
1910 tgsi_scan_shader(tokens, &ctx.info);
1911 shader->indirect_files = ctx.info.indirect_files;
1912
1913 shader->uses_doubles = ctx.info.uses_doubles;
1914
1915 indirect_gprs = ctx.info.indirect_files & ~(1 << TGSI_FILE_CONSTANT);
1916 tgsi_parse_init(&ctx.parse, tokens);
1917 ctx.type = ctx.info.processor;
1918 shader->processor_type = ctx.type;
1919 ctx.bc->type = shader->processor_type;
1920
1921 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
1922 shader->vs_as_gs_a = key.vs.as_gs_a;
1923 shader->vs_as_es = key.vs.as_es;
1924 }
1925
1926 ring_outputs = shader->vs_as_es || ctx.type == TGSI_PROCESSOR_GEOMETRY;
1927
1928 if (shader->vs_as_es) {
1929 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
1930 } else {
1931 ctx.gs_for_vs = NULL;
1932 }
1933
1934 ctx.next_ring_offset = 0;
1935 ctx.gs_out_ring_offset = 0;
1936 ctx.gs_next_vertex = 0;
1937 ctx.gs_stream_output_info = &so;
1938
1939 shader->uses_index_registers = false;
1940 ctx.face_gpr = -1;
1941 ctx.fixed_pt_position_gpr = -1;
1942 ctx.fragcoord_input = -1;
1943 ctx.colors_used = 0;
1944 ctx.clip_vertex_write = 0;
1945
1946 shader->nr_ps_color_exports = 0;
1947 shader->nr_ps_max_color_exports = 0;
1948
1949 if (ctx.type == TGSI_PROCESSOR_FRAGMENT)
1950 shader->two_side = key.ps.color_two_side;
1951
1952 /* register allocations */
1953 /* Values [0,127] correspond to GPR[0..127].
1954 * Values [128,159] correspond to constant buffer bank 0
1955 * Values [160,191] correspond to constant buffer bank 1
1956 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
1957 * Values [256,287] correspond to constant buffer bank 2 (EG)
1958 * Values [288,319] correspond to constant buffer bank 3 (EG)
1959 * Other special values are shown in the list below.
1960 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
1961 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
1962 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
1963 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
1964 * 248 SQ_ALU_SRC_0: special constant 0.0.
1965 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
1966 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
1967 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
1968 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
1969 * 253 SQ_ALU_SRC_LITERAL: literal constant.
1970 * 254 SQ_ALU_SRC_PV: previous vector result.
1971 * 255 SQ_ALU_SRC_PS: previous scalar result.
1972 */
1973 for (i = 0; i < TGSI_FILE_COUNT; i++) {
1974 ctx.file_offset[i] = 0;
1975 }
1976
1977 #ifdef R600_USE_LLVM
1978 if (use_llvm && ctx.info.indirect_files && (ctx.info.indirect_files & (1 << TGSI_FILE_CONSTANT)) != ctx.info.indirect_files) {
1979 fprintf(stderr, "Warning: R600 LLVM backend does not support "
1980 "indirect adressing. Falling back to TGSI "
1981 "backend.\n");
1982 use_llvm = 0;
1983 }
1984 #endif
1985 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
1986 ctx.file_offset[TGSI_FILE_INPUT] = 1;
1987 if (!use_llvm) {
1988 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
1989 }
1990 }
1991 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
1992 if (ctx.bc->chip_class >= EVERGREEN)
1993 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
1994 else
1995 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
1996 }
1997 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
1998 /* FIXME 1 would be enough in some cases (3 or fewer input vertices) */
1999 ctx.file_offset[TGSI_FILE_INPUT] = 2;
2000 }
2001 ctx.use_llvm = use_llvm;
2002
2003 if (use_llvm) {
2004 ctx.file_offset[TGSI_FILE_OUTPUT] =
2005 ctx.file_offset[TGSI_FILE_INPUT];
2006 } else {
2007 ctx.file_offset[TGSI_FILE_OUTPUT] =
2008 ctx.file_offset[TGSI_FILE_INPUT] +
2009 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
2010 }
2011 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
2012 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
2013
2014 /* Outside the GPR range. This will be translated to one of the
2015 * kcache banks later. */
2016 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
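/* e.g. a CONST[7].y source becomes sel = 512 + 7, chan = 1, which the
 * bytecode builder later maps onto a kcache slot for the clause */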
2017
2018 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
2019 ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
2020 ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
2021 ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
2022 ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
2023
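/* a GS additionally needs one ring write pointer per stream; other
 * shader types only use the shared temp register */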
2024 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2025 ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
2026 ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
2027 ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
2028 ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
2029 ctx.temp_reg = ctx.bc->ar_reg + 7;
2030 } else {
2031 ctx.temp_reg = ctx.bc->ar_reg + 3;
2032 }
2033
2034 shader->max_arrays = 0;
2035 shader->num_arrays = 0;
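/* files that are addressed indirectly must stay in GPRs; register each
 * such range as an array so relative addressing over it can be handled */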
2036 if (indirect_gprs) {
2037
2038 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
2039 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
2040 ctx.file_offset[TGSI_FILE_OUTPUT] -
2041 ctx.file_offset[TGSI_FILE_INPUT],
2042 0x0F);
2043 }
2044 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2045 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
2046 ctx.file_offset[TGSI_FILE_TEMPORARY] -
2047 ctx.file_offset[TGSI_FILE_OUTPUT],
2048 0x0F);
2049 }
2050 }
2051
2052 ctx.nliterals = 0;
2053 ctx.literals = NULL;
2054
2055 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
2056 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
2057
2058 if (shader->vs_as_gs_a)
2059 vs_add_primid_output(&ctx, key.vs.prim_id_out);
2060
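/* first pass: gather immediates and declarations only; instructions
 * are translated in the second pass further down */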
2061 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
2062 tgsi_parse_token(&ctx.parse);
2063 switch (ctx.parse.FullToken.Token.Type) {
2064 case TGSI_TOKEN_TYPE_IMMEDIATE:
2065 immediate = &ctx.parse.FullToken.FullImmediate;
2066 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
2067 if (ctx.literals == NULL) {
2068 r = -ENOMEM;
2069 goto out_err;
2070 }
2071 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
2072 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
2073 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
2074 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
2075 ctx.nliterals++;
2076 break;
2077 case TGSI_TOKEN_TYPE_DECLARATION:
2078 r = tgsi_declaration(&ctx);
2079 if (r)
2080 goto out_err;
2081 break;
2082 case TGSI_TOKEN_TYPE_INSTRUCTION:
2083 case TGSI_TOKEN_TYPE_PROPERTY:
2084 break;
2085 default:
2086 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
2087 r = -EINVAL;
2088 goto out_err;
2089 }
2090 }
2091
2092 shader->ring_item_sizes[0] = ctx.next_ring_offset;
2093 shader->ring_item_sizes[1] = 0;
2094 shader->ring_item_sizes[2] = 0;
2095 shader->ring_item_sizes[3] = 0;
2096
2097 /* Process two-sided color if needed */
2098 if (shader->two_side && ctx.colors_used) {
2099 int i, count = ctx.shader->ninput;
2100 unsigned next_lds_loc = ctx.shader->nlds;
2101
2102 /* additional inputs will be allocated right after the existing inputs;
2103 * they are dead after the color selection, so there is no need to
2104 * reserve these gprs for the rest of the shader code or to adjust
2105 * output offsets etc. */
2106 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
2107 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
2108
2109 /* if two-sided and neither the face nor the sample mask is used by the shader, ensure face_gpr is emitted */
2110 if (ctx.face_gpr == -1) {
2111 i = ctx.shader->ninput++;
2112 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
2113 ctx.shader->input[i].spi_sid = 0;
2114 ctx.shader->input[i].gpr = gpr++;
2115 ctx.face_gpr = ctx.shader->input[i].gpr;
2116 }
2117
2118 for (i = 0; i < count; i++) {
2119 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
2120 int ni = ctx.shader->ninput++;
2121 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
2122 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
2123 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
2124 ctx.shader->input[ni].gpr = gpr++;
2125 // TGSI to LLVM needs to know the lds position of inputs.
2126 // The non-LLVM path computes it later (in process_twoside_color).
2127 ctx.shader->input[ni].lds_pos = next_lds_loc++;
2128 ctx.shader->input[i].back_color_input = ni;
2129 if (ctx.bc->chip_class >= EVERGREEN) {
2130 if ((r = evergreen_interp_input(&ctx, ni)))
2131 return r;
2132 }
2133 }
2134 }
2135 }
2136
2137 /* LLVM backend setup */
2138 #ifdef R600_USE_LLVM
2139 if (use_llvm) {
2140 struct radeon_llvm_context radeon_llvm_ctx;
2141 LLVMModuleRef mod;
2142 bool dump = r600_can_dump_shader(&rscreen->b, tokens);
2143 boolean use_kill = false;
2144
2145 memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx));
2146 radeon_llvm_ctx.type = ctx.type;
2147 radeon_llvm_ctx.two_side = shader->two_side;
2148 radeon_llvm_ctx.face_gpr = ctx.face_gpr;
2149 radeon_llvm_ctx.inputs_count = ctx.shader->ninput + 1;
2150 radeon_llvm_ctx.r600_inputs = ctx.shader->input;
2151 radeon_llvm_ctx.r600_outputs = ctx.shader->output;
2152 radeon_llvm_ctx.color_buffer_count = max_color_exports;
2153 radeon_llvm_ctx.chip_class = ctx.bc->chip_class;
2154 radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN);
2155 radeon_llvm_ctx.stream_outputs = &so;
2156 radeon_llvm_ctx.alpha_to_one = key.ps.alpha_to_one;
2157 radeon_llvm_ctx.has_compressed_msaa_texturing =
2158 ctx.bc->has_compressed_msaa_texturing;
2159 mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens);
2160 ctx.shader->has_txq_cube_array_z_comp = radeon_llvm_ctx.has_txq_cube_array_z_comp;
2161 ctx.shader->uses_tex_buffers = radeon_llvm_ctx.uses_tex_buffers;
2162
2163 if (r600_llvm_compile(mod, rscreen->b.family, ctx.bc, &use_kill, dump)) {
2164 radeon_llvm_dispose(&radeon_llvm_ctx);
2165 use_llvm = 0;
2166 fprintf(stderr, "R600 LLVM backend failed to compile "
2167 "shader. Falling back to TGSI\n");
2168 } else {
2169 ctx.file_offset[TGSI_FILE_OUTPUT] =
2170 ctx.file_offset[TGSI_FILE_INPUT];
2171 }
2172 if (use_kill)
2173 ctx.shader->uses_kill = use_kill;
2174 radeon_llvm_dispose(&radeon_llvm_ctx);
2175 }
2176 #endif
2177 /* End of LLVM backend setup */
2178
2179 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
2180 shader->nr_ps_max_color_exports = 8;
2181
2182 if (!use_llvm) {
2183 if (ctx.fragcoord_input >= 0) {
2184 if (ctx.bc->chip_class == CAYMAN) {
2185 for (j = 0 ; j < 4; j++) {
2186 struct r600_bytecode_alu alu;
2187 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2188 alu.op = ALU_OP1_RECIP_IEEE;
2189 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
2190 alu.src[0].chan = 3;
2191
2192 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
2193 alu.dst.chan = j;
2194 alu.dst.write = (j == 3);
2195 alu.last = 1;
2196 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
2197 return r;
2198 }
2199 } else {
2200 struct r600_bytecode_alu alu;
2201 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2202 alu.op = ALU_OP1_RECIP_IEEE;
2203 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
2204 alu.src[0].chan = 3;
2205
2206 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
2207 alu.dst.chan = 3;
2208 alu.dst.write = 1;
2209 alu.last = 1;
2210 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
2211 return r;
2212 }
2213 }
2214
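/* GS: zero the per-stream ring write pointers before any vertex is emitted */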
2215 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2216 struct r600_bytecode_alu alu;
2217 int r;
2218 for (j = 0; j < 4; j++) {
2219 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2220 alu.op = ALU_OP1_MOV;
2221 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
2222 alu.src[0].value = 0;
2223 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
2224 alu.dst.write = 1;
2225 alu.last = 1;
2226 r = r600_bytecode_add_alu(ctx.bc, &alu);
2227 if (r)
2228 return r;
2229 }
2230 }
2231 if (shader->two_side && ctx.colors_used) {
2232 if ((r = process_twoside_color_inputs(&ctx)))
2233 return r;
2234 }
2235
2236 tgsi_parse_init(&ctx.parse, tokens);
2237 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
2238 tgsi_parse_token(&ctx.parse);
2239 switch (ctx.parse.FullToken.Token.Type) {
2240 case TGSI_TOKEN_TYPE_INSTRUCTION:
2241 r = tgsi_is_supported(&ctx);
2242 if (r)
2243 goto out_err;
2244 ctx.max_driver_temp_used = 0;
2245 /* reserve first tmp for everyone */
2246 r600_get_temp(&ctx);
2247
2248 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
2249 if ((r = tgsi_split_constant(&ctx)))
2250 goto out_err;
2251 if ((r = tgsi_split_literal_constant(&ctx)))
2252 goto out_err;
2253 if (ctx.type == TGSI_PROCESSOR_GEOMETRY)
2254 if ((r = tgsi_split_gs_inputs(&ctx)))
2255 goto out_err;
2256 if (ctx.bc->chip_class == CAYMAN)
2257 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
2258 else if (ctx.bc->chip_class >= EVERGREEN)
2259 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
2260 else
2261 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
2262 r = ctx.inst_info->process(&ctx);
2263 if (r)
2264 goto out_err;
2265 break;
2266 default:
2267 break;
2268 }
2269 }
2270 }
2271
2272 /* Reset the temporary register counter. */
2273 ctx.max_driver_temp_used = 0;
2274
2275 noutput = shader->noutput;
2276
2277 if (!ring_outputs && ctx.clip_vertex_write) {
2278 unsigned clipdist_temp[2];
2279
2280 clipdist_temp[0] = r600_get_temp(&ctx);
2281 clipdist_temp[1] = r600_get_temp(&ctx);
2282
2283 /* convert the clipvertex write into clipdistance writes and stop
2284 exporting the clip vertex itself */
2285
2286 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
2287 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
2288 shader->output[noutput].gpr = clipdist_temp[0];
2289 noutput++;
2290 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
2291 shader->output[noutput].gpr = clipdist_temp[1];
2292 noutput++;
2293
2294 /* reset spi_sid for clipvertex output to avoid confusing spi */
2295 shader->output[ctx.cv_output].spi_sid = 0;
2296
2297 shader->clip_dist_write = 0xFF;
2298
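/* clip distance i = dot(clipvertex, plane i), with the eight clip
 * planes read from the driver const buffer; channel (i & 3) of
 * clipdist_temp[i >> 2] receives the result */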
2299 for (i = 0; i < 8; i++) {
2300 int oreg = i >> 2;
2301 int ochan = i & 3;
2302
2303 for (j = 0; j < 4; j++) {
2304 struct r600_bytecode_alu alu;
2305 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2306 alu.op = ALU_OP2_DOT4;
2307 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
2308 alu.src[0].chan = j;
2309
2310 alu.src[1].sel = 512 + i;
2311 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
2312 alu.src[1].chan = j;
2313
2314 alu.dst.sel = clipdist_temp[oreg];
2315 alu.dst.chan = j;
2316 alu.dst.write = (j == ochan);
2317 if (j == 3)
2318 alu.last = 1;
2319 if (!use_llvm)
2320 r = r600_bytecode_add_alu(ctx.bc, &alu);
2321 if (r)
2322 return r;
2323 }
2324 }
2325 }
2326
2327 /* Add stream outputs. */
2328 if (!ring_outputs && ctx.type == TGSI_PROCESSOR_VERTEX &&
2329 so.num_outputs && !use_llvm)
2330 emit_streamout(&ctx, &so, -1, NULL);
2331
2332 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2333 convert_edgeflag_to_int(&ctx);
2334
2335 if (ring_outputs) {
2336 if (shader->vs_as_es) {
2337 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
2338 ctx.gs_export_gpr_tregs[1] = -1;
2339 ctx.gs_export_gpr_tregs[2] = -1;
2340 ctx.gs_export_gpr_tregs[3] = -1;
2341
2342 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
2343 }
2344 } else {
2345 /* Export output */
2346 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
2347
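/* POS exports use fixed array_base slots: 60 is position, 61 the misc
 * vector (psize/edgeflag/layer/viewport), clip distances start at 61 or
 * 62; PARAM exports are allocated sequentially from 0 */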
2348 for (i = 0, j = 0; i < noutput; i++, j++) {
2349 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2350 output[j].gpr = shader->output[i].gpr;
2351 output[j].elem_size = 3;
2352 output[j].swizzle_x = 0;
2353 output[j].swizzle_y = 1;
2354 output[j].swizzle_z = 2;
2355 output[j].swizzle_w = 3;
2356 output[j].burst_count = 1;
2357 output[j].type = -1;
2358 output[j].op = CF_OP_EXPORT;
2359 switch (ctx.type) {
2360 case TGSI_PROCESSOR_VERTEX:
2361 switch (shader->output[i].name) {
2362 case TGSI_SEMANTIC_POSITION:
2363 output[j].array_base = 60;
2364 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2365 pos_emitted = true;
2366 break;
2367
2368 case TGSI_SEMANTIC_PSIZE:
2369 output[j].array_base = 61;
2370 output[j].swizzle_y = 7;
2371 output[j].swizzle_z = 7;
2372 output[j].swizzle_w = 7;
2373 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2374 pos_emitted = true;
2375 break;
2376 case TGSI_SEMANTIC_EDGEFLAG:
2377 output[j].array_base = 61;
2378 output[j].swizzle_x = 7;
2379 output[j].swizzle_y = 0;
2380 output[j].swizzle_z = 7;
2381 output[j].swizzle_w = 7;
2382 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2383 pos_emitted = true;
2384 break;
2385 case TGSI_SEMANTIC_LAYER:
2386 /* spi_sid is 0 for outputs that are
2387 * not consumed by PS */
2388 if (shader->output[i].spi_sid) {
2389 output[j].array_base = next_param_base++;
2390 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2391 j++;
2392 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2393 }
2394 output[j].array_base = 61;
2395 output[j].swizzle_x = 7;
2396 output[j].swizzle_y = 7;
2397 output[j].swizzle_z = 0;
2398 output[j].swizzle_w = 7;
2399 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2400 pos_emitted = true;
2401 break;
2402 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2403 /* spi_sid is 0 for outputs that are
2404 * not consumed by PS */
2405 if (shader->output[i].spi_sid) {
2406 output[j].array_base = next_param_base++;
2407 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2408 j++;
2409 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2410 }
2411 output[j].array_base = 61;
2412 output[j].swizzle_x = 7;
2413 output[j].swizzle_y = 7;
2414 output[j].swizzle_z = 7;
2415 output[j].swizzle_w = 0;
2416 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2417 pos_emitted = true;
2418 break;
2419 case TGSI_SEMANTIC_CLIPVERTEX:
2420 j--;
2421 break;
2422 case TGSI_SEMANTIC_CLIPDIST:
2423 output[j].array_base = next_clip_base++;
2424 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2425 pos_emitted = true;
2426 /* spi_sid is 0 for clipdistance outputs that were generated
2427 * for clipvertex - we don't need to pass them to PS */
2428 if (shader->output[i].spi_sid) {
2429 j++;
2430 /* duplicate it as PARAM to pass to the pixel shader */
2431 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2432 output[j].array_base = next_param_base++;
2433 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2434 }
2435 break;
2436 case TGSI_SEMANTIC_FOG:
2437 output[j].swizzle_y = 4; /* 0 */
2438 output[j].swizzle_z = 4; /* 0 */
2439 output[j].swizzle_w = 5; /* 1 */
2440 break;
2441 case TGSI_SEMANTIC_PRIMID:
2442 output[j].swizzle_x = 2;
2443 output[j].swizzle_y = 4; /* 0 */
2444 output[j].swizzle_z = 4; /* 0 */
2445 output[j].swizzle_w = 4; /* 0 */
2446 break;
2447 }
2448
2449 break;
2450 case TGSI_PROCESSOR_FRAGMENT:
2451 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
2452 /* never export more colors than the number of CBs */
2453 if (shader->output[i].sid >= max_color_exports) {
2454 /* skip export */
2455 j--;
2456 continue;
2457 }
2458 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
2459 output[j].array_base = shader->output[i].sid;
2460 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2461 shader->nr_ps_color_exports++;
2462 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
2463 for (k = 1; k < max_color_exports; k++) {
2464 j++;
2465 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2466 output[j].gpr = shader->output[i].gpr;
2467 output[j].elem_size = 3;
2468 output[j].swizzle_x = 0;
2469 output[j].swizzle_y = 1;
2470 output[j].swizzle_z = 2;
2471 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
2472 output[j].burst_count = 1;
2473 output[j].array_base = k;
2474 output[j].op = CF_OP_EXPORT;
2475 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2476 shader->nr_ps_color_exports++;
2477 }
2478 }
2479 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
2480 output[j].array_base = 61;
2481 output[j].swizzle_x = 2;
2482 output[j].swizzle_y = 7;
2483 output[j].swizzle_z = output[j].swizzle_w = 7;
2484 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2485 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
2486 output[j].array_base = 61;
2487 output[j].swizzle_x = 7;
2488 output[j].swizzle_y = 1;
2489 output[j].swizzle_z = output[j].swizzle_w = 7;
2490 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2491 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
2492 output[j].array_base = 61;
2493 output[j].swizzle_x = 7;
2494 output[j].swizzle_y = 7;
2495 output[j].swizzle_z = 0;
2496 output[j].swizzle_w = 7;
2497 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2498 } else {
2499 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
2500 r = -EINVAL;
2501 goto out_err;
2502 }
2503 break;
2504 default:
2505 R600_ERR("unsupported processor type %d\n", ctx.type);
2506 r = -EINVAL;
2507 goto out_err;
2508 }
2509
2510 if (output[j].type == -1) {
2511 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2512 output[j].array_base = next_param_base++;
2513 }
2514 }
2515
2516 /* add fake position export */
2517 if (ctx.type == TGSI_PROCESSOR_VERTEX && pos_emitted == false) {
2518 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2519 output[j].gpr = 0;
2520 output[j].elem_size = 3;
2521 output[j].swizzle_x = 7;
2522 output[j].swizzle_y = 7;
2523 output[j].swizzle_z = 7;
2524 output[j].swizzle_w = 7;
2525 output[j].burst_count = 1;
2526 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2527 output[j].array_base = 60;
2528 output[j].op = CF_OP_EXPORT;
2529 j++;
2530 }
2531
2532 /* add fake param output for vertex shader if no param is exported */
2533 if (ctx.type == TGSI_PROCESSOR_VERTEX && next_param_base == 0) {
2534 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2535 output[j].gpr = 0;
2536 output[j].elem_size = 3;
2537 output[j].swizzle_x = 7;
2538 output[j].swizzle_y = 7;
2539 output[j].swizzle_z = 7;
2540 output[j].swizzle_w = 7;
2541 output[j].burst_count = 1;
2542 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2543 output[j].array_base = 0;
2544 output[j].op = CF_OP_EXPORT;
2545 j++;
2546 }
2547
2548 /* add fake pixel export */
2549 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && shader->nr_ps_color_exports == 0) {
2550 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2551 output[j].gpr = 0;
2552 output[j].elem_size = 3;
2553 output[j].swizzle_x = 7;
2554 output[j].swizzle_y = 7;
2555 output[j].swizzle_z = 7;
2556 output[j].swizzle_w = 7;
2557 output[j].burst_count = 1;
2558 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2559 output[j].array_base = 0;
2560 output[j].op = CF_OP_EXPORT;
2561 j++;
2562 shader->nr_ps_color_exports++;
2563 }
2564
2565 noutput = j;
2566
2567 /* set export done on last export of each type */
2568 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
2569 if (!(output_done & (1 << output[i].type))) {
2570 output_done |= (1 << output[i].type);
2571 output[i].op = CF_OP_EXPORT_DONE;
2572 }
2573 }
2574 /* add output to bytecode */
2575 if (!use_llvm) {
2576 for (i = 0; i < noutput; i++) {
2577 r = r600_bytecode_add_output(ctx.bc, &output[i]);
2578 if (r)
2579 goto out_err;
2580 }
2581 }
2582 }
2583
2584 /* add program end */
2585 if (!use_llvm) {
2586 if (ctx.bc->chip_class == CAYMAN)
2587 cm_bytecode_add_cf_end(ctx.bc);
2588 else {
2589 const struct cf_op_info *last = NULL;
2590
2591 if (ctx.bc->cf_last)
2592 last = r600_isa_cf(ctx.bc->cf_last->op);
2593
2594 /* alu clause instructions don't have EOP bit, so add NOP */
2595 if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_CALL_FS)
2596 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2597
2598 ctx.bc->cf_last->end_of_program = 1;
2599 }
2600 }
2601
2602 /* check GPR limit - we have 124 = 128 - 4
2603 * (4 are reserved as alu clause temporary registers) */
2604 if (ctx.bc->ngpr > 124) {
2605 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
2606 r = -ENOMEM;
2607 goto out_err;
2608 }
2609
2610 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2611 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
2612 return r;
2613 }
2614
2615 free(ctx.literals);
2616 tgsi_parse_free(&ctx.parse);
2617 return 0;
2618 out_err:
2619 free(ctx.literals);
2620 tgsi_parse_free(&ctx.parse);
2621 return r;
2622 }
2623
2624 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
2625 {
2626 const unsigned tgsi_opcode =
2627 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
2628 R600_ERR("%s tgsi opcode unsupported\n",
2629 tgsi_get_opcode_name(tgsi_opcode));
2630 return -EINVAL;
2631 }
2632
2633 static int tgsi_end(struct r600_shader_ctx *ctx)
2634 {
2635 return 0;
2636 }
2637
2638 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
2639 const struct r600_shader_src *shader_src,
2640 unsigned chan)
2641 {
2642 bc_src->sel = shader_src->sel;
2643 bc_src->chan = shader_src->swizzle[chan];
2644 bc_src->neg = shader_src->neg;
2645 bc_src->abs = shader_src->abs;
2646 bc_src->rel = shader_src->rel;
2647 bc_src->value = shader_src->value[bc_src->chan];
2648 bc_src->kc_bank = shader_src->kc_bank;
2649 bc_src->kc_rel = shader_src->kc_rel;
2650 }
2651
2652 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
2653 {
2654 bc_src->abs = 1;
2655 bc_src->neg = 0;
2656 }
2657
2658 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
2659 {
2660 bc_src->neg = !bc_src->neg;
2661 }
2662
2663 static void tgsi_dst(struct r600_shader_ctx *ctx,
2664 const struct tgsi_full_dst_register *tgsi_dst,
2665 unsigned swizzle,
2666 struct r600_bytecode_alu_dst *r600_dst)
2667 {
2668 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2669
2670 r600_dst->sel = tgsi_dst->Register.Index;
2671 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
2672 r600_dst->chan = swizzle;
2673 r600_dst->write = 1;
2674 if (tgsi_dst->Register.Indirect)
2675 r600_dst->rel = V_SQ_REL_RELATIVE;
2676 if (inst->Instruction.Saturate) {
2677 r600_dst->clamp = 1;
2678 }
2679 }
2680
2681 static int tgsi_last_instruction(unsigned writemask)
2682 {
2683 int i, lasti = 0;
2684
2685 for (i = 0; i < 4; i++) {
2686 if (writemask & (1 << i)) {
2687 lasti = i;
2688 }
2689 }
2690 return lasti;
2691 }
2692
2693
2694
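/* 64-bit ALU ops: a double occupies a channel pair (xy or zw), so a single-
 * channel writemask is widened to the whole pair and fp64_switch() selects
 * the matching low/high source word. When the destination is the second
 * channel of a pair, the result goes through a temp and is MOVed over. */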
2695 static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap)
2696 {
2697 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2698 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2699 struct r600_bytecode_alu alu;
2700 int i, j, r, lasti = tgsi_last_instruction(write_mask);
2701 int use_tmp = 0;
2702
2703 if (singledest) {
2704 switch (write_mask) {
2705 case 0x1:
2706 write_mask = 0x3;
2707 break;
2708 case 0x2:
2709 use_tmp = 1;
2710 write_mask = 0x3;
2711 break;
2712 case 0x4:
2713 write_mask = 0xc;
2714 break;
2715 case 0x8:
2716 write_mask = 0xc;
2717 use_tmp = 3;
2718 break;
2719 }
2720 }
2721
2722 lasti = tgsi_last_instruction(write_mask);
2723 for (i = 0; i <= lasti; i++) {
2724
2725 if (!(write_mask & (1 << i)))
2726 continue;
2727
2728 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2729
2730 if (singledest) {
2731 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2732 if (use_tmp) {
2733 alu.dst.sel = ctx->temp_reg;
2734 alu.dst.chan = i;
2735 alu.dst.write = 1;
2736 }
2737 if (i == 1 || i == 3)
2738 alu.dst.write = 0;
2739 } else
2740 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2741
2742 alu.op = ctx->inst_info->op;
2743 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
2744 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2745 } else if (!swap) {
2746 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2747 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
2748 }
2749 } else {
2750 r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
2751 r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
2752 }
2753
2754 /* handle some special cases */
2755 if (i == 1 || i == 3) {
2756 switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
2757 case TGSI_OPCODE_SUB:
2758 r600_bytecode_src_toggle_neg(&alu.src[1]);
2759 break;
2760 case TGSI_OPCODE_DABS:
2761 r600_bytecode_src_set_abs(&alu.src[0]);
2762 break;
2763 default:
2764 break;
2765 }
2766 }
2767 if (i == lasti) {
2768 alu.last = 1;
2769 }
2770 r = r600_bytecode_add_alu(ctx->bc, &alu);
2771 if (r)
2772 return r;
2773 }
2774
2775 if (use_tmp) {
2776 write_mask = inst->Dst[0].Register.WriteMask;
2777
2778 /* move result from temp to dst */
2779 for (i = 0; i <= lasti; i++) {
2780 if (!(write_mask & (1 << i)))
2781 continue;
2782
2783 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2784 alu.op = ALU_OP1_MOV;
2785 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2786 alu.src[0].sel = ctx->temp_reg;
2787 alu.src[0].chan = use_tmp - 1;
2788 alu.last = (i == lasti);
2789
2790 r = r600_bytecode_add_alu(ctx->bc, &alu);
2791 if (r)
2792 return r;
2793 }
2794 }
2795 return 0;
2796 }
2797
2798 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
2799 {
2800 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2801 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2802 /* a 64-bit op must write at least one full channel pair */
2803 if ((write_mask & 0x3) != 0x3 &&
2804 (write_mask & 0xc) != 0xc) {
2805 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
2806 return -1;
2807 }
2808 return tgsi_op2_64_params(ctx, false, false);
2809 }
2810
2811 static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
2812 {
2813 return tgsi_op2_64_params(ctx, true, false);
2814 }
2815
2816 static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
2817 {
2818 return tgsi_op2_64_params(ctx, true, true);
2819 }
2820
2821 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
2822 {
2823 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2824 struct r600_bytecode_alu alu;
2825 int i, j, r;
2826 int lasti = 3;
2827 int tmp = r600_get_temp(ctx);
2828
2829 for (i = 0; i < lasti + 1; i++) {
2830
2831 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2832 alu.op = ctx->inst_info->op;
2833 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2834 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
2835 }
2836
2837 if (inst->Dst[0].Register.WriteMask & (1 << i))
2838 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2839 else
2840 alu.dst.sel = tmp;
2841
2842 alu.dst.chan = i;
2843 alu.is_op3 = 1;
2844 if (i == lasti) {
2845 alu.last = 1;
2846 }
2847 r = r600_bytecode_add_alu(ctx->bc, &alu);
2848 if (r)
2849 return r;
2850 }
2851 return 0;
2852 }
2853
2854 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
2855 {
2856 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2857 struct r600_bytecode_alu alu;
2858 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2859 int i, j, r, lasti = tgsi_last_instruction(write_mask);
2860 /* trans-only ops each close an ALU group, so use a temp register when more than one dst component is written, to avoid clobbering later sources */
2861 int use_tmp = trans_only && (write_mask ^ (1 << lasti));
2862
2863 for (i = 0; i <= lasti; i++) {
2864 if (!(write_mask & (1 << i)))
2865 continue;
2866
2867 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2868 if (use_tmp) {
2869 alu.dst.sel = ctx->temp_reg;
2870 alu.dst.chan = i;
2871 alu.dst.write = 1;
2872 } else
2873 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2874
2875 alu.op = ctx->inst_info->op;
2876 if (!swap) {
2877 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2878 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
2879 }
2880 } else {
2881 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
2882 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2883 }
2884 /* handle some special cases */
2885 switch (inst->Instruction.Opcode) {
2886 case TGSI_OPCODE_SUB:
2887 r600_bytecode_src_toggle_neg(&alu.src[1]);
2888 break;
2889 case TGSI_OPCODE_ABS:
2890 r600_bytecode_src_set_abs(&alu.src[0]);
2891 break;
2892 default:
2893 break;
2894 }
2895 if (i == lasti || trans_only) {
2896 alu.last = 1;
2897 }
2898 r = r600_bytecode_add_alu(ctx->bc, &alu);
2899 if (r)
2900 return r;
2901 }
2902
2903 if (use_tmp) {
2904 /* move result from temp to dst */
2905 for (i = 0; i <= lasti; i++) {
2906 if (!(write_mask & (1 << i)))
2907 continue;
2908
2909 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2910 alu.op = ALU_OP1_MOV;
2911 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2912 alu.src[0].sel = ctx->temp_reg;
2913 alu.src[0].chan = i;
2914 alu.last = (i == lasti);
2915
2916 r = r600_bytecode_add_alu(ctx->bc, &alu);
2917 if (r)
2918 return r;
2919 }
2920 }
2921 return 0;
2922 }
2923
2924 static int tgsi_op2(struct r600_shader_ctx *ctx)
2925 {
2926 return tgsi_op2_s(ctx, 0, 0);
2927 }
2928
2929 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
2930 {
2931 return tgsi_op2_s(ctx, 1, 0);
2932 }
2933
2934 static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
2935 {
2936 return tgsi_op2_s(ctx, 0, 1);
2937 }
2938
2939 static int tgsi_ineg(struct r600_shader_ctx *ctx)
2940 {
2941 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2942 struct r600_bytecode_alu alu;
2943 int i, r;
2944 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2945
2946 for (i = 0; i < lasti + 1; i++) {
2947
2948 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2949 continue;
2950 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2951 alu.op = ctx->inst_info->op;
2952
2953 alu.src[0].sel = V_SQ_ALU_SRC_0;
2954
2955 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2956
2957 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2958
2959 if (i == lasti) {
2960 alu.last = 1;
2961 }
2962 r = r600_bytecode_add_alu(ctx->bc, &alu);
2963 if (r)
2964 return r;
2965 }
2966 return 0;
2967
2968 }
2969
2970 static int tgsi_dneg(struct r600_shader_ctx *ctx)
2971 {
2972 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2973 struct r600_bytecode_alu alu;
2974 int i, r;
2975 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2976
2977 for (i = 0; i < lasti + 1; i++) {
2978
2979 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2980 continue;
2981 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2982 alu.op = ALU_OP1_MOV;
2983
2984 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2985
2986 if (i == 1 || i == 3)
2987 r600_bytecode_src_toggle_neg(&alu.src[0]);
2988 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2989
2990 if (i == lasti) {
2991 alu.last = 1;
2992 }
2993 r = r600_bytecode_add_alu(ctx->bc, &alu);
2994 if (r)
2995 return r;
2996 }
2997 return 0;
2998
2999 }
3000
3001 static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
3002 {
3003 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3004 struct r600_bytecode_alu alu;
3005 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3006 int i, j, r;
3007 int firsti = write_mask == 0xc ? 2 : 0;
3008
3009 for (i = 0; i <= 3; i++) {
3010 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3011 alu.op = ctx->inst_info->op;
3012
3013 alu.dst.sel = ctx->temp_reg;
3014 alu.dst.chan = i;
3015 alu.dst.write = 1;
3016 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3017 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
3018 }
3019
3020 if (i == 3)
3021 alu.last = 1;
3022
3023 r = r600_bytecode_add_alu(ctx->bc, &alu);
3024 if (r)
3025 return r;
3026 }
3027
3028 /* MOV the first two result channels to dst0, honoring its writemask */
3029 for (i = 0; i <= 1; i++) {
3030 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3031 alu.op = ALU_OP1_MOV;
3032 alu.src[0].chan = i + 2;
3033 alu.src[0].sel = ctx->temp_reg;
3034
3035 tgsi_dst(ctx, &inst->Dst[0], firsti + i, &alu.dst);
3036 alu.dst.write = (inst->Dst[0].Register.WriteMask >> (firsti + i)) & 1;
3037 alu.last = 1;
3038 r = r600_bytecode_add_alu(ctx->bc, &alu);
3039 if (r)
3040 return r;
3041 }
3042
3043 for (i = 0; i <= 3; i++) {
3044 if (inst->Dst[1].Register.WriteMask & (1 << i)) {
3045 /* MOV the exponent result to the dst1 channel selected by its writemask */
3046 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3047 alu.op = ALU_OP1_MOV;
3048 alu.src[0].chan = 1;
3049 alu.src[0].sel = ctx->temp_reg;
3050
3051 tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
3052 alu.last = 1;
3053 r = r600_bytecode_add_alu(ctx->bc, &alu);
3054 if (r)
3055 return r;
3056 break;
3057 }
3058 }
3059 return 0;
3060 }
3061
3062
3063 static int egcm_int_to_double(struct r600_shader_ctx *ctx)
3064 {
3065 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3066 struct r600_bytecode_alu alu;
3067 int i, r;
3068 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3069
3070 assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
3071 inst->Instruction.Opcode == TGSI_OPCODE_U2D);
3072
3073 for (i = 0; i <= (lasti+1)/2; i++) {
3074 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3075 alu.op = ctx->inst_info->op;
3076
3077 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3078 alu.dst.sel = ctx->temp_reg;
3079 alu.dst.chan = i;
3080 alu.dst.write = 1;
3081 alu.last = 1;
3082
3083 r = r600_bytecode_add_alu(ctx->bc, &alu);
3084 if (r)
3085 return r;
3086 }
3087
3088 for (i = 0; i <= lasti; i++) {
3089 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3090 alu.op = ALU_OP1_FLT32_TO_FLT64;
3091
3092 alu.src[0].chan = i/2;
3093 if (i%2 == 0)
3094 alu.src[0].sel = ctx->temp_reg;
3095 else {
3096 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3097 alu.src[0].value = 0x0;
3098 }
3099 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3100 alu.last = i == lasti;
3101
3102 r = r600_bytecode_add_alu(ctx->bc, &alu);
3103 if (r)
3104 return r;
3105 }
3106
3107 return 0;
3108 }
3109
3110 static int egcm_double_to_int(struct r600_shader_ctx *ctx)
3111 {
3112 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3113 struct r600_bytecode_alu alu;
3114 int i, r;
3115 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3116
3117 assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
3118 inst->Instruction.Opcode == TGSI_OPCODE_D2U);
3119
3120 for (i = 0; i <= lasti; i++) {
3121 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3122 alu.op = ALU_OP1_FLT64_TO_FLT32;
3123
3124 r600_bytecode_src(&alu.src[0], &ctx->src[0], fp64_switch(i));
3125 alu.dst.chan = i;
3126 alu.dst.sel = ctx->temp_reg;
3127 alu.dst.write = i%2 == 0;
3128 alu.last = i == lasti;
3129
3130 r = r600_bytecode_add_alu(ctx->bc, &alu);
3131 if (r)
3132 return r;
3133 }
3134
3135 for (i = 0; i <= (lasti+1)/2; i++) {
3136 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3137 alu.op = ctx->inst_info->op;
3138
3139 alu.src[0].chan = i*2;
3140 alu.src[0].sel = ctx->temp_reg;
3141 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
3142 alu.last = 1;
3143
3144 r = r600_bytecode_add_alu(ctx->bc, &alu);
3145 if (r)
3146 return r;
3147 }
3148
3149 return 0;
3150 }
3151
3152 static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
3153 {
3154 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3155 int i, r;
3156 struct r600_bytecode_alu alu;
3157 int last_slot = 3;
3158 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3159 int t1 = ctx->temp_reg;
3160
3161 /* these ops appear to require the result in the X/Y channel pair */
3162 for (i = 0 ; i < last_slot; i++) {
3163 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3164 alu.op = ctx->inst_info->op;
3165
3166 /* there should be only one src reg */
3167 assert (inst->Instruction.NumSrcRegs == 1);
3168
3169 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
3170 r600_bytecode_src(&alu.src[1], &ctx->src[0], 0);
3171
3172 /* RSQ should take the absolute value of src */
3173 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
3174 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT) {
3175 r600_bytecode_src_set_abs(&alu.src[1]);
3176 }
3177 alu.dst.sel = t1;
3178 alu.dst.chan = i;
3179 alu.dst.write = (i == 0 || i == 1);
3180
3181 if (ctx->bc->chip_class != CAYMAN || i == last_slot - 1)
3182 alu.last = 1;
3183 r = r600_bytecode_add_alu(ctx->bc, &alu);
3184 if (r)
3185 return r;
3186 }
3187
3188 for (i = 0 ; i <= lasti; i++) {
3189 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3190 continue;
3191 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3192 alu.op = ALU_OP1_MOV;
3193 alu.src[0].sel = t1;
3194 alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
3195 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3196 alu.dst.write = 1;
3197 if (i == lasti)
3198 alu.last = 1;
3199 r = r600_bytecode_add_alu(ctx->bc, &alu);
3200 if (r)
3201 return r;
3202 }
3203 return 0;
3204 }
3205
3206 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
3207 {
3208 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3209 int i, j, r;
3210 struct r600_bytecode_alu alu;
3211 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3212
3213 for (i = 0 ; i < last_slot; i++) {
3214 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3215 alu.op = ctx->inst_info->op;
3216 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3217 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
3218
3219 /* RSQ should take the absolute value of src */
3220 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
3221 r600_bytecode_src_set_abs(&alu.src[j]);
3222 }
3223 }
3224 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3225 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3226
3227 if (i == last_slot - 1)
3228 alu.last = 1;
3229 r = r600_bytecode_add_alu(ctx->bc, &alu);
3230 if (r)
3231 return r;
3232 }
3233 return 0;
3234 }
3235
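/* Cayman has no scalar t-slot, so the multiply is issued in all four vector
 * slots with the same source channel k; only slot k commits its result, and
 * the temp values are then moved to the real destination. */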
3236 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
3237 {
3238 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3239 int i, j, k, r;
3240 struct r600_bytecode_alu alu;
3241 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3242 int t1 = ctx->temp_reg;
3243
3244 for (k = 0; k <= lasti; k++) {
3245 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
3246 continue;
3247
3248 for (i = 0 ; i < 4; i++) {
3249 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3250 alu.op = ctx->inst_info->op;
3251 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3252 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
3253 }
3254 alu.dst.sel = t1;
3255 alu.dst.chan = i;
3256 alu.dst.write = (i == k);
3257 if (i == 3)
3258 alu.last = 1;
3259 r = r600_bytecode_add_alu(ctx->bc, &alu);
3260 if (r)
3261 return r;
3262 }
3263 }
3264
3265 for (i = 0 ; i <= lasti; i++) {
3266 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3267 continue;
3268 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3269 alu.op = ALU_OP1_MOV;
3270 alu.src[0].sel = t1;
3271 alu.src[0].chan = i;
3272 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3273 alu.dst.write = 1;
3274 if (i == lasti)
3275 alu.last = 1;
3276 r = r600_bytecode_add_alu(ctx->bc, &alu);
3277 if (r)
3278 return r;
3279 }
3280
3281 return 0;
3282 }
3283
3284
3285 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
3286 {
3287 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3288 int i, j, k, r;
3289 struct r600_bytecode_alu alu;
3290 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3291 int t1 = ctx->temp_reg;
3292
3293 for (k = 0; k < 2; k++) {
3294 if (!(inst->Dst[0].Register.WriteMask & (0x3 << (k * 2))))
3295 continue;
3296
3297 for (i = 0; i < 4; i++) {
3298 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3299 alu.op = ctx->inst_info->op;
3300 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3301 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));
3302 }
3303 alu.dst.sel = t1;
3304 alu.dst.chan = i;
3305 alu.dst.write = 1;
3306 if (i == 3)
3307 alu.last = 1;
3308 r = r600_bytecode_add_alu(ctx->bc, &alu);
3309 if (r)
3310 return r;
3311 }
3312 }
3313
3314 for (i = 0; i <= lasti; i++) {
3315 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3316 continue;
3317 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3318 alu.op = ALU_OP1_MOV;
3319 alu.src[0].sel = t1;
3320 alu.src[0].chan = i;
3321 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3322 alu.dst.write = 1;
3323 if (i == lasti)
3324 alu.last = 1;
3325 r = r600_bytecode_add_alu(ctx->bc, &alu);
3326 if (r)
3327 return r;
3328 }
3329
3330 return 0;
3331 }
3332
3333 /*
3334 * r600 - trunc to -PI..PI range
3335 * r700 - normalize by dividing by 2PI
3336 * see fdo bug 27901
3337 */
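/* i.e. tmp = fract(src / (2*PI) + 0.5); on r600 this is then rescaled with
 * tmp * 2*PI - PI, while r700+ hardware takes the normalized tmp - 0.5 */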
3338 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
3339 {
3340 static float half_inv_pi = 1.0 / (3.1415926535 * 2);
3341 static float double_pi = 3.1415926535 * 2;
3342 static float neg_pi = -3.1415926535;
3343
3344 int r;
3345 struct r600_bytecode_alu alu;
3346
3347 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3348 alu.op = ALU_OP3_MULADD;
3349 alu.is_op3 = 1;
3350
3351 alu.dst.chan = 0;
3352 alu.dst.sel = ctx->temp_reg;
3353 alu.dst.write = 1;
3354
3355 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3356
3357 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3358 alu.src[1].chan = 0;
3359 alu.src[1].value = *(uint32_t *)&half_inv_pi;
3360 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
3361 alu.src[2].chan = 0;
3362 alu.last = 1;
3363 r = r600_bytecode_add_alu(ctx->bc, &alu);
3364 if (r)
3365 return r;
3366
3367 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3368 alu.op = ALU_OP1_FRACT;
3369
3370 alu.dst.chan = 0;
3371 alu.dst.sel = ctx->temp_reg;
3372 alu.dst.write = 1;
3373
3374 alu.src[0].sel = ctx->temp_reg;
3375 alu.src[0].chan = 0;
3376 alu.last = 1;
3377 r = r600_bytecode_add_alu(ctx->bc, &alu);
3378 if (r)
3379 return r;
3380
3381 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3382 alu.op = ALU_OP3_MULADD;
3383 alu.is_op3 = 1;
3384
3385 alu.dst.chan = 0;
3386 alu.dst.sel = ctx->temp_reg;
3387 alu.dst.write = 1;
3388
3389 alu.src[0].sel = ctx->temp_reg;
3390 alu.src[0].chan = 0;
3391
3392 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3393 alu.src[1].chan = 0;
3394 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
3395 alu.src[2].chan = 0;
3396
3397 if (ctx->bc->chip_class == R600) {
3398 alu.src[1].value = *(uint32_t *)&double_pi;
3399 alu.src[2].value = *(uint32_t *)&neg_pi;
3400 } else {
3401 alu.src[1].sel = V_SQ_ALU_SRC_1;
3402 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
3403 alu.src[2].neg = 1;
3404 }
3405
3406 alu.last = 1;
3407 r = r600_bytecode_add_alu(ctx->bc, &alu);
3408 if (r)
3409 return r;
3410 return 0;
3411 }
3412
3413 static int cayman_trig(struct r600_shader_ctx *ctx)
3414 {
3415 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3416 struct r600_bytecode_alu alu;
3417 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3418 int i, r;
3419
3420 r = tgsi_setup_trig(ctx);
3421 if (r)
3422 return r;
3423
3424
3425 for (i = 0; i < last_slot; i++) {
3426 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3427 alu.op = ctx->inst_info->op;
3428 alu.dst.chan = i;
3429
3430 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3431 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3432
3433 alu.src[0].sel = ctx->temp_reg;
3434 alu.src[0].chan = 0;
3435 if (i == last_slot - 1)
3436 alu.last = 1;
3437 r = r600_bytecode_add_alu(ctx->bc, &alu);
3438 if (r)
3439 return r;
3440 }
3441 return 0;
3442 }
3443
3444 static int tgsi_trig(struct r600_shader_ctx *ctx)
3445 {
3446 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3447 struct r600_bytecode_alu alu;
3448 int i, r;
3449 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3450
3451 r = tgsi_setup_trig(ctx);
3452 if (r)
3453 return r;
3454
3455 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3456 alu.op = ctx->inst_info->op;
3457 alu.dst.chan = 0;
3458 alu.dst.sel = ctx->temp_reg;
3459 alu.dst.write = 1;
3460
3461 alu.src[0].sel = ctx->temp_reg;
3462 alu.src[0].chan = 0;
3463 alu.last = 1;
3464 r = r600_bytecode_add_alu(ctx->bc, &alu);
3465 if (r)
3466 return r;
3467
3468 /* replicate result */
3469 for (i = 0; i < lasti + 1; i++) {
3470 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3471 continue;
3472
3473 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3474 alu.op = ALU_OP1_MOV;
3475
3476 alu.src[0].sel = ctx->temp_reg;
3477 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3478 if (i == lasti)
3479 alu.last = 1;
3480 r = r600_bytecode_add_alu(ctx->bc, &alu);
3481 if (r)
3482 return r;
3483 }
3484 return 0;
3485 }
3486
3487 static int tgsi_scs(struct r600_shader_ctx *ctx)
3488 {
3489 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3490 struct r600_bytecode_alu alu;
3491 int i, r;
3492
3493 /* We'll only need the trig stuff if we are going to write to the
3494 * X or Y components of the destination vector.
3495 */
3496 if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) {
3497 r = tgsi_setup_trig(ctx);
3498 if (r)
3499 return r;
3500 }
3501
3502 /* dst.x = COS */
3503 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3504 if (ctx->bc->chip_class == CAYMAN) {
3505 for (i = 0 ; i < 3; i++) {
3506 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3507 alu.op = ALU_OP1_COS;
3508 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3509
3510 if (i == 0)
3511 alu.dst.write = 1;
3512 else
3513 alu.dst.write = 0;
3514 alu.src[0].sel = ctx->temp_reg;
3515 alu.src[0].chan = 0;
3516 if (i == 2)
3517 alu.last = 1;
3518 r = r600_bytecode_add_alu(ctx->bc, &alu);
3519 if (r)
3520 return r;
3521 }
3522 } else {
3523 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3524 alu.op = ALU_OP1_COS;
3525 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
3526
3527 alu.src[0].sel = ctx->temp_reg;
3528 alu.src[0].chan = 0;
3529 alu.last = 1;
3530 r = r600_bytecode_add_alu(ctx->bc, &alu);
3531 if (r)
3532 return r;
3533 }
3534 }
3535
3536 /* dst.y = SIN */
3537 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3538 if (ctx->bc->chip_class == CAYMAN) {
3539 for (i = 0 ; i < 3; i++) {
3540 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3541 alu.op = ALU_OP1_SIN;
3542 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3543 if (i == 1)
3544 alu.dst.write = 1;
3545 else
3546 alu.dst.write = 0;
3547 alu.src[0].sel = ctx->temp_reg;
3548 alu.src[0].chan = 0;
3549 if (i == 2)
3550 alu.last = 1;
3551 r = r600_bytecode_add_alu(ctx->bc, &alu);
3552 if (r)
3553 return r;
3554 }
3555 } else {
3556 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3557 alu.op = ALU_OP1_SIN;
3558 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
3559
3560 alu.src[0].sel = ctx->temp_reg;
3561 alu.src[0].chan = 0;
3562 alu.last = 1;
3563 r = r600_bytecode_add_alu(ctx->bc, &alu);
3564 if (r)
3565 return r;
3566 }
3567 }
3568
3569 /* dst.z = 0.0; */
3570 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3571 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3572
3573 alu.op = ALU_OP1_MOV;
3574
3575 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
3576
3577 alu.src[0].sel = V_SQ_ALU_SRC_0;
3578 alu.src[0].chan = 0;
3579
3580 alu.last = 1;
3581
3582 r = r600_bytecode_add_alu(ctx->bc, &alu);
3583 if (r)
3584 return r;
3585 }
3586
3587 /* dst.w = 1.0; */
3588 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3589 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3590
3591 alu.op = ALU_OP1_MOV;
3592
3593 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
3594
3595 alu.src[0].sel = V_SQ_ALU_SRC_1;
3596 alu.src[0].chan = 0;
3597
3598 alu.last = 1;
3599
3600 r = r600_bytecode_add_alu(ctx->bc, &alu);
3601 if (r)
3602 return r;
3603 }
3604
3605 return 0;
3606 }
3607
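/* kills are emitted per channel as a compare against 0: unconditional KILL
 * compares 0 with -1 so it always fires, the conditional form compares
 * against the source instead */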
3608 static int tgsi_kill(struct r600_shader_ctx *ctx)
3609 {
3610 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3611 struct r600_bytecode_alu alu;
3612 int i, r;
3613
3614 for (i = 0; i < 4; i++) {
3615 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3616 alu.op = ctx->inst_info->op;
3617
3618 alu.dst.chan = i;
3619
3620 alu.src[0].sel = V_SQ_ALU_SRC_0;
3621
3622 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
3623 alu.src[1].sel = V_SQ_ALU_SRC_1;
3624 alu.src[1].neg = 1;
3625 } else {
3626 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3627 }
3628 if (i == 3) {
3629 alu.last = 1;
3630 }
3631 r = r600_bytecode_add_alu(ctx->bc, &alu);
3632 if (r)
3633 return r;
3634 }
3635
3636 /* kill must be last in ALU */
3637 ctx->bc->force_add_cf = 1;
3638 ctx->shader->uses_kill = TRUE;
3639 return 0;
3640 }
3641
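/* LIT computes the classic lighting coefficients, roughly
 *   dst = (1.0, max(src.x, 0), src.x > 0 ? max(src.y, 0)^src.w : 0, 1.0)
 * with the specular term built from LOG_CLAMPED / MUL_LIT / EXP_IEEE;
 * MUL_LIT handles the src.x <= 0 and exponent clamping special cases */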
3642 static int tgsi_lit(struct r600_shader_ctx *ctx)
3643 {
3644 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3645 struct r600_bytecode_alu alu;
3646 int r;
3647
3648 /* tmp.x = max(src.y, 0.0) */
3649 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3650 alu.op = ALU_OP2_MAX;
3651 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
3652 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
3653 alu.src[1].chan = 1;
3654
3655 alu.dst.sel = ctx->temp_reg;
3656 alu.dst.chan = 0;
3657 alu.dst.write = 1;
3658
3659 alu.last = 1;
3660 r = r600_bytecode_add_alu(ctx->bc, &alu);
3661 if (r)
3662 return r;
3663
3664 if (inst->Dst[0].Register.WriteMask & (1 << 2))
3665 {
3666 int chan;
3667 int sel;
3668 int i;
3669
3670 if (ctx->bc->chip_class == CAYMAN) {
3671 for (i = 0; i < 3; i++) {
3672 /* tmp.z = log(tmp.x) */
3673 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3674 alu.op = ALU_OP1_LOG_CLAMPED;
3675 alu.src[0].sel = ctx->temp_reg;
3676 alu.src[0].chan = 0;
3677 alu.dst.sel = ctx->temp_reg;
3678 alu.dst.chan = i;
3679 if (i == 2) {
3680 alu.dst.write = 1;
3681 alu.last = 1;
3682 } else
3683 alu.dst.write = 0;
3684
3685 r = r600_bytecode_add_alu(ctx->bc, &alu);
3686 if (r)
3687 return r;
3688 }
3689 } else {
3690 /* tmp.z = log(tmp.x) */
3691 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3692 alu.op = ALU_OP1_LOG_CLAMPED;
3693 alu.src[0].sel = ctx->temp_reg;
3694 alu.src[0].chan = 0;
3695 alu.dst.sel = ctx->temp_reg;
3696 alu.dst.chan = 2;
3697 alu.dst.write = 1;
3698 alu.last = 1;
3699 r = r600_bytecode_add_alu(ctx->bc, &alu);
3700 if (r)
3701 return r;
3702 }
3703
3704 chan = alu.dst.chan;
3705 sel = alu.dst.sel;
3706
3707 	/* tmp.x = MUL_LIT(tmp.z, src.w, src.x) */
3708 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3709 alu.op = ALU_OP3_MUL_LIT;
3710 alu.src[0].sel = sel;
3711 alu.src[0].chan = chan;
3712 r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
3713 r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
3714 alu.dst.sel = ctx->temp_reg;
3715 alu.dst.chan = 0;
3716 alu.dst.write = 1;
3717 alu.is_op3 = 1;
3718 alu.last = 1;
3719 r = r600_bytecode_add_alu(ctx->bc, &alu);
3720 if (r)
3721 return r;
3722
3723 if (ctx->bc->chip_class == CAYMAN) {
3724 for (i = 0; i < 3; i++) {
3725 /* dst.z = exp(tmp.x) */
3726 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3727 alu.op = ALU_OP1_EXP_IEEE;
3728 alu.src[0].sel = ctx->temp_reg;
3729 alu.src[0].chan = 0;
3730 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3731 if (i == 2) {
3732 alu.dst.write = 1;
3733 alu.last = 1;
3734 } else
3735 alu.dst.write = 0;
3736 r = r600_bytecode_add_alu(ctx->bc, &alu);
3737 if (r)
3738 return r;
3739 }
3740 } else {
3741 /* dst.z = exp(tmp.x) */
3742 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3743 alu.op = ALU_OP1_EXP_IEEE;
3744 alu.src[0].sel = ctx->temp_reg;
3745 alu.src[0].chan = 0;
3746 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
3747 alu.last = 1;
3748 r = r600_bytecode_add_alu(ctx->bc, &alu);
3749 if (r)
3750 return r;
3751 }
3752 }
3753
3754 	/* dst.x <- 1.0 */
3755 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3756 alu.op = ALU_OP1_MOV;
3757 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
3758 alu.src[0].chan = 0;
3759 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
3760 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
3761 r = r600_bytecode_add_alu(ctx->bc, &alu);
3762 if (r)
3763 return r;
3764
3765 /* dst.y = max(src.x, 0.0) */
3766 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3767 alu.op = ALU_OP2_MAX;
3768 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3769 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
3770 alu.src[1].chan = 0;
3771 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
3772 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
3773 r = r600_bytecode_add_alu(ctx->bc, &alu);
3774 if (r)
3775 return r;
3776
3777 	/* dst.w <- 1.0 */
3778 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3779 alu.op = ALU_OP1_MOV;
3780 alu.src[0].sel = V_SQ_ALU_SRC_1;
3781 alu.src[0].chan = 0;
3782 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
3783 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
3784 alu.last = 1;
3785 r = r600_bytecode_add_alu(ctx->bc, &alu);
3786 if (r)
3787 return r;
3788
3789 return 0;
3790 }
3791
3792 static int tgsi_rsq(struct r600_shader_ctx *ctx)
3793 {
3794 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3795 struct r600_bytecode_alu alu;
3796 int i, r;
3797
3798 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3799
3800 /* XXX:
3801 * For state trackers other than OpenGL, we'll want to use
3802 * _RECIPSQRT_IEEE instead.
3803 */
3804 alu.op = ALU_OP1_RECIPSQRT_CLAMPED;
3805
3806 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
3807 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
3808 r600_bytecode_src_set_abs(&alu.src[i]);
3809 }
3810 alu.dst.sel = ctx->temp_reg;
3811 alu.dst.write = 1;
3812 alu.last = 1;
3813 r = r600_bytecode_add_alu(ctx->bc, &alu);
3814 if (r)
3815 return r;
3816 /* replicate result */
3817 return tgsi_helper_tempx_replicate(ctx);
3818 }
3819
3820 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
3821 {
3822 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3823 struct r600_bytecode_alu alu;
3824 int i, r;
3825
3826 for (i = 0; i < 4; i++) {
3827 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3828 alu.src[0].sel = ctx->temp_reg;
3829 alu.op = ALU_OP1_MOV;
3830 alu.dst.chan = i;
3831 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3832 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3833 if (i == 3)
3834 alu.last = 1;
3835 r = r600_bytecode_add_alu(ctx->bc, &alu);
3836 if (r)
3837 return r;
3838 }
3839 return 0;
3840 }
3841
3842 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
3843 {
3844 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3845 struct r600_bytecode_alu alu;
3846 int i, r;
3847
3848 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3849 alu.op = ctx->inst_info->op;
3850 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
3851 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
3852 }
3853 alu.dst.sel = ctx->temp_reg;
3854 alu.dst.write = 1;
3855 alu.last = 1;
3856 r = r600_bytecode_add_alu(ctx->bc, &alu);
3857 if (r)
3858 return r;
3859 /* replicate result */
3860 return tgsi_helper_tempx_replicate(ctx);
3861 }
3862
3863 static int cayman_pow(struct r600_shader_ctx *ctx)
3864 {
3865 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3866 int i, r;
3867 struct r600_bytecode_alu alu;
3868 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3869
3870 for (i = 0; i < 3; i++) {
3871 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3872 alu.op = ALU_OP1_LOG_IEEE;
3873 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3874 alu.dst.sel = ctx->temp_reg;
3875 alu.dst.chan = i;
3876 alu.dst.write = 1;
3877 if (i == 2)
3878 alu.last = 1;
3879 r = r600_bytecode_add_alu(ctx->bc, &alu);
3880 if (r)
3881 return r;
3882 }
3883
3884 /* b * LOG2(a) */
3885 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3886 alu.op = ALU_OP2_MUL;
3887 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
3888 alu.src[1].sel = ctx->temp_reg;
3889 alu.dst.sel = ctx->temp_reg;
3890 alu.dst.write = 1;
3891 alu.last = 1;
3892 r = r600_bytecode_add_alu(ctx->bc, &alu);
3893 if (r)
3894 return r;
3895
3896 for (i = 0; i < last_slot; i++) {
3897 		/* POW(a,b) = EXP2(b * LOG2(a)) */
3898 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3899 alu.op = ALU_OP1_EXP_IEEE;
3900 alu.src[0].sel = ctx->temp_reg;
3901
3902 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3903 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3904 if (i == last_slot - 1)
3905 alu.last = 1;
3906 r = r600_bytecode_add_alu(ctx->bc, &alu);
3907 if (r)
3908 return r;
3909 }
3910 return 0;
3911 }
3912
3913 static int tgsi_pow(struct r600_shader_ctx *ctx)
3914 {
3915 struct r600_bytecode_alu alu;
3916 int r;
3917
3918 /* LOG2(a) */
3919 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3920 alu.op = ALU_OP1_LOG_IEEE;
3921 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3922 alu.dst.sel = ctx->temp_reg;
3923 alu.dst.write = 1;
3924 alu.last = 1;
3925 r = r600_bytecode_add_alu(ctx->bc, &alu);
3926 if (r)
3927 return r;
3928 /* b * LOG2(a) */
3929 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3930 alu.op = ALU_OP2_MUL;
3931 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
3932 alu.src[1].sel = ctx->temp_reg;
3933 alu.dst.sel = ctx->temp_reg;
3934 alu.dst.write = 1;
3935 alu.last = 1;
3936 r = r600_bytecode_add_alu(ctx->bc, &alu);
3937 if (r)
3938 return r;
3939 	/* POW(a,b) = EXP2(b * LOG2(a)) */
3940 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3941 alu.op = ALU_OP1_EXP_IEEE;
3942 alu.src[0].sel = ctx->temp_reg;
3943 alu.dst.sel = ctx->temp_reg;
3944 alu.dst.write = 1;
3945 alu.last = 1;
3946 r = r600_bytecode_add_alu(ctx->bc, &alu);
3947 if (r)
3948 return r;
3949 return tgsi_helper_tempx_replicate(ctx);
3950 }
3951
3952 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
3953 {
3954 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3955 struct r600_bytecode_alu alu;
3956 int i, r, j;
3957 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3958 int tmp0 = ctx->temp_reg;
3959 int tmp1 = r600_get_temp(ctx);
3960 int tmp2 = r600_get_temp(ctx);
3961 int tmp3 = r600_get_temp(ctx);
3962 /* Unsigned path:
3963 *
3964 	 * we need to represent src1 as src2*q + r, where q is the quotient and r is the remainder
3965 *
3966 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
3967 * 2. tmp0.z = lo (tmp0.x * src2)
3968 * 3. tmp0.w = -tmp0.z
3969 * 4. tmp0.y = hi (tmp0.x * src2)
3970 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
3971 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
3972 * 7. tmp1.x = tmp0.x - tmp0.w
3973 * 8. tmp1.y = tmp0.x + tmp0.w
3974 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
3975 * 10. tmp0.z = hi(tmp0.x * src1) = q
3976 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
3977 *
3978 * 12. tmp0.w = src1 - tmp0.y = r
3979 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
3980 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
3981 *
3982 * if DIV
3983 *
3984 * 15. tmp1.z = tmp0.z + 1 = q + 1
3985 * 16. tmp1.w = tmp0.z - 1 = q - 1
3986 *
3987 * else MOD
3988 *
3989 * 15. tmp1.z = tmp0.w - src2 = r - src2
3990 * 16. tmp1.w = tmp0.w + src2 = r + src2
3991 *
3992 * endif
3993 *
3994 * 17. tmp1.x = tmp1.x & tmp1.y
3995 *
3996 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
3997 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
3998 *
3999 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
4000 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
4001 *
4002 * Signed path:
4003 *
4004 * Same as unsigned, using abs values of the operands,
4005 * and fixing the sign of the result in the end.
4006 */
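	/* For reference, the per-channel value the 20 steps converge to is
	 * simply (sketch only; these helpers are illustrative, not part of
	 * this file):
	 *
	 *   static uint32_t udiv_ref(uint32_t src1, uint32_t src2)
	 *   {
	 *           return src2 ? src1 / src2 : 0xffffffffu; // step 20: MAX_UINT on /0
	 *   }
	 *   static uint32_t umod_ref(uint32_t src1, uint32_t src2)
	 *   {
	 *           return src2 ? src1 % src2 : 0xffffffffu;
	 *   }
	 *
	 * The long form is needed because the hardware reciprocal is only
	 * approximate, so the candidate quotient/remainder has to be checked
	 * and corrected by one step (13-19).
	 */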
4007
4008 for (i = 0; i < 4; i++) {
4009 if (!(write_mask & (1<<i)))
4010 continue;
4011
4012 if (signed_op) {
4013
4014 /* tmp2.x = -src0 */
4015 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4016 alu.op = ALU_OP2_SUB_INT;
4017
4018 alu.dst.sel = tmp2;
4019 alu.dst.chan = 0;
4020 alu.dst.write = 1;
4021
4022 alu.src[0].sel = V_SQ_ALU_SRC_0;
4023
4024 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4025
4026 alu.last = 1;
4027 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4028 return r;
4029
4030 /* tmp2.y = -src1 */
4031 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4032 alu.op = ALU_OP2_SUB_INT;
4033
4034 alu.dst.sel = tmp2;
4035 alu.dst.chan = 1;
4036 alu.dst.write = 1;
4037
4038 alu.src[0].sel = V_SQ_ALU_SRC_0;
4039
4040 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4041
4042 alu.last = 1;
4043 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4044 return r;
4045
4046 			/* tmp2.z sign bit is set if src0 and src1 signs are different */
4047 			/* it will be the sign of the quotient */
4048 if (!mod) {
4049
4050 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4051 alu.op = ALU_OP2_XOR_INT;
4052
4053 alu.dst.sel = tmp2;
4054 alu.dst.chan = 2;
4055 alu.dst.write = 1;
4056
4057 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4058 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4059
4060 alu.last = 1;
4061 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4062 return r;
4063 }
4064
4065 /* tmp2.x = |src0| */
4066 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4067 alu.op = ALU_OP3_CNDGE_INT;
4068 alu.is_op3 = 1;
4069
4070 alu.dst.sel = tmp2;
4071 alu.dst.chan = 0;
4072 alu.dst.write = 1;
4073
4074 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4075 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4076 alu.src[2].sel = tmp2;
4077 alu.src[2].chan = 0;
4078
4079 alu.last = 1;
4080 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4081 return r;
4082
4083 /* tmp2.y = |src1| */
4084 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4085 alu.op = ALU_OP3_CNDGE_INT;
4086 alu.is_op3 = 1;
4087
4088 alu.dst.sel = tmp2;
4089 alu.dst.chan = 1;
4090 alu.dst.write = 1;
4091
4092 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4093 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4094 alu.src[2].sel = tmp2;
4095 alu.src[2].chan = 1;
4096
4097 alu.last = 1;
4098 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4099 return r;
4100
4101 }
4102
4103 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
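		/* Cayman presumably has no RECIP_UINT, so the integer reciprocal
		 * is emulated in float below:
		 *   tmp0.x = flt_to_uint(recip(uint_to_flt(src2)) * 2^32)
		 */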
4104 if (ctx->bc->chip_class == CAYMAN) {
4105 /* tmp3.x = u2f(src2) */
4106 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4107 alu.op = ALU_OP1_UINT_TO_FLT;
4108
4109 alu.dst.sel = tmp3;
4110 alu.dst.chan = 0;
4111 alu.dst.write = 1;
4112
4113 if (signed_op) {
4114 alu.src[0].sel = tmp2;
4115 alu.src[0].chan = 1;
4116 } else {
4117 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4118 }
4119
4120 alu.last = 1;
4121 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4122 return r;
4123
4124 /* tmp0.x = recip(tmp3.x) */
4125 for (j = 0 ; j < 3; j++) {
4126 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4127 alu.op = ALU_OP1_RECIP_IEEE;
4128
4129 alu.dst.sel = tmp0;
4130 alu.dst.chan = j;
4131 alu.dst.write = (j == 0);
4132
4133 alu.src[0].sel = tmp3;
4134 alu.src[0].chan = 0;
4135
4136 if (j == 2)
4137 alu.last = 1;
4138 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4139 return r;
4140 }
4141
4142 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4143 alu.op = ALU_OP2_MUL;
4144
4145 alu.src[0].sel = tmp0;
4146 alu.src[0].chan = 0;
4147
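			/* 0x4f800000 = 2^32 as an IEEE-754 float: scaling the float
			 * reciprocal by it makes the FLT_TO_UINT below produce
			 * approximately 2^32/src2. */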
4148 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4149 alu.src[1].value = 0x4f800000;
4150
4151 alu.dst.sel = tmp3;
4152 alu.dst.write = 1;
4153 alu.last = 1;
4154 r = r600_bytecode_add_alu(ctx->bc, &alu);
4155 if (r)
4156 return r;
4157
4158 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4159 alu.op = ALU_OP1_FLT_TO_UINT;
4160
4161 alu.dst.sel = tmp0;
4162 alu.dst.chan = 0;
4163 alu.dst.write = 1;
4164
4165 alu.src[0].sel = tmp3;
4166 alu.src[0].chan = 0;
4167
4168 alu.last = 1;
4169 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4170 return r;
4171
4172 } else {
4173 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4174 alu.op = ALU_OP1_RECIP_UINT;
4175
4176 alu.dst.sel = tmp0;
4177 alu.dst.chan = 0;
4178 alu.dst.write = 1;
4179
4180 if (signed_op) {
4181 alu.src[0].sel = tmp2;
4182 alu.src[0].chan = 1;
4183 } else {
4184 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4185 }
4186
4187 alu.last = 1;
4188 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4189 return r;
4190 }
4191
4192 /* 2. tmp0.z = lo (tmp0.x * src2) */
4193 if (ctx->bc->chip_class == CAYMAN) {
4194 for (j = 0 ; j < 4; j++) {
4195 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4196 alu.op = ALU_OP2_MULLO_UINT;
4197
4198 alu.dst.sel = tmp0;
4199 alu.dst.chan = j;
4200 alu.dst.write = (j == 2);
4201
4202 alu.src[0].sel = tmp0;
4203 alu.src[0].chan = 0;
4204 if (signed_op) {
4205 alu.src[1].sel = tmp2;
4206 alu.src[1].chan = 1;
4207 } else {
4208 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4209 }
4210
4211 alu.last = (j == 3);
4212 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4213 return r;
4214 }
4215 } else {
4216 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4217 alu.op = ALU_OP2_MULLO_UINT;
4218
4219 alu.dst.sel = tmp0;
4220 alu.dst.chan = 2;
4221 alu.dst.write = 1;
4222
4223 alu.src[0].sel = tmp0;
4224 alu.src[0].chan = 0;
4225 if (signed_op) {
4226 alu.src[1].sel = tmp2;
4227 alu.src[1].chan = 1;
4228 } else {
4229 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4230 }
4231
4232 alu.last = 1;
4233 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4234 return r;
4235 }
4236
4237 /* 3. tmp0.w = -tmp0.z */
4238 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4239 alu.op = ALU_OP2_SUB_INT;
4240
4241 alu.dst.sel = tmp0;
4242 alu.dst.chan = 3;
4243 alu.dst.write = 1;
4244
4245 alu.src[0].sel = V_SQ_ALU_SRC_0;
4246 alu.src[1].sel = tmp0;
4247 alu.src[1].chan = 2;
4248
4249 alu.last = 1;
4250 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4251 return r;
4252
4253 /* 4. tmp0.y = hi (tmp0.x * src2) */
4254 if (ctx->bc->chip_class == CAYMAN) {
4255 for (j = 0 ; j < 4; j++) {
4256 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4257 alu.op = ALU_OP2_MULHI_UINT;
4258
4259 alu.dst.sel = tmp0;
4260 alu.dst.chan = j;
4261 alu.dst.write = (j == 1);
4262
4263 alu.src[0].sel = tmp0;
4264 alu.src[0].chan = 0;
4265
4266 if (signed_op) {
4267 alu.src[1].sel = tmp2;
4268 alu.src[1].chan = 1;
4269 } else {
4270 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4271 }
4272 alu.last = (j == 3);
4273 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4274 return r;
4275 }
4276 } else {
4277 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4278 alu.op = ALU_OP2_MULHI_UINT;
4279
4280 alu.dst.sel = tmp0;
4281 alu.dst.chan = 1;
4282 alu.dst.write = 1;
4283
4284 alu.src[0].sel = tmp0;
4285 alu.src[0].chan = 0;
4286
4287 if (signed_op) {
4288 alu.src[1].sel = tmp2;
4289 alu.src[1].chan = 1;
4290 } else {
4291 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4292 }
4293
4294 alu.last = 1;
4295 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4296 return r;
4297 }
4298
4299 		/* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) */
4300 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4301 alu.op = ALU_OP3_CNDE_INT;
4302 alu.is_op3 = 1;
4303
4304 alu.dst.sel = tmp0;
4305 alu.dst.chan = 2;
4306 alu.dst.write = 1;
4307
4308 alu.src[0].sel = tmp0;
4309 alu.src[0].chan = 1;
4310 alu.src[1].sel = tmp0;
4311 alu.src[1].chan = 3;
4312 alu.src[2].sel = tmp0;
4313 alu.src[2].chan = 2;
4314
4315 alu.last = 1;
4316 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4317 return r;
4318
4319 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
4320 if (ctx->bc->chip_class == CAYMAN) {
4321 for (j = 0 ; j < 4; j++) {
4322 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4323 alu.op = ALU_OP2_MULHI_UINT;
4324
4325 alu.dst.sel = tmp0;
4326 alu.dst.chan = j;
4327 alu.dst.write = (j == 3);
4328
4329 alu.src[0].sel = tmp0;
4330 alu.src[0].chan = 2;
4331
4332 alu.src[1].sel = tmp0;
4333 alu.src[1].chan = 0;
4334
4335 alu.last = (j == 3);
4336 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4337 return r;
4338 }
4339 } else {
4340 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4341 alu.op = ALU_OP2_MULHI_UINT;
4342
4343 alu.dst.sel = tmp0;
4344 alu.dst.chan = 3;
4345 alu.dst.write = 1;
4346
4347 alu.src[0].sel = tmp0;
4348 alu.src[0].chan = 2;
4349
4350 alu.src[1].sel = tmp0;
4351 alu.src[1].chan = 0;
4352
4353 alu.last = 1;
4354 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4355 return r;
4356 }
4357
4358 /* 7. tmp1.x = tmp0.x - tmp0.w */
4359 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4360 alu.op = ALU_OP2_SUB_INT;
4361
4362 alu.dst.sel = tmp1;
4363 alu.dst.chan = 0;
4364 alu.dst.write = 1;
4365
4366 alu.src[0].sel = tmp0;
4367 alu.src[0].chan = 0;
4368 alu.src[1].sel = tmp0;
4369 alu.src[1].chan = 3;
4370
4371 alu.last = 1;
4372 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4373 return r;
4374
4375 /* 8. tmp1.y = tmp0.x + tmp0.w */
4376 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4377 alu.op = ALU_OP2_ADD_INT;
4378
4379 alu.dst.sel = tmp1;
4380 alu.dst.chan = 1;
4381 alu.dst.write = 1;
4382
4383 alu.src[0].sel = tmp0;
4384 alu.src[0].chan = 0;
4385 alu.src[1].sel = tmp0;
4386 alu.src[1].chan = 3;
4387
4388 alu.last = 1;
4389 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4390 return r;
4391
4392 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
4393 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4394 alu.op = ALU_OP3_CNDE_INT;
4395 alu.is_op3 = 1;
4396
4397 alu.dst.sel = tmp0;
4398 alu.dst.chan = 0;
4399 alu.dst.write = 1;
4400
4401 alu.src[0].sel = tmp0;
4402 alu.src[0].chan = 1;
4403 alu.src[1].sel = tmp1;
4404 alu.src[1].chan = 1;
4405 alu.src[2].sel = tmp1;
4406 alu.src[2].chan = 0;
4407
4408 alu.last = 1;
4409 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4410 return r;
4411
4412 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
4413 if (ctx->bc->chip_class == CAYMAN) {
4414 for (j = 0 ; j < 4; j++) {
4415 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4416 alu.op = ALU_OP2_MULHI_UINT;
4417
4418 alu.dst.sel = tmp0;
4419 alu.dst.chan = j;
4420 alu.dst.write = (j == 2);
4421
4422 alu.src[0].sel = tmp0;
4423 alu.src[0].chan = 0;
4424
4425 if (signed_op) {
4426 alu.src[1].sel = tmp2;
4427 alu.src[1].chan = 0;
4428 } else {
4429 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4430 }
4431
4432 alu.last = (j == 3);
4433 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4434 return r;
4435 }
4436 } else {
4437 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4438 alu.op = ALU_OP2_MULHI_UINT;
4439
4440 alu.dst.sel = tmp0;
4441 alu.dst.chan = 2;
4442 alu.dst.write = 1;
4443
4444 alu.src[0].sel = tmp0;
4445 alu.src[0].chan = 0;
4446
4447 if (signed_op) {
4448 alu.src[1].sel = tmp2;
4449 alu.src[1].chan = 0;
4450 } else {
4451 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4452 }
4453
4454 alu.last = 1;
4455 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4456 return r;
4457 }
4458
4459 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
4460 if (ctx->bc->chip_class == CAYMAN) {
4461 for (j = 0 ; j < 4; j++) {
4462 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4463 alu.op = ALU_OP2_MULLO_UINT;
4464
4465 alu.dst.sel = tmp0;
4466 alu.dst.chan = j;
4467 alu.dst.write = (j == 1);
4468
4469 if (signed_op) {
4470 alu.src[0].sel = tmp2;
4471 alu.src[0].chan = 1;
4472 } else {
4473 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4474 }
4475
4476 alu.src[1].sel = tmp0;
4477 alu.src[1].chan = 2;
4478
4479 alu.last = (j == 3);
4480 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4481 return r;
4482 }
4483 } else {
4484 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4485 alu.op = ALU_OP2_MULLO_UINT;
4486
4487 alu.dst.sel = tmp0;
4488 alu.dst.chan = 1;
4489 alu.dst.write = 1;
4490
4491 if (signed_op) {
4492 alu.src[0].sel = tmp2;
4493 alu.src[0].chan = 1;
4494 } else {
4495 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4496 }
4497
4498 alu.src[1].sel = tmp0;
4499 alu.src[1].chan = 2;
4500
4501 alu.last = 1;
4502 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4503 return r;
4504 }
4505
4506 /* 12. tmp0.w = src1 - tmp0.y = r */
4507 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4508 alu.op = ALU_OP2_SUB_INT;
4509
4510 alu.dst.sel = tmp0;
4511 alu.dst.chan = 3;
4512 alu.dst.write = 1;
4513
4514 if (signed_op) {
4515 alu.src[0].sel = tmp2;
4516 alu.src[0].chan = 0;
4517 } else {
4518 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4519 }
4520
4521 alu.src[1].sel = tmp0;
4522 alu.src[1].chan = 1;
4523
4524 alu.last = 1;
4525 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4526 return r;
4527
4528 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
4529 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4530 alu.op = ALU_OP2_SETGE_UINT;
4531
4532 alu.dst.sel = tmp1;
4533 alu.dst.chan = 0;
4534 alu.dst.write = 1;
4535
4536 alu.src[0].sel = tmp0;
4537 alu.src[0].chan = 3;
4538 if (signed_op) {
4539 alu.src[1].sel = tmp2;
4540 alu.src[1].chan = 1;
4541 } else {
4542 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4543 }
4544
4545 alu.last = 1;
4546 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4547 return r;
4548
4549 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
4550 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4551 alu.op = ALU_OP2_SETGE_UINT;
4552
4553 alu.dst.sel = tmp1;
4554 alu.dst.chan = 1;
4555 alu.dst.write = 1;
4556
4557 if (signed_op) {
4558 alu.src[0].sel = tmp2;
4559 alu.src[0].chan = 0;
4560 } else {
4561 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4562 }
4563
4564 alu.src[1].sel = tmp0;
4565 alu.src[1].chan = 1;
4566
4567 alu.last = 1;
4568 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4569 return r;
4570
4571 if (mod) { /* UMOD */
4572
4573 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
4574 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4575 alu.op = ALU_OP2_SUB_INT;
4576
4577 alu.dst.sel = tmp1;
4578 alu.dst.chan = 2;
4579 alu.dst.write = 1;
4580
4581 alu.src[0].sel = tmp0;
4582 alu.src[0].chan = 3;
4583
4584 if (signed_op) {
4585 alu.src[1].sel = tmp2;
4586 alu.src[1].chan = 1;
4587 } else {
4588 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4589 }
4590
4591 alu.last = 1;
4592 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4593 return r;
4594
4595 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
4596 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4597 alu.op = ALU_OP2_ADD_INT;
4598
4599 alu.dst.sel = tmp1;
4600 alu.dst.chan = 3;
4601 alu.dst.write = 1;
4602
4603 alu.src[0].sel = tmp0;
4604 alu.src[0].chan = 3;
4605 if (signed_op) {
4606 alu.src[1].sel = tmp2;
4607 alu.src[1].chan = 1;
4608 } else {
4609 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4610 }
4611
4612 alu.last = 1;
4613 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4614 return r;
4615
4616 } else { /* UDIV */
4617
4618 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
4619 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4620 alu.op = ALU_OP2_ADD_INT;
4621
4622 alu.dst.sel = tmp1;
4623 alu.dst.chan = 2;
4624 alu.dst.write = 1;
4625
4626 alu.src[0].sel = tmp0;
4627 alu.src[0].chan = 2;
4628 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
4629
4630 alu.last = 1;
4631 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4632 return r;
4633
4634 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
4635 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4636 alu.op = ALU_OP2_ADD_INT;
4637
4638 alu.dst.sel = tmp1;
4639 alu.dst.chan = 3;
4640 alu.dst.write = 1;
4641
4642 alu.src[0].sel = tmp0;
4643 alu.src[0].chan = 2;
4644 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
4645
4646 alu.last = 1;
4647 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4648 return r;
4649
4650 }
4651
4652 /* 17. tmp1.x = tmp1.x & tmp1.y */
4653 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4654 alu.op = ALU_OP2_AND_INT;
4655
4656 alu.dst.sel = tmp1;
4657 alu.dst.chan = 0;
4658 alu.dst.write = 1;
4659
4660 alu.src[0].sel = tmp1;
4661 alu.src[0].chan = 0;
4662 alu.src[1].sel = tmp1;
4663 alu.src[1].chan = 1;
4664
4665 alu.last = 1;
4666 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4667 return r;
4668
4669 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
4670 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
4671 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4672 alu.op = ALU_OP3_CNDE_INT;
4673 alu.is_op3 = 1;
4674
4675 alu.dst.sel = tmp0;
4676 alu.dst.chan = 2;
4677 alu.dst.write = 1;
4678
4679 alu.src[0].sel = tmp1;
4680 alu.src[0].chan = 0;
4681 alu.src[1].sel = tmp0;
4682 alu.src[1].chan = mod ? 3 : 2;
4683 alu.src[2].sel = tmp1;
4684 alu.src[2].chan = 2;
4685
4686 alu.last = 1;
4687 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4688 return r;
4689
4690 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
4691 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4692 alu.op = ALU_OP3_CNDE_INT;
4693 alu.is_op3 = 1;
4694
4695 if (signed_op) {
4696 alu.dst.sel = tmp0;
4697 alu.dst.chan = 2;
4698 alu.dst.write = 1;
4699 } else {
4700 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4701 }
4702
4703 alu.src[0].sel = tmp1;
4704 alu.src[0].chan = 1;
4705 alu.src[1].sel = tmp1;
4706 alu.src[1].chan = 3;
4707 alu.src[2].sel = tmp0;
4708 alu.src[2].chan = 2;
4709
4710 alu.last = 1;
4711 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4712 return r;
4713
4714 if (signed_op) {
4715
4716 /* fix the sign of the result */
4717
4718 if (mod) {
4719
4720 /* tmp0.x = -tmp0.z */
4721 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4722 alu.op = ALU_OP2_SUB_INT;
4723
4724 alu.dst.sel = tmp0;
4725 alu.dst.chan = 0;
4726 alu.dst.write = 1;
4727
4728 alu.src[0].sel = V_SQ_ALU_SRC_0;
4729 alu.src[1].sel = tmp0;
4730 alu.src[1].chan = 2;
4731
4732 alu.last = 1;
4733 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4734 return r;
4735
4736 /* sign of the remainder is the same as the sign of src0 */
4737 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
4738 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4739 alu.op = ALU_OP3_CNDGE_INT;
4740 alu.is_op3 = 1;
4741
4742 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4743
4744 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4745 alu.src[1].sel = tmp0;
4746 alu.src[1].chan = 2;
4747 alu.src[2].sel = tmp0;
4748 alu.src[2].chan = 0;
4749
4750 alu.last = 1;
4751 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4752 return r;
4753
4754 } else {
4755
4756 /* tmp0.x = -tmp0.z */
4757 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4758 alu.op = ALU_OP2_SUB_INT;
4759
4760 alu.dst.sel = tmp0;
4761 alu.dst.chan = 0;
4762 alu.dst.write = 1;
4763
4764 alu.src[0].sel = V_SQ_ALU_SRC_0;
4765 alu.src[1].sel = tmp0;
4766 alu.src[1].chan = 2;
4767
4768 alu.last = 1;
4769 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4770 return r;
4771
4772 /* fix the quotient sign (same as the sign of src0*src1) */
4773 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
4774 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4775 alu.op = ALU_OP3_CNDGE_INT;
4776 alu.is_op3 = 1;
4777
4778 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4779
4780 alu.src[0].sel = tmp2;
4781 alu.src[0].chan = 2;
4782 alu.src[1].sel = tmp0;
4783 alu.src[1].chan = 2;
4784 alu.src[2].sel = tmp0;
4785 alu.src[2].chan = 0;
4786
4787 alu.last = 1;
4788 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4789 return r;
4790 }
4791 }
4792 }
4793 return 0;
4794 }
4795
4796 static int tgsi_udiv(struct r600_shader_ctx *ctx)
4797 {
4798 return tgsi_divmod(ctx, 0, 0);
4799 }
4800
4801 static int tgsi_umod(struct r600_shader_ctx *ctx)
4802 {
4803 return tgsi_divmod(ctx, 1, 0);
4804 }
4805
4806 static int tgsi_idiv(struct r600_shader_ctx *ctx)
4807 {
4808 return tgsi_divmod(ctx, 0, 1);
4809 }
4810
4811 static int tgsi_imod(struct r600_shader_ctx *ctx)
4812 {
4813 return tgsi_divmod(ctx, 1, 1);
4814 }
4815
4816
4817 static int tgsi_f2i(struct r600_shader_ctx *ctx)
4818 {
4819 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4820 struct r600_bytecode_alu alu;
4821 int i, r;
4822 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4823 int last_inst = tgsi_last_instruction(write_mask);
4824
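	/* Two passes: first TRUNC to an integral float, then the actual
	 * conversion op.  Presumably the conversion itself does not round
	 * toward zero on all chips.  FLT_TO_UINT is a t-slot only op on this
	 * family, which is why it always closes its ALU group below. */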
4825 for (i = 0; i < 4; i++) {
4826 if (!(write_mask & (1<<i)))
4827 continue;
4828
4829 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4830 alu.op = ALU_OP1_TRUNC;
4831
4832 alu.dst.sel = ctx->temp_reg;
4833 alu.dst.chan = i;
4834 alu.dst.write = 1;
4835
4836 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4837 if (i == last_inst)
4838 alu.last = 1;
4839 r = r600_bytecode_add_alu(ctx->bc, &alu);
4840 if (r)
4841 return r;
4842 }
4843
4844 for (i = 0; i < 4; i++) {
4845 if (!(write_mask & (1<<i)))
4846 continue;
4847
4848 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4849 alu.op = ctx->inst_info->op;
4850
4851 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4852
4853 alu.src[0].sel = ctx->temp_reg;
4854 alu.src[0].chan = i;
4855
4856 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
4857 alu.last = 1;
4858 r = r600_bytecode_add_alu(ctx->bc, &alu);
4859 if (r)
4860 return r;
4861 }
4862
4863 return 0;
4864 }
4865
4866 static int tgsi_iabs(struct r600_shader_ctx *ctx)
4867 {
4868 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4869 struct r600_bytecode_alu alu;
4870 int i, r;
4871 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4872 int last_inst = tgsi_last_instruction(write_mask);
4873
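	/* IABS as negate + select: dst = src >= 0 ? src : -src (the abs source
	 * modifier is float-only, so it cannot be used here). */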
4874 /* tmp = -src */
4875 for (i = 0; i < 4; i++) {
4876 if (!(write_mask & (1<<i)))
4877 continue;
4878
4879 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4880 alu.op = ALU_OP2_SUB_INT;
4881
4882 alu.dst.sel = ctx->temp_reg;
4883 alu.dst.chan = i;
4884 alu.dst.write = 1;
4885
4886 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4887 alu.src[0].sel = V_SQ_ALU_SRC_0;
4888
4889 if (i == last_inst)
4890 alu.last = 1;
4891 r = r600_bytecode_add_alu(ctx->bc, &alu);
4892 if (r)
4893 return r;
4894 }
4895
4896 /* dst = (src >= 0 ? src : tmp) */
4897 for (i = 0; i < 4; i++) {
4898 if (!(write_mask & (1<<i)))
4899 continue;
4900
4901 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4902 alu.op = ALU_OP3_CNDGE_INT;
4903 alu.is_op3 = 1;
4904 alu.dst.write = 1;
4905
4906 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4907
4908 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4909 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4910 alu.src[2].sel = ctx->temp_reg;
4911 alu.src[2].chan = i;
4912
4913 if (i == last_inst)
4914 alu.last = 1;
4915 r = r600_bytecode_add_alu(ctx->bc, &alu);
4916 if (r)
4917 return r;
4918 }
4919 return 0;
4920 }
4921
4922 static int tgsi_issg(struct r600_shader_ctx *ctx)
4923 {
4924 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4925 struct r600_bytecode_alu alu;
4926 int i, r;
4927 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4928 int last_inst = tgsi_last_instruction(write_mask);
4929
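	/* Composed, the two selects below give
	 * dst = src < 0 ? -1 : (src > 0 ? 1 : 0), i.e. the integer sign of src. */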
4930 /* tmp = (src >= 0 ? src : -1) */
4931 for (i = 0; i < 4; i++) {
4932 if (!(write_mask & (1<<i)))
4933 continue;
4934
4935 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4936 alu.op = ALU_OP3_CNDGE_INT;
4937 alu.is_op3 = 1;
4938
4939 alu.dst.sel = ctx->temp_reg;
4940 alu.dst.chan = i;
4941 alu.dst.write = 1;
4942
4943 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4944 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4945 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
4946
4947 if (i == last_inst)
4948 alu.last = 1;
4949 r = r600_bytecode_add_alu(ctx->bc, &alu);
4950 if (r)
4951 return r;
4952 }
4953
4954 /* dst = (tmp > 0 ? 1 : tmp) */
4955 for (i = 0; i < 4; i++) {
4956 if (!(write_mask & (1<<i)))
4957 continue;
4958
4959 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4960 alu.op = ALU_OP3_CNDGT_INT;
4961 alu.is_op3 = 1;
4962 alu.dst.write = 1;
4963
4964 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4965
4966 alu.src[0].sel = ctx->temp_reg;
4967 alu.src[0].chan = i;
4968
4969 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
4970
4971 alu.src[2].sel = ctx->temp_reg;
4972 alu.src[2].chan = i;
4973
4974 if (i == last_inst)
4975 alu.last = 1;
4976 r = r600_bytecode_add_alu(ctx->bc, &alu);
4977 if (r)
4978 return r;
4979 }
4980 return 0;
4981 }
4982
4983
4984
4985 static int tgsi_ssg(struct r600_shader_ctx *ctx)
4986 {
4987 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4988 struct r600_bytecode_alu alu;
4989 int i, r;
4990
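	/* Composed, the two CNDGT passes below give
	 * dst = src > 0 ? 1.0 : (src < 0 ? -1.0 : src), i.e. a float sign()
	 * that passes zero (and NaN) through unchanged. */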
4991 /* tmp = (src > 0 ? 1 : src) */
4992 for (i = 0; i < 4; i++) {
4993 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4994 alu.op = ALU_OP3_CNDGT;
4995 alu.is_op3 = 1;
4996
4997 alu.dst.sel = ctx->temp_reg;
4998 alu.dst.chan = i;
4999
5000 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5001 alu.src[1].sel = V_SQ_ALU_SRC_1;
5002 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
5003
5004 if (i == 3)
5005 alu.last = 1;
5006 r = r600_bytecode_add_alu(ctx->bc, &alu);
5007 if (r)
5008 return r;
5009 }
5010
5011 /* dst = (-tmp > 0 ? -1 : tmp) */
5012 for (i = 0; i < 4; i++) {
5013 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5014 alu.op = ALU_OP3_CNDGT;
5015 alu.is_op3 = 1;
5016 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5017
5018 alu.src[0].sel = ctx->temp_reg;
5019 alu.src[0].chan = i;
5020 alu.src[0].neg = 1;
5021
5022 alu.src[1].sel = V_SQ_ALU_SRC_1;
5023 alu.src[1].neg = 1;
5024
5025 alu.src[2].sel = ctx->temp_reg;
5026 alu.src[2].chan = i;
5027
5028 if (i == 3)
5029 alu.last = 1;
5030 r = r600_bytecode_add_alu(ctx->bc, &alu);
5031 if (r)
5032 return r;
5033 }
5034 return 0;
5035 }
5036
5037 static int tgsi_bfi(struct r600_shader_ctx *ctx)
5038 {
5039 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5040 struct r600_bytecode_alu alu;
5041 int i, r, t1, t2;
5042
5043 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5044 int last_inst = tgsi_last_instruction(write_mask);
5045
5046 t1 = ctx->temp_reg;
5047
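	/* Reference semantics of the three passes below, per channel
	 * (src0 = base, src1 = insert, src2 = offset, src3 = width):
	 *
	 *   mask = ((1u << width) - 1) << offset;                 // BFM
	 *   dst  = (mask & (insert << offset)) | (~mask & base);  // BFI
	 */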
5048 for (i = 0; i < 4; i++) {
5049 if (!(write_mask & (1<<i)))
5050 continue;
5051
5052 		/* create the mask in t1 (BFM) */
5053 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5054 alu.op = ALU_OP2_BFM_INT;
5055 alu.dst.sel = t1;
5056 alu.dst.chan = i;
5057 alu.dst.write = 1;
5058 alu.last = i == last_inst;
5059
5060 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
5061 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5062
5063 r = r600_bytecode_add_alu(ctx->bc, &alu);
5064 if (r)
5065 return r;
5066 }
5067
5068 t2 = r600_get_temp(ctx);
5069
5070 for (i = 0; i < 4; i++) {
5071 if (!(write_mask & (1<<i)))
5072 continue;
5073
5074 /* shift insert left */
5075 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5076 alu.op = ALU_OP2_LSHL_INT;
5077 alu.dst.sel = t2;
5078 alu.dst.chan = i;
5079 alu.dst.write = 1;
5080 alu.last = i == last_inst;
5081
5082 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5083 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5084
5085 r = r600_bytecode_add_alu(ctx->bc, &alu);
5086 if (r)
5087 return r;
5088 }
5089
5090 for (i = 0; i < 4; i++) {
5091 if (!(write_mask & (1<<i)))
5092 continue;
5093
5094 /* actual bitfield insert */
5095 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5096 alu.op = ALU_OP3_BFI_INT;
5097 alu.is_op3 = 1;
5098 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5099 alu.dst.chan = i;
5100 alu.dst.write = 1;
5101 alu.last = i == last_inst;
5102
5103 alu.src[0].sel = t1;
5104 alu.src[0].chan = i;
5105 alu.src[1].sel = t2;
5106 alu.src[1].chan = i;
5107 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
5108
5109 r = r600_bytecode_add_alu(ctx->bc, &alu);
5110 if (r)
5111 return r;
5112 }
5113
5114 return 0;
5115 }
5116
5117 static int tgsi_msb(struct r600_shader_ctx *ctx)
5118 {
5119 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5120 struct r600_bytecode_alu alu;
5121 int i, r, t1, t2;
5122
5123 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5124 int last_inst = tgsi_last_instruction(write_mask);
5125
5126 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
5127 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
5128
5129 t1 = ctx->temp_reg;
5130
5131 /* bit position is indexed from lsb by TGSI, and from msb by the hardware */
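	/* Example: for 0x00800000 (bit 23 set) FFBH returns 8 (eight leading
	 * zeros), and 31 - 8 = 23 is the TGSI result.  When no bit is found
	 * (input 0, or also -1 for the signed variant) FFBH returns -1, which
	 * the final CNDGE passes through unchanged. */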
5132 for (i = 0; i < 4; i++) {
5133 if (!(write_mask & (1<<i)))
5134 continue;
5135
5136 /* t1 = FFBH_INT / FFBH_UINT */
5137 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5138 alu.op = ctx->inst_info->op;
5139 alu.dst.sel = t1;
5140 alu.dst.chan = i;
5141 alu.dst.write = 1;
5142 alu.last = i == last_inst;
5143
5144 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5145
5146 r = r600_bytecode_add_alu(ctx->bc, &alu);
5147 if (r)
5148 return r;
5149 }
5150
5151 t2 = r600_get_temp(ctx);
5152
5153 for (i = 0; i < 4; i++) {
5154 if (!(write_mask & (1<<i)))
5155 continue;
5156
5157 /* t2 = 31 - t1 */
5158 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5159 alu.op = ALU_OP2_SUB_INT;
5160 alu.dst.sel = t2;
5161 alu.dst.chan = i;
5162 alu.dst.write = 1;
5163 alu.last = i == last_inst;
5164
5165 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
5166 alu.src[0].value = 31;
5167 alu.src[1].sel = t1;
5168 alu.src[1].chan = i;
5169
5170 r = r600_bytecode_add_alu(ctx->bc, &alu);
5171 if (r)
5172 return r;
5173 }
5174
5175 for (i = 0; i < 4; i++) {
5176 if (!(write_mask & (1<<i)))
5177 continue;
5178
5179 /* result = t1 >= 0 ? t2 : t1 */
5180 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5181 alu.op = ALU_OP3_CNDGE_INT;
5182 alu.is_op3 = 1;
5183 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5184 alu.dst.chan = i;
5185 alu.dst.write = 1;
5186 alu.last = i == last_inst;
5187
5188 alu.src[0].sel = t1;
5189 alu.src[0].chan = i;
5190 alu.src[1].sel = t2;
5191 alu.src[1].chan = i;
5192 alu.src[2].sel = t1;
5193 alu.src[2].chan = i;
5194
5195 r = r600_bytecode_add_alu(ctx->bc, &alu);
5196 if (r)
5197 return r;
5198 }
5199
5200 return 0;
5201 }
5202
5203 static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
5204 {
5205 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5206 struct r600_bytecode_alu alu;
5207 int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
5208 unsigned location;
5209 int input;
5210
5211 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
5212
5213 input = inst->Src[0].Register.Index;
5214
5215 /* Interpolators have been marked for use already by allocate_system_value_inputs */
5216 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5217 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5218 location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
5219 }
5220 else {
5221 location = TGSI_INTERPOLATE_LOC_CENTROID;
5222 }
5223
5224 k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
5225 if (k < 0)
5226 k = 0;
5227 interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
5228 interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);
5229
5230 	/* NOTE: currently the offset is not perspective correct */
5231 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5232 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5233 int sample_gpr = -1;
5234 int gradientsH, gradientsV;
5235 struct r600_bytecode_tex tex;
5236
5237 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5238 sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
5239 }
5240
5241 gradientsH = r600_get_temp(ctx);
5242 gradientsV = r600_get_temp(ctx);
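		/* Plan: fetch d(ij)/dx into gradientsH and d(ij)/dy into
		 * gradientsV, then evaluate the barycentrics at the shifted
		 * position with the two MULADD passes below:
		 *   ij' = ij + offset.x * d(ij)/dx + offset.y * d(ij)/dy
		 */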
5243 for (i = 0; i < 2; i++) {
5244 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
5245 tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
5246 tex.src_gpr = interp_gpr;
5247 tex.src_sel_x = interp_base_chan + 0;
5248 tex.src_sel_y = interp_base_chan + 1;
5249 tex.src_sel_z = 0;
5250 tex.src_sel_w = 0;
5251 tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
5252 tex.dst_sel_x = 0;
5253 tex.dst_sel_y = 1;
5254 tex.dst_sel_z = 7;
5255 tex.dst_sel_w = 7;
5256 tex.inst_mod = 1; // Use per pixel gradient calculation
5257 tex.sampler_id = 0;
5258 tex.resource_id = tex.sampler_id;
5259 r = r600_bytecode_add_tex(ctx->bc, &tex);
5260 if (r)
5261 return r;
5262 }
5263
5264 for (i = 0; i < 2; i++) {
5265 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5266 alu.op = ALU_OP3_MULADD;
5267 alu.is_op3 = 1;
5268 alu.src[0].sel = gradientsH;
5269 alu.src[0].chan = i;
5270 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5271 alu.src[1].sel = sample_gpr;
5272 alu.src[1].chan = 2;
5273 }
5274 else {
5275 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
5276 }
5277 alu.src[2].sel = interp_gpr;
5278 alu.src[2].chan = interp_base_chan + i;
5279 alu.dst.sel = ctx->temp_reg;
5280 alu.dst.chan = i;
5281 alu.last = i == 1;
5282
5283 r = r600_bytecode_add_alu(ctx->bc, &alu);
5284 if (r)
5285 return r;
5286 }
5287
5288 for (i = 0; i < 2; i++) {
5289 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5290 alu.op = ALU_OP3_MULADD;
5291 alu.is_op3 = 1;
5292 alu.src[0].sel = gradientsV;
5293 alu.src[0].chan = i;
5294 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5295 alu.src[1].sel = sample_gpr;
5296 alu.src[1].chan = 3;
5297 }
5298 else {
5299 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
5300 }
5301 alu.src[2].sel = ctx->temp_reg;
5302 alu.src[2].chan = i;
5303 alu.dst.sel = ctx->temp_reg;
5304 alu.dst.chan = i;
5305 alu.last = i == 1;
5306
5307 r = r600_bytecode_add_alu(ctx->bc, &alu);
5308 if (r)
5309 return r;
5310 }
5311 }
5312
5313 tmp = r600_get_temp(ctx);
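	/* INTERP_ZW writes only channels z/w and INTERP_XY only x/y, and each
	 * presumably has to be issued across a full slot group, so eight slots
	 * produce the four channels of tmp; src0 alternates between the j and
	 * i barycentrics (chan 1, then chan 0). */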
5314 for (i = 0; i < 8; i++) {
5315 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5316 alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;
5317
5318 alu.dst.sel = tmp;
5319 if ((i > 1 && i < 6)) {
5320 alu.dst.write = 1;
5321 }
5322 else {
5323 alu.dst.write = 0;
5324 }
5325 alu.dst.chan = i % 4;
5326
5327 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5328 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5329 alu.src[0].sel = ctx->temp_reg;
5330 alu.src[0].chan = 1 - (i % 2);
5331 } else {
5332 alu.src[0].sel = interp_gpr;
5333 alu.src[0].chan = interp_base_chan + 1 - (i % 2);
5334 }
5335 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
5336 alu.src[1].chan = 0;
5337
5338 alu.last = i % 4 == 3;
5339 alu.bank_swizzle_force = SQ_ALU_VEC_210;
5340
5341 r = r600_bytecode_add_alu(ctx->bc, &alu);
5342 if (r)
5343 return r;
5344 }
5345
5346 // INTERP can't swizzle dst
5347 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5348 for (i = 0; i <= lasti; i++) {
5349 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5350 continue;
5351
5352 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5353 alu.op = ALU_OP1_MOV;
5354 alu.src[0].sel = tmp;
5355 alu.src[0].chan = ctx->src[0].swizzle[i];
5356 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5357 alu.dst.write = 1;
5358 alu.last = i == lasti;
5359 r = r600_bytecode_add_alu(ctx->bc, &alu);
5360 if (r)
5361 return r;
5362 }
5363
5364 return 0;
5365 }
5366
5367
5368 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
5369 {
5370 struct r600_bytecode_alu alu;
5371 int i, r;
5372
5373 for (i = 0; i < 4; i++) {
5374 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5375 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
5376 alu.op = ALU_OP0_NOP;
5377 alu.dst.chan = i;
5378 } else {
5379 alu.op = ALU_OP1_MOV;
5380 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5381 alu.src[0].sel = ctx->temp_reg;
5382 alu.src[0].chan = i;
5383 }
5384 if (i == 3) {
5385 alu.last = 1;
5386 }
5387 r = r600_bytecode_add_alu(ctx->bc, &alu);
5388 if (r)
5389 return r;
5390 }
5391 return 0;
5392 }
5393
5394 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
5395 unsigned temp, int chan,
5396 struct r600_bytecode_alu_src *bc_src,
5397 const struct r600_shader_src *shader_src)
5398 {
5399 struct r600_bytecode_alu alu;
5400 int r;
5401
5402 r600_bytecode_src(bc_src, shader_src, chan);
5403
5404 /* op3 operands don't support abs modifier */
5405 if (bc_src->abs) {
5406 assert(temp!=0); /* we actually need the extra register, make sure it is allocated. */
5407 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5408 alu.op = ALU_OP1_MOV;
5409 alu.dst.sel = temp;
5410 alu.dst.chan = chan;
5411 alu.dst.write = 1;
5412
5413 alu.src[0] = *bc_src;
5414 alu.last = true; // sufficient?
5415 r = r600_bytecode_add_alu(ctx->bc, &alu);
5416 if (r)
5417 return r;
5418
5419 memset(bc_src, 0, sizeof(*bc_src));
5420 bc_src->sel = temp;
5421 bc_src->chan = chan;
5422 }
5423 return 0;
5424 }
5425
5426 static int tgsi_op3(struct r600_shader_ctx *ctx)
5427 {
5428 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5429 struct r600_bytecode_alu alu;
5430 int i, j, r;
5431 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5432 int temp_regs[4];
5433
5434 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5435 temp_regs[j] = 0;
5436 if (ctx->src[j].abs)
5437 temp_regs[j] = r600_get_temp(ctx);
5438 }
5439 for (i = 0; i < lasti + 1; i++) {
5440 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5441 continue;
5442
5443 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5444 alu.op = ctx->inst_info->op;
5445 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5446 r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]);
5447 if (r)
5448 return r;
5449 }
5450
5451 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5452 alu.dst.chan = i;
5453 alu.dst.write = 1;
5454 alu.is_op3 = 1;
5455 if (i == lasti) {
5456 alu.last = 1;
5457 }
5458 r = r600_bytecode_add_alu(ctx->bc, &alu);
5459 if (r)
5460 return r;
5461 }
5462 return 0;
5463 }
5464
5465 static int tgsi_dp(struct r600_shader_ctx *ctx)
5466 {
5467 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5468 struct r600_bytecode_alu alu;
5469 int i, j, r;
5470
5471 for (i = 0; i < 4; i++) {
5472 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5473 alu.op = ctx->inst_info->op;
5474 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5475 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
5476 }
5477
5478 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5479 alu.dst.chan = i;
5480 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5481 /* handle some special cases */
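		/* DP2/DP3: feed 0.0 into the unused upper channels so they add
		 * nothing to the sum.  DPH(a,b) = a.x*b.x + a.y*b.y + a.z*b.z + b.w,
		 * so channel 3 replaces src0.w with 1.0. */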
5482 switch (inst->Instruction.Opcode) {
5483 case TGSI_OPCODE_DP2:
5484 if (i > 1) {
5485 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
5486 alu.src[0].chan = alu.src[1].chan = 0;
5487 }
5488 break;
5489 case TGSI_OPCODE_DP3:
5490 if (i > 2) {
5491 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
5492 alu.src[0].chan = alu.src[1].chan = 0;
5493 }
5494 break;
5495 case TGSI_OPCODE_DPH:
5496 if (i == 3) {
5497 alu.src[0].sel = V_SQ_ALU_SRC_1;
5498 alu.src[0].chan = 0;
5499 alu.src[0].neg = 0;
5500 }
5501 break;
5502 default:
5503 break;
5504 }
5505 if (i == 3) {
5506 alu.last = 1;
5507 }
5508 r = r600_bytecode_add_alu(ctx->bc, &alu);
5509 if (r)
5510 return r;
5511 }
5512 return 0;
5513 }
5514
5515 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
5516 unsigned index)
5517 {
5518 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5519 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
5520 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
5521 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
5522 ctx->src[index].neg || ctx->src[index].abs ||
5523 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == TGSI_PROCESSOR_GEOMETRY);
5524 }
5525
5526 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
5527 unsigned index)
5528 {
5529 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5530 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
5531 }
5532
5533 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
5534 {
5535 struct r600_bytecode_vtx vtx;
5536 struct r600_bytecode_alu alu;
5537 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5538 int src_gpr, r, i;
5539 int id = tgsi_tex_get_src_gpr(ctx, 1);
5540
5541 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
5542 if (src_requires_loading) {
5543 for (i = 0; i < 4; i++) {
5544 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5545 alu.op = ALU_OP1_MOV;
5546 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5547 alu.dst.sel = ctx->temp_reg;
5548 alu.dst.chan = i;
5549 if (i == 3)
5550 alu.last = 1;
5551 alu.dst.write = 1;
5552 r = r600_bytecode_add_alu(ctx->bc, &alu);
5553 if (r)
5554 return r;
5555 }
5556 src_gpr = ctx->temp_reg;
5557 }
5558
5559 memset(&vtx, 0, sizeof(vtx));
5560 vtx.op = FETCH_OP_VFETCH;
5561 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
5562 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
5563 vtx.src_gpr = src_gpr;
5564 vtx.mega_fetch_count = 16;
5565 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
5566 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
5567 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
5568 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
5569 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
5570 vtx.use_const_fields = 1;
5571
5572 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
5573 return r;
5574
5575 if (ctx->bc->chip_class >= EVERGREEN)
5576 return 0;
5577
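	/* Pre-Evergreen fixup, presumably because the fetch constants alone
	 * cannot fully describe the buffer format here: AND each written
	 * component with a per-format mask from the buffer-info constants,
	 * then OR the per-format alpha word into channel 3. */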
5578 for (i = 0; i < 4; i++) {
5579 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5580 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5581 continue;
5582
5583 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5584 alu.op = ALU_OP2_AND_INT;
5585
5586 alu.dst.chan = i;
5587 alu.dst.sel = vtx.dst_gpr;
5588 alu.dst.write = 1;
5589
5590 alu.src[0].sel = vtx.dst_gpr;
5591 alu.src[0].chan = i;
5592
5593 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
5594 alu.src[1].sel += (id * 2);
5595 alu.src[1].chan = i % 4;
5596 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5597
5598 if (i == lasti)
5599 alu.last = 1;
5600 r = r600_bytecode_add_alu(ctx->bc, &alu);
5601 if (r)
5602 return r;
5603 }
5604
5605 if (inst->Dst[0].Register.WriteMask & 3) {
5606 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5607 alu.op = ALU_OP2_OR_INT;
5608
5609 alu.dst.chan = 3;
5610 alu.dst.sel = vtx.dst_gpr;
5611 alu.dst.write = 1;
5612
5613 alu.src[0].sel = vtx.dst_gpr;
5614 alu.src[0].chan = 3;
5615
5616 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
5617 alu.src[1].chan = 0;
5618 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5619
5620 alu.last = 1;
5621 r = r600_bytecode_add_alu(ctx->bc, &alu);
5622 if (r)
5623 return r;
5624 }
5625 return 0;
5626 }
5627
5628 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx)
5629 {
5630 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5631 struct r600_bytecode_alu alu;
5632 int r;
5633 int id = tgsi_tex_get_src_gpr(ctx, 1);
5634
5635 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5636 alu.op = ALU_OP1_MOV;
5637 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
5638 if (ctx->bc->chip_class >= EVERGREEN) {
5639 /* channel 0 or 2 of each word */
5640 alu.src[0].sel += (id / 2);
5641 alu.src[0].chan = (id % 2) * 2;
5642 } else {
5643 		/* on r600 they are at channel 1 of the second dword */
5644 alu.src[0].sel += (id * 2) + 1;
5645 alu.src[0].chan = 1;
5646 }
5647 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5648 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
5649 alu.last = 1;
5650 r = r600_bytecode_add_alu(ctx->bc, &alu);
5651 if (r)
5652 return r;
5653 return 0;
5654 }
5655
5656 static int tgsi_tex(struct r600_shader_ctx *ctx)
5657 {
5658 static float one_point_five = 1.5f;
5659 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5660 struct r600_bytecode_tex tex;
5661 struct r600_bytecode_alu alu;
5662 unsigned src_gpr;
5663 int r, i, j;
5664 int opcode;
5665 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
5666 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
5667 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
5668 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
5669
5670 bool txf_add_offsets = inst->Texture.NumOffsets &&
5671 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
5672 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
5673
5674 /* Texture fetch instructions can only use gprs as source.
5675 * Also they cannot negate the source or take the absolute value */
5676 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ &&
5677 tgsi_tex_src_requires_loading(ctx, 0)) ||
5678 read_compressed_msaa || txf_add_offsets;
5679
5680 boolean src_loaded = FALSE;
5681 unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
5682 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
5683 boolean has_txq_cube_array_z = false;
5684 unsigned sampler_index_mode;
5685
5686 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
5687 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5688 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
5689 if (inst->Dst[0].Register.WriteMask & 4) {
5690 ctx->shader->has_txq_cube_array_z_comp = true;
5691 has_txq_cube_array_z = true;
5692 }
5693
5694 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
5695 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5696 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
5697 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
5698 sampler_src_reg = 2;
5699
5700 /* TGSI moves the sampler to src reg 3 for TXD */
5701 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
5702 sampler_src_reg = 3;
5703
5704 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
5705 if (sampler_index_mode)
5706 ctx->shader->uses_index_registers = true;
5707
5708 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
5709
5710 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
5711 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
5712 ctx->shader->uses_tex_buffers = true;
5713 return r600_do_buffer_txq(ctx);
5714 }
5715 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
5716 if (ctx->bc->chip_class < EVERGREEN)
5717 ctx->shader->uses_tex_buffers = true;
5718 return do_vtx_fetch_inst(ctx, src_requires_loading);
5719 }
5720 }
5721
5722 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
5723 int out_chan;
5724 /* Add perspective divide */
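		/* TXP: tmp.xyz = src.xyz * recip(src.w), tmp.w = 1.0, and the
		 * divided coordinates are sampled instead. */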
5725 if (ctx->bc->chip_class == CAYMAN) {
5726 out_chan = 2;
5727 for (i = 0; i < 3; i++) {
5728 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5729 alu.op = ALU_OP1_RECIP_IEEE;
5730 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5731
5732 alu.dst.sel = ctx->temp_reg;
5733 alu.dst.chan = i;
5734 if (i == 2)
5735 alu.last = 1;
5736 if (out_chan == i)
5737 alu.dst.write = 1;
5738 r = r600_bytecode_add_alu(ctx->bc, &alu);
5739 if (r)
5740 return r;
5741 }
5742
5743 } else {
5744 out_chan = 3;
5745 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5746 alu.op = ALU_OP1_RECIP_IEEE;
5747 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5748
5749 alu.dst.sel = ctx->temp_reg;
5750 alu.dst.chan = out_chan;
5751 alu.last = 1;
5752 alu.dst.write = 1;
5753 r = r600_bytecode_add_alu(ctx->bc, &alu);
5754 if (r)
5755 return r;
5756 }
5757
5758 for (i = 0; i < 3; i++) {
5759 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5760 alu.op = ALU_OP2_MUL;
5761 alu.src[0].sel = ctx->temp_reg;
5762 alu.src[0].chan = out_chan;
5763 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5764 alu.dst.sel = ctx->temp_reg;
5765 alu.dst.chan = i;
5766 alu.dst.write = 1;
5767 r = r600_bytecode_add_alu(ctx->bc, &alu);
5768 if (r)
5769 return r;
5770 }
5771 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5772 alu.op = ALU_OP1_MOV;
5773 alu.src[0].sel = V_SQ_ALU_SRC_1;
5774 alu.src[0].chan = 0;
5775 alu.dst.sel = ctx->temp_reg;
5776 alu.dst.chan = 3;
5777 alu.last = 1;
5778 alu.dst.write = 1;
5779 r = r600_bytecode_add_alu(ctx->bc, &alu);
5780 if (r)
5781 return r;
5782 src_loaded = TRUE;
5783 src_gpr = ctx->temp_reg;
5784 }
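/* Net effect of the TXP path above, written out as equations (a summary
 * of the ALU ops just emitted, not additional code):
 *   temp.xyz = src.xyz * (1 / src.w)
 *   temp.w   = 1.0
 * i.e. the projective divide happens in ALU before the texture fetch. */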
5785
5786
5787 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
5788 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5789 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
5790 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
5791 inst->Instruction.Opcode != TGSI_OPCODE_TXQ &&
5792 inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) {
5793
5794 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
5795 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
5796
5797 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
5798 for (i = 0; i < 4; i++) {
5799 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5800 alu.op = ALU_OP2_CUBE;
5801 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
5802 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
5803 alu.dst.sel = ctx->temp_reg;
5804 alu.dst.chan = i;
5805 if (i == 3)
5806 alu.last = 1;
5807 alu.dst.write = 1;
5808 r = r600_bytecode_add_alu(ctx->bc, &alu);
5809 if (r)
5810 return r;
5811 }
5812
5813 /* tmp1.z = RCP_e(|tmp1.z|) */
5814 if (ctx->bc->chip_class == CAYMAN) {
5815 for (i = 0; i < 3; i++) {
5816 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5817 alu.op = ALU_OP1_RECIP_IEEE;
5818 alu.src[0].sel = ctx->temp_reg;
5819 alu.src[0].chan = 2;
5820 alu.src[0].abs = 1;
5821 alu.dst.sel = ctx->temp_reg;
5822 alu.dst.chan = i;
5823 if (i == 2)
5824 alu.dst.write = 1;
5825 if (i == 2)
5826 alu.last = 1;
5827 r = r600_bytecode_add_alu(ctx->bc, &alu);
5828 if (r)
5829 return r;
5830 }
5831 } else {
5832 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5833 alu.op = ALU_OP1_RECIP_IEEE;
5834 alu.src[0].sel = ctx->temp_reg;
5835 alu.src[0].chan = 2;
5836 alu.src[0].abs = 1;
5837 alu.dst.sel = ctx->temp_reg;
5838 alu.dst.chan = 2;
5839 alu.dst.write = 1;
5840 alu.last = 1;
5841 r = r600_bytecode_add_alu(ctx->bc, &alu);
5842 if (r)
5843 return r;
5844 }
5845
5846 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
5847 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
5848 * muladd has no writemask, have to use another temp
5849 */
5850 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5851 alu.op = ALU_OP3_MULADD;
5852 alu.is_op3 = 1;
5853
5854 alu.src[0].sel = ctx->temp_reg;
5855 alu.src[0].chan = 0;
5856 alu.src[1].sel = ctx->temp_reg;
5857 alu.src[1].chan = 2;
5858
5859 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5860 alu.src[2].chan = 0;
5861 alu.src[2].value = *(uint32_t *)&one_point_five;
5862
5863 alu.dst.sel = ctx->temp_reg;
5864 alu.dst.chan = 0;
5865 alu.dst.write = 1;
5866
5867 r = r600_bytecode_add_alu(ctx->bc, &alu);
5868 if (r)
5869 return r;
5870
5871 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5872 alu.op = ALU_OP3_MULADD;
5873 alu.is_op3 = 1;
5874
5875 alu.src[0].sel = ctx->temp_reg;
5876 alu.src[0].chan = 1;
5877 alu.src[1].sel = ctx->temp_reg;
5878 alu.src[1].chan = 2;
5879
5880 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5881 alu.src[2].chan = 0;
5882 alu.src[2].value = *(uint32_t *)&one_point_five;
5883
5884 alu.dst.sel = ctx->temp_reg;
5885 alu.dst.chan = 1;
5886 alu.dst.write = 1;
5887
5888 alu.last = 1;
5889 r = r600_bytecode_add_alu(ctx->bc, &alu);
5890 if (r)
5891 return r;
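/* Taken together, the CUBE/RECIP/MULADD sequence above computes
 *   temp.x = temp.x / |temp.z| + 1.5
 *   temp.y = temp.y / |temp.z| + 1.5
 * i.e. the face-local coordinates from CUBE are normalized by the major
 * axis and biased by 1.5 into the range the sampler expects. */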
5892 /* write the initial compare value into the Z component:
5893 - src0.W for shadow cube
5894 - src1.X for shadow cube array */
5895 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
5896 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
5897 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5898 alu.op = ALU_OP1_MOV;
5899 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
5900 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5901 else
5902 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5903 alu.dst.sel = ctx->temp_reg;
5904 alu.dst.chan = 2;
5905 alu.dst.write = 1;
5906 alu.last = 1;
5907 r = r600_bytecode_add_alu(ctx->bc, &alu);
5908 if (r)
5909 return r;
5910 }
5911
5912 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5913 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
5914 if (ctx->bc->chip_class >= EVERGREEN) {
5915 int mytmp = r600_get_temp(ctx);
5916 static const float eight = 8.0f;
5917 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5918 alu.op = ALU_OP1_MOV;
5919 alu.src[0].sel = ctx->temp_reg;
5920 alu.src[0].chan = 3;
5921 alu.dst.sel = mytmp;
5922 alu.dst.chan = 0;
5923 alu.dst.write = 1;
5924 alu.last = 1;
5925 r = r600_bytecode_add_alu(ctx->bc, &alu);
5926 if (r)
5927 return r;
5928
5929 /* have to multiply the original layer by 8 and add the face id (temp.w); the sum is later routed into Z */
5930 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5931 alu.op = ALU_OP3_MULADD;
5932 alu.is_op3 = 1;
5933 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5934 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5935 alu.src[1].chan = 0;
5936 alu.src[1].value = *(uint32_t *)&eight;
5937 alu.src[2].sel = mytmp;
5938 alu.src[2].chan = 0;
5939 alu.dst.sel = ctx->temp_reg;
5940 alu.dst.chan = 3;
5941 alu.dst.write = 1;
5942 alu.last = 1;
5943 r = r600_bytecode_add_alu(ctx->bc, &alu);
5944 if (r)
5945 return r;
5946 } else if (ctx->bc->chip_class < EVERGREEN) {
5947 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
5948 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
5949 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
5950 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
5951 tex.src_gpr = r600_get_temp(ctx);
5952 tex.src_sel_x = 0;
5953 tex.src_sel_y = 0;
5954 tex.src_sel_z = 0;
5955 tex.src_sel_w = 0;
5956 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
5957 tex.coord_type_x = 1;
5958 tex.coord_type_y = 1;
5959 tex.coord_type_z = 1;
5960 tex.coord_type_w = 1;
5961 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5962 alu.op = ALU_OP1_MOV;
5963 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5964 alu.dst.sel = tex.src_gpr;
5965 alu.dst.chan = 0;
5966 alu.last = 1;
5967 alu.dst.write = 1;
5968 r = r600_bytecode_add_alu(ctx->bc, &alu);
5969 if (r)
5970 return r;
5971
5972 r = r600_bytecode_add_tex(ctx->bc, &tex);
5973 if (r)
5974 return r;
5975 }
5976
5977 }
5978
5979 /* for cube forms of lod and bias we need to route the lod/bias value into Z (it is later steered into W) */
5980 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5981 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5982 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5983 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
5984 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5985 alu.op = ALU_OP1_MOV;
5986 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5987 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
5988 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5989 else
5990 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5991 alu.dst.sel = ctx->temp_reg;
5992 alu.dst.chan = 2;
5993 alu.last = 1;
5994 alu.dst.write = 1;
5995 r = r600_bytecode_add_alu(ctx->bc, &alu);
5996 if (r)
5997 return r;
5998 }
5999
6000 src_loaded = TRUE;
6001 src_gpr = ctx->temp_reg;
6002 }
6003
6004 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
6005 int temp_h = 0, temp_v = 0;
6006 int start_val = 0;
6007
6008 /* if we've already loaded the src (e.g. for CUBE), don't reload it. */
6009 if (src_loaded == TRUE)
6010 start_val = 1;
6011 else
6012 src_loaded = TRUE;
6013 for (i = start_val; i < 3; i++) {
6014 int treg = r600_get_temp(ctx);
6015
6016 if (i == 0)
6017 src_gpr = treg;
6018 else if (i == 1)
6019 temp_h = treg;
6020 else
6021 temp_v = treg;
6022
6023 for (j = 0; j < 4; j++) {
6024 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6025 alu.op = ALU_OP1_MOV;
6026 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
6027 alu.dst.sel = treg;
6028 alu.dst.chan = j;
6029 if (j == 3)
6030 alu.last = 1;
6031 alu.dst.write = 1;
6032 r = r600_bytecode_add_alu(ctx->bc, &alu);
6033 if (r)
6034 return r;
6035 }
6036 }
6037 for (i = 1; i < 3; i++) {
6038 /* set gradients h/v */
6039 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6040 tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
6041 FETCH_OP_SET_GRADIENTS_V;
6042 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6043 tex.sampler_index_mode = sampler_index_mode;
6044 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6045 tex.resource_index_mode = sampler_index_mode;
6046
6047 tex.src_gpr = (i == 1) ? temp_h : temp_v;
6048 tex.src_sel_x = 0;
6049 tex.src_sel_y = 1;
6050 tex.src_sel_z = 2;
6051 tex.src_sel_w = 3;
6052
6053 tex.dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
6054 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
6055 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
6056 tex.coord_type_x = 1;
6057 tex.coord_type_y = 1;
6058 tex.coord_type_z = 1;
6059 tex.coord_type_w = 1;
6060 }
6061 r = r600_bytecode_add_tex(ctx->bc, &tex);
6062 if (r)
6063 return r;
6064 }
6065 }
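/* At this point the TXD derivatives are latched in the texture unit via
 * SET_GRADIENTS_H/V; the gradient-sampling fetch emitted at the end of
 * this function consumes them. */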
6066
6067 if (src_requires_loading && !src_loaded) {
6068 for (i = 0; i < 4; i++) {
6069 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6070 alu.op = ALU_OP1_MOV;
6071 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6072 alu.dst.sel = ctx->temp_reg;
6073 alu.dst.chan = i;
6074 if (i == 3)
6075 alu.last = 1;
6076 alu.dst.write = 1;
6077 r = r600_bytecode_add_alu(ctx->bc, &alu);
6078 if (r)
6079 return r;
6080 }
6081 src_loaded = TRUE;
6082 src_gpr = ctx->temp_reg;
6083 }
6084
6085 /* get offset values */
6086 if (inst->Texture.NumOffsets) {
6087 assert(inst->Texture.NumOffsets == 1);
6088
6089 /* The texture offset feature doesn't work with the TXF instruction
6090 * and must be emulated by adding the offset to the texture coordinates. */
6091 if (txf_add_offsets) {
6092 const struct tgsi_texture_offset *off = inst->TexOffsets;
6093
6094 switch (inst->Texture.Texture) {
6095 case TGSI_TEXTURE_3D:
6096 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6097 alu.op = ALU_OP2_ADD_INT;
6098 alu.src[0].sel = src_gpr;
6099 alu.src[0].chan = 2;
6100 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6101 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
6102 alu.dst.sel = src_gpr;
6103 alu.dst.chan = 2;
6104 alu.dst.write = 1;
6105 alu.last = 1;
6106 r = r600_bytecode_add_alu(ctx->bc, &alu);
6107 if (r)
6108 return r;
6109 /* fall through */
6110
6111 case TGSI_TEXTURE_2D:
6112 case TGSI_TEXTURE_SHADOW2D:
6113 case TGSI_TEXTURE_RECT:
6114 case TGSI_TEXTURE_SHADOWRECT:
6115 case TGSI_TEXTURE_2D_ARRAY:
6116 case TGSI_TEXTURE_SHADOW2D_ARRAY:
6117 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6118 alu.op = ALU_OP2_ADD_INT;
6119 alu.src[0].sel = src_gpr;
6120 alu.src[0].chan = 1;
6121 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6122 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
6123 alu.dst.sel = src_gpr;
6124 alu.dst.chan = 1;
6125 alu.dst.write = 1;
6126 alu.last = 1;
6127 r = r600_bytecode_add_alu(ctx->bc, &alu);
6128 if (r)
6129 return r;
6130 /* fall through */
6131
6132 case TGSI_TEXTURE_1D:
6133 case TGSI_TEXTURE_SHADOW1D:
6134 case TGSI_TEXTURE_1D_ARRAY:
6135 case TGSI_TEXTURE_SHADOW1D_ARRAY:
6136 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6137 alu.op = ALU_OP2_ADD_INT;
6138 alu.src[0].sel = src_gpr;
6139 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6140 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
6141 alu.dst.sel = src_gpr;
6142 alu.dst.write = 1;
6143 alu.last = 1;
6144 r = r600_bytecode_add_alu(ctx->bc, &alu);
6145 if (r)
6146 return r;
6147 break;
6148 /* texture offsets do not apply to other texture targets */
6149 }
6150 } else {
6151 switch (inst->Texture.Texture) {
6152 case TGSI_TEXTURE_3D:
6153 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
6154 /* fallthrough */
6155 case TGSI_TEXTURE_2D:
6156 case TGSI_TEXTURE_SHADOW2D:
6157 case TGSI_TEXTURE_RECT:
6158 case TGSI_TEXTURE_SHADOWRECT:
6159 case TGSI_TEXTURE_2D_ARRAY:
6160 case TGSI_TEXTURE_SHADOW2D_ARRAY:
6161 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
6162 /* fallthrough */
6163 case TGSI_TEXTURE_1D:
6164 case TGSI_TEXTURE_SHADOW1D:
6165 case TGSI_TEXTURE_1D_ARRAY:
6166 case TGSI_TEXTURE_SHADOW1D_ARRAY:
6167 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
6168 }
6169 }
6170 }
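/* Note on the "<< 1" above: the TEX instruction offset fields are 4.1
 * fixed point (0.5-texel units), so the integer TGSI offsets are shifted
 * left by one when stored. */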
6171
6172 /* Obtain the sample index for reading a compressed MSAA color texture.
6173 * To read the FMASK, we use the ldfptr instruction, which tells us
6174 * where the samples are stored.
6175 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
6176 * which is the identity mapping. Each nibble says which physical sample
6177 * should be fetched to get that sample.
6178 *
6179 * Assume src.z contains the sample index. It should be modified like this:
6180 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
6181 * Then fetch the texel with src.
6182 */
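/* Worked example: with the identity FMASK 0x76543210 and src.z = 2,
 * (0x76543210 >> (2 * 4)) & 0xF = 2, i.e. logical sample 2 fetches
 * physical sample 2. */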
6183 if (read_compressed_msaa) {
6184 unsigned sample_chan = 3;
6185 unsigned temp = r600_get_temp(ctx);
6186 assert(src_loaded);
6187
6188 /* temp.w = ldfptr() */
6189 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6190 tex.op = FETCH_OP_LD;
6191 tex.inst_mod = 1; /* to indicate this is ldfptr */
6192 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6193 tex.sampler_index_mode = sampler_index_mode;
6194 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6195 tex.resource_index_mode = sampler_index_mode;
6196 tex.src_gpr = src_gpr;
6197 tex.dst_gpr = temp;
6198 tex.dst_sel_x = 7; /* mask out these components */
6199 tex.dst_sel_y = 7;
6200 tex.dst_sel_z = 7;
6201 tex.dst_sel_w = 0; /* store X */
6202 tex.src_sel_x = 0;
6203 tex.src_sel_y = 1;
6204 tex.src_sel_z = 2;
6205 tex.src_sel_w = 3;
6206 tex.offset_x = offset_x;
6207 tex.offset_y = offset_y;
6208 tex.offset_z = offset_z;
6209 r = r600_bytecode_add_tex(ctx->bc, &tex);
6210 if (r)
6211 return r;
6212
6213 /* temp.x = sample_index*4 */
6214 if (ctx->bc->chip_class == CAYMAN) {
6215 for (i = 0 ; i < 4; i++) {
6216 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6217 alu.op = ALU_OP2_MULLO_INT;
6218 alu.src[0].sel = src_gpr;
6219 alu.src[0].chan = sample_chan;
6220 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6221 alu.src[1].value = 4;
6222 alu.dst.sel = temp;
6223 alu.dst.chan = i;
6224 alu.dst.write = i == 0;
6225 if (i == 3)
6226 alu.last = 1;
6227 r = r600_bytecode_add_alu(ctx->bc, &alu);
6228 if (r)
6229 return r;
6230 }
6231 } else {
6232 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6233 alu.op = ALU_OP2_MULLO_INT;
6234 alu.src[0].sel = src_gpr;
6235 alu.src[0].chan = sample_chan;
6236 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6237 alu.src[1].value = 4;
6238 alu.dst.sel = temp;
6239 alu.dst.chan = 0;
6240 alu.dst.write = 1;
6241 alu.last = 1;
6242 r = r600_bytecode_add_alu(ctx->bc, &alu);
6243 if (r)
6244 return r;
6245 }
6246
6247 /* sample_index = temp.w >> temp.x */
6248 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6249 alu.op = ALU_OP2_LSHR_INT;
6250 alu.src[0].sel = temp;
6251 alu.src[0].chan = 3;
6252 alu.src[1].sel = temp;
6253 alu.src[1].chan = 0;
6254 alu.dst.sel = src_gpr;
6255 alu.dst.chan = sample_chan;
6256 alu.dst.write = 1;
6257 alu.last = 1;
6258 r = r600_bytecode_add_alu(ctx->bc, &alu);
6259 if (r)
6260 return r;
6261
6262 /* sample_index & 0xF */
6263 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6264 alu.op = ALU_OP2_AND_INT;
6265 alu.src[0].sel = src_gpr;
6266 alu.src[0].chan = sample_chan;
6267 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6268 alu.src[1].value = 0xF;
6269 alu.dst.sel = src_gpr;
6270 alu.dst.chan = sample_chan;
6271 alu.dst.write = 1;
6272 alu.last = 1;
6273 r = r600_bytecode_add_alu(ctx->bc, &alu);
6274 if (r)
6275 return r;
6276 #if 0
6277 /* visualize the FMASK */
6278 for (i = 0; i < 4; i++) {
6279 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6280 alu.op = ALU_OP1_INT_TO_FLT;
6281 alu.src[0].sel = src_gpr;
6282 alu.src[0].chan = sample_chan;
6283 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6284 alu.dst.chan = i;
6285 alu.dst.write = 1;
6286 alu.last = 1;
6287 r = r600_bytecode_add_alu(ctx->bc, &alu);
6288 if (r)
6289 return r;
6290 }
6291 return 0;
6292 #endif
6293 }
6294
6295 /* does this shader want the number of layers from TXQ for a cube array? */
6296 if (has_txq_cube_array_z) {
6297 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6298
6299 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6300 alu.op = ALU_OP1_MOV;
6301
6302 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
6303 if (ctx->bc->chip_class >= EVERGREEN) {
6304 /* channel 1 or 3 of each word */
6305 alu.src[0].sel += (id / 2);
6306 alu.src[0].chan = ((id % 2) * 2) + 1;
6307 } else {
6308 /* on r600 it is at channel 2 of the second dword */
6309 alu.src[0].sel += (id * 2) + 1;
6310 alu.src[0].chan = 2;
6311 }
6312 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6313 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
6314 alu.last = 1;
6315 r = r600_bytecode_add_alu(ctx->bc, &alu);
6316 if (r)
6317 return r;
6318 /* disable writemask from texture instruction */
6319 inst->Dst[0].Register.WriteMask &= ~4;
6320 }
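/* Addressing example for the constant read above: with id = 3, evergreen
 * reads constant R600_SHADER_BUFFER_INFO_SEL + 1, channel 3; r600 reads
 * constant R600_SHADER_BUFFER_INFO_SEL + 7, channel 2. */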
6321
6322 opcode = ctx->inst_info->op;
6323 if (opcode == FETCH_OP_GATHER4 &&
6324 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
6325 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
6326 opcode = FETCH_OP_GATHER4_O;
6327
6328 /* GATHER4_O/GATHER4_C_O use offset values loaded by
6329 SET_TEXTURE_OFFSETS instruction. The immediate offset values
6330 encoded in the instruction are ignored. */
6331 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6332 tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
6333 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6334 tex.sampler_index_mode = sampler_index_mode;
6335 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6336 tex.resource_index_mode = sampler_index_mode;
6337
6338 tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
6339 tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
6340 tex.src_sel_y = inst->TexOffsets[0].SwizzleY;
6341 tex.src_sel_z = inst->TexOffsets[0].SwizzleZ;
6342 tex.src_sel_w = 4;
6343
6344 tex.dst_sel_x = 7;
6345 tex.dst_sel_y = 7;
6346 tex.dst_sel_z = 7;
6347 tex.dst_sel_w = 7;
6348
6349 r = r600_bytecode_add_tex(ctx->bc, &tex);
6350 if (r)
6351 return r;
6352 }
6353
6354 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
6355 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
6356 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
6357 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6358 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
6359 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
6360 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
6361 switch (opcode) {
6362 case FETCH_OP_SAMPLE:
6363 opcode = FETCH_OP_SAMPLE_C;
6364 break;
6365 case FETCH_OP_SAMPLE_L:
6366 opcode = FETCH_OP_SAMPLE_C_L;
6367 break;
6368 case FETCH_OP_SAMPLE_LB:
6369 opcode = FETCH_OP_SAMPLE_C_LB;
6370 break;
6371 case FETCH_OP_SAMPLE_G:
6372 opcode = FETCH_OP_SAMPLE_C_G;
6373 break;
6374 /* Texture gather variants */
6375 case FETCH_OP_GATHER4:
6376 opcode = FETCH_OP_GATHER4_C;
6377 break;
6378 case FETCH_OP_GATHER4_O:
6379 opcode = FETCH_OP_GATHER4_C_O;
6380 break;
6381 }
6382 }
6383
6384 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6385 tex.op = opcode;
6386
6387 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6388 tex.sampler_index_mode = sampler_index_mode;
6389 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6390 tex.resource_index_mode = sampler_index_mode;
6391 tex.src_gpr = src_gpr;
6392 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6393
6394 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
6395 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
6396 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
6397 }
6398
6399 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
6400 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
6401 tex.inst_mod = texture_component_select;
6402
6403 if (ctx->bc->chip_class == CAYMAN) {
6404 /* GATHER4 result order is different from TGSI TG4 */
6405 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
6406 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
6407 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
6408 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6409 } else {
6410 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6411 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
6412 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6413 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6414 }
6415 }
6416 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
6417 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6418 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6419 tex.dst_sel_z = 7;
6420 tex.dst_sel_w = 7;
6421 }
6422 else {
6423 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6424 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6425 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
6426 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6427 }
6428
6429
6430 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ) {
6431 tex.src_sel_x = 4;
6432 tex.src_sel_y = 4;
6433 tex.src_sel_z = 4;
6434 tex.src_sel_w = 4;
6435 } else if (src_loaded) {
6436 tex.src_sel_x = 0;
6437 tex.src_sel_y = 1;
6438 tex.src_sel_z = 2;
6439 tex.src_sel_w = 3;
6440 } else {
6441 tex.src_sel_x = ctx->src[0].swizzle[0];
6442 tex.src_sel_y = ctx->src[0].swizzle[1];
6443 tex.src_sel_z = ctx->src[0].swizzle[2];
6444 tex.src_sel_w = ctx->src[0].swizzle[3];
6445 tex.src_rel = ctx->src[0].rel;
6446 }
6447
6448 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
6449 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6450 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6451 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
6452 tex.src_sel_x = 1;
6453 tex.src_sel_y = 0;
6454 tex.src_sel_z = 3;
6455 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
6456 }
6457
6458 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
6459 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
6460 tex.coord_type_x = 1;
6461 tex.coord_type_y = 1;
6462 }
6463 tex.coord_type_z = 1;
6464 tex.coord_type_w = 1;
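/* coord_type = 1 marks a normalized coordinate; RECT targets keep x/y
 * unnormalized, which is why they are skipped above. */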
6465
6466 tex.offset_x = offset_x;
6467 tex.offset_y = offset_y;
6468 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
6469 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
6470 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
6471 tex.offset_z = 0;
6472 }
6473 else {
6474 tex.offset_z = offset_z;
6475 }
6476
6477 /* Put the depth for comparison in W.
6478 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
6479 * Some instructions expect the depth in Z. */
6480 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
6481 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
6482 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
6483 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
6484 opcode != FETCH_OP_SAMPLE_C_L &&
6485 opcode != FETCH_OP_SAMPLE_C_LB) {
6486 tex.src_sel_w = tex.src_sel_z;
6487 }
6488
6489 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
6490 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
6491 if (opcode == FETCH_OP_SAMPLE_C_L ||
6492 opcode == FETCH_OP_SAMPLE_C_LB) {
6493 /* the array index is read from Y */
6494 tex.coord_type_y = 0;
6495 } else {
6496 /* the array index is read from Z */
6497 tex.coord_type_z = 0;
6498 tex.src_sel_z = tex.src_sel_y;
6499 }
6500 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
6501 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
6502 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6503 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
6504 (ctx->bc->chip_class >= EVERGREEN)))
6505 /* the array index is read from Z */
6506 tex.coord_type_z = 0;
6507
6508 /* mask unused source components */
6509 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
6510 switch (inst->Texture.Texture) {
6511 case TGSI_TEXTURE_2D:
6512 case TGSI_TEXTURE_RECT:
6513 tex.src_sel_z = 7;
6514 tex.src_sel_w = 7;
6515 break;
6516 case TGSI_TEXTURE_1D_ARRAY:
6517 tex.src_sel_y = 7;
6518 tex.src_sel_w = 7;
6519 break;
6520 case TGSI_TEXTURE_1D:
6521 tex.src_sel_y = 7;
6522 tex.src_sel_z = 7;
6523 tex.src_sel_w = 7;
6524 break;
6525 }
6526 }
6527
6528 r = r600_bytecode_add_tex(ctx->bc, &tex);
6529 if (r)
6530 return r;
6531
6532 /* add shadow ambient support - gallium doesn't do it yet */
6533 return 0;
6534 }
6535
6536 static int tgsi_lrp(struct r600_shader_ctx *ctx)
6537 {
6538 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6539 struct r600_bytecode_alu alu;
6540 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6541 unsigned i, temp_regs[2];
6542 int r;
6543
6544 /* optimize the equal-balance case: lerp(0.5, a, b) = (a + b) / 2, done as one ADD with the divide-by-2 output modifier (omod = 3) */
6545 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
6546 for (i = 0; i < lasti + 1; i++) {
6547 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6548 continue;
6549
6550 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6551 alu.op = ALU_OP2_ADD;
6552 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6553 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6554 alu.omod = 3;
6555 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6556 alu.dst.chan = i;
6557 if (i == lasti) {
6558 alu.last = 1;
6559 }
6560 r = r600_bytecode_add_alu(ctx->bc, &alu);
6561 if (r)
6562 return r;
6563 }
6564 return 0;
6565 }
6566
6567 /* 1 - src0 */
6568 for (i = 0; i < lasti + 1; i++) {
6569 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6570 continue;
6571
6572 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6573 alu.op = ALU_OP2_ADD;
6574 alu.src[0].sel = V_SQ_ALU_SRC_1;
6575 alu.src[0].chan = 0;
6576 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6577 r600_bytecode_src_toggle_neg(&alu.src[1]);
6578 alu.dst.sel = ctx->temp_reg;
6579 alu.dst.chan = i;
6580 if (i == lasti) {
6581 alu.last = 1;
6582 }
6583 alu.dst.write = 1;
6584 r = r600_bytecode_add_alu(ctx->bc, &alu);
6585 if (r)
6586 return r;
6587 }
6588
6589 /* (1 - src0) * src2 */
6590 for (i = 0; i < lasti + 1; i++) {
6591 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6592 continue;
6593
6594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6595 alu.op = ALU_OP2_MUL;
6596 alu.src[0].sel = ctx->temp_reg;
6597 alu.src[0].chan = i;
6598 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6599 alu.dst.sel = ctx->temp_reg;
6600 alu.dst.chan = i;
6601 if (i == lasti) {
6602 alu.last = 1;
6603 }
6604 alu.dst.write = 1;
6605 r = r600_bytecode_add_alu(ctx->bc, &alu);
6606 if (r)
6607 return r;
6608 }
6609
6610 /* src0 * src1 + (1 - src0) * src2 */
6611 if (ctx->src[0].abs)
6612 temp_regs[0] = r600_get_temp(ctx);
6613 else
6614 temp_regs[0] = 0;
6615 if (ctx->src[1].abs)
6616 temp_regs[1] = r600_get_temp(ctx);
6617 else
6618 temp_regs[1] = 0;
6619
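/* op3 instructions like MULADD have no source-abs modifier, so any |src|
 * operand is first copied through the temps reserved above by
 * tgsi_make_src_for_op3. */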
6620 for (i = 0; i < lasti + 1; i++) {
6621 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6622 continue;
6623
6624 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6625 alu.op = ALU_OP3_MULADD;
6626 alu.is_op3 = 1;
6627 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
6628 if (r)
6629 return r;
6630 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]);
6631 if (r)
6632 return r;
6633 alu.src[2].sel = ctx->temp_reg;
6634 alu.src[2].chan = i;
6635
6636 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6637 alu.dst.chan = i;
6638 if (i == lasti) {
6639 alu.last = 1;
6640 }
6641 r = r600_bytecode_add_alu(ctx->bc, &alu);
6642 if (r)
6643 return r;
6644 }
6645 return 0;
6646 }
6647
6648 static int tgsi_cmp(struct r600_shader_ctx *ctx)
6649 {
6650 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6651 struct r600_bytecode_alu alu;
6652 int i, r, j;
6653 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6654 int temp_regs[3];
6655
6656 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6657 temp_regs[j] = 0;
6658 if (ctx->src[j].abs)
6659 temp_regs[j] = r600_get_temp(ctx);
6660 }
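/* TGSI CMP is dst = (src0 < 0.0) ? src1 : src2, while CNDGE selects its
 * first option when src0 >= 0.0, so src1 and src2 swap places in the
 * operand setup below. */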
6661
6662 for (i = 0; i < lasti + 1; i++) {
6663 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6664 continue;
6665
6666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6667 alu.op = ALU_OP3_CNDGE;
6668 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
6669 if (r)
6670 return r;
6671 r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]);
6672 if (r)
6673 return r;
6674 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]);
6675 if (r)
6676 return r;
6677 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6678 alu.dst.chan = i;
6679 alu.dst.write = 1;
6680 alu.is_op3 = 1;
6681 if (i == lasti)
6682 alu.last = 1;
6683 r = r600_bytecode_add_alu(ctx->bc, &alu);
6684 if (r)
6685 return r;
6686 }
6687 return 0;
6688 }
6689
6690 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
6691 {
6692 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6693 struct r600_bytecode_alu alu;
6694 int i, r;
6695 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6696
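/* TGSI UCMP is dst = src0 ? src1 : src2; CNDE_INT selects its first
 * option when src0 == 0, so src1 and src2 swap places below. */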
6697 for (i = 0; i < lasti + 1; i++) {
6698 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6699 continue;
6700
6701 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6702 alu.op = ALU_OP3_CNDE_INT;
6703 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6704 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6705 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6706 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6707 alu.dst.chan = i;
6708 alu.dst.write = 1;
6709 alu.is_op3 = 1;
6710 if (i == lasti)
6711 alu.last = 1;
6712 r = r600_bytecode_add_alu(ctx->bc, &alu);
6713 if (r)
6714 return r;
6715 }
6716 return 0;
6717 }
6718
6719 static int tgsi_xpd(struct r600_shader_ctx *ctx)
6720 {
6721 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6722 static const unsigned int src0_swizzle[] = {2, 0, 1};
6723 static const unsigned int src1_swizzle[] = {1, 2, 0};
6724 struct r600_bytecode_alu alu;
6725 uint32_t use_temp = 0;
6726 int i, r;
6727
6728 if (inst->Dst[0].Register.WriteMask != 0xf)
6729 use_temp = 1;
6730
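/* Cross product in two passes: first tmp = src0.zxy * src1.yzx, then
 * dst = src0.yzx * src1.zxy - tmp, e.g. dst.x = a.y*b.z - a.z*b.y. */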
6731 for (i = 0; i < 4; i++) {
6732 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6733 alu.op = ALU_OP2_MUL;
6734 if (i < 3) {
6735 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
6736 r600_bytecode_src(&alu.src[1], &ctx->src[1], src1_swizzle[i]);
6737 } else {
6738 alu.src[0].sel = V_SQ_ALU_SRC_0;
6739 alu.src[0].chan = i;
6740 alu.src[1].sel = V_SQ_ALU_SRC_0;
6741 alu.src[1].chan = i;
6742 }
6743
6744 alu.dst.sel = ctx->temp_reg;
6745 alu.dst.chan = i;
6746 alu.dst.write = 1;
6747
6748 if (i == 3)
6749 alu.last = 1;
6750 r = r600_bytecode_add_alu(ctx->bc, &alu);
6751 if (r)
6752 return r;
6753 }
6754
6755 for (i = 0; i < 4; i++) {
6756 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6757 alu.op = ALU_OP3_MULADD;
6758
6759 if (i < 3) {
6760 r600_bytecode_src(&alu.src[0], &ctx->src[0], src1_swizzle[i]);
6761 r600_bytecode_src(&alu.src[1], &ctx->src[1], src0_swizzle[i]);
6762 } else {
6763 alu.src[0].sel = V_SQ_ALU_SRC_0;
6764 alu.src[0].chan = i;
6765 alu.src[1].sel = V_SQ_ALU_SRC_0;
6766 alu.src[1].chan = i;
6767 }
6768
6769 alu.src[2].sel = ctx->temp_reg;
6770 alu.src[2].neg = 1;
6771 alu.src[2].chan = i;
6772
6773 if (use_temp)
6774 alu.dst.sel = ctx->temp_reg;
6775 else
6776 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6777 alu.dst.chan = i;
6778 alu.dst.write = 1;
6779 alu.is_op3 = 1;
6780 if (i == 3)
6781 alu.last = 1;
6782 r = r600_bytecode_add_alu(ctx->bc, &alu);
6783 if (r)
6784 return r;
6785 }
6786 if (use_temp)
6787 return tgsi_helper_copy(ctx, inst);
6788 return 0;
6789 }
6790
6791 static int tgsi_exp(struct r600_shader_ctx *ctx)
6792 {
6793 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6794 struct r600_bytecode_alu alu;
6795 int r;
6796 int i;
6797
6798 /* result.x = 2^floor(src); */
6799 if (inst->Dst[0].Register.WriteMask & 1) {
6800 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6801
6802 alu.op = ALU_OP1_FLOOR;
6803 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6804
6805 alu.dst.sel = ctx->temp_reg;
6806 alu.dst.chan = 0;
6807 alu.dst.write = 1;
6808 alu.last = 1;
6809 r = r600_bytecode_add_alu(ctx->bc, &alu);
6810 if (r)
6811 return r;
6812
6813 if (ctx->bc->chip_class == CAYMAN) {
6814 for (i = 0; i < 3; i++) {
6815 alu.op = ALU_OP1_EXP_IEEE;
6816 alu.src[0].sel = ctx->temp_reg;
6817 alu.src[0].chan = 0;
6818
6819 alu.dst.sel = ctx->temp_reg;
6820 alu.dst.chan = i;
6821 alu.dst.write = i == 0;
6822 alu.last = i == 2;
6823 r = r600_bytecode_add_alu(ctx->bc, &alu);
6824 if (r)
6825 return r;
6826 }
6827 } else {
6828 alu.op = ALU_OP1_EXP_IEEE;
6829 alu.src[0].sel = ctx->temp_reg;
6830 alu.src[0].chan = 0;
6831
6832 alu.dst.sel = ctx->temp_reg;
6833 alu.dst.chan = 0;
6834 alu.dst.write = 1;
6835 alu.last = 1;
6836 r = r600_bytecode_add_alu(ctx->bc, &alu);
6837 if (r)
6838 return r;
6839 }
6840 }
6841
6842 /* result.y = tmp - floor(tmp); */
6843 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
6844 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6845
6846 alu.op = ALU_OP1_FRACT;
6847 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6848
6849 alu.dst.sel = ctx->temp_reg;
6850 #if 0
6851 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
6854 #endif
6855 alu.dst.write = 1;
6856 alu.dst.chan = 1;
6857
6858 alu.last = 1;
6859
6860 r = r600_bytecode_add_alu(ctx->bc, &alu);
6861 if (r)
6862 return r;
6863 }
6864
6865 /* result.z = RoughApprox2ToX(tmp);*/
6866 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
6867 if (ctx->bc->chip_class == CAYMAN) {
6868 for (i = 0; i < 3; i++) {
6869 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6870 alu.op = ALU_OP1_EXP_IEEE;
6871 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6872
6873 alu.dst.sel = ctx->temp_reg;
6874 alu.dst.chan = i;
6875 if (i == 2) {
6876 alu.dst.write = 1;
6877 alu.last = 1;
6878 }
6879
6880 r = r600_bytecode_add_alu(ctx->bc, &alu);
6881 if (r)
6882 return r;
6883 }
6884 } else {
6885 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6886 alu.op = ALU_OP1_EXP_IEEE;
6887 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6888
6889 alu.dst.sel = ctx->temp_reg;
6890 alu.dst.write = 1;
6891 alu.dst.chan = 2;
6892
6893 alu.last = 1;
6894
6895 r = r600_bytecode_add_alu(ctx->bc, &alu);
6896 if (r)
6897 return r;
6898 }
6899 }
6900
6901 /* result.w = 1.0;*/
6902 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
6903 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6904
6905 alu.op = ALU_OP1_MOV;
6906 alu.src[0].sel = V_SQ_ALU_SRC_1;
6907 alu.src[0].chan = 0;
6908
6909 alu.dst.sel = ctx->temp_reg;
6910 alu.dst.chan = 3;
6911 alu.dst.write = 1;
6912 alu.last = 1;
6913 r = r600_bytecode_add_alu(ctx->bc, &alu);
6914 if (r)
6915 return r;
6916 }
6917 return tgsi_helper_copy(ctx, inst);
6918 }
6919
6920 static int tgsi_log(struct r600_shader_ctx *ctx)
6921 {
6922 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6923 struct r600_bytecode_alu alu;
6924 int r;
6925 int i;
6926
6927 /* result.x = floor(log2(|src|)); */
6928 if (inst->Dst[0].Register.WriteMask & 1) {
6929 if (ctx->bc->chip_class == CAYMAN) {
6930 for (i = 0; i < 3; i++) {
6931 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6932
6933 alu.op = ALU_OP1_LOG_IEEE;
6934 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6935 r600_bytecode_src_set_abs(&alu.src[0]);
6936
6937 alu.dst.sel = ctx->temp_reg;
6938 alu.dst.chan = i;
6939 if (i == 0)
6940 alu.dst.write = 1;
6941 if (i == 2)
6942 alu.last = 1;
6943 r = r600_bytecode_add_alu(ctx->bc, &alu);
6944 if (r)
6945 return r;
6946 }
6947
6948 } else {
6949 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6950
6951 alu.op = ALU_OP1_LOG_IEEE;
6952 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6953 r600_bytecode_src_set_abs(&alu.src[0]);
6954
6955 alu.dst.sel = ctx->temp_reg;
6956 alu.dst.chan = 0;
6957 alu.dst.write = 1;
6958 alu.last = 1;
6959 r = r600_bytecode_add_alu(ctx->bc, &alu);
6960 if (r)
6961 return r;
6962 }
6963
6964 alu.op = ALU_OP1_FLOOR;
6965 alu.src[0].sel = ctx->temp_reg;
6966 alu.src[0].chan = 0;
6967
6968 alu.dst.sel = ctx->temp_reg;
6969 alu.dst.chan = 0;
6970 alu.dst.write = 1;
6971 alu.last = 1;
6972
6973 r = r600_bytecode_add_alu(ctx->bc, &alu);
6974 if (r)
6975 return r;
6976 }
6977
6978 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
6979 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
6980
6981 if (ctx->bc->chip_class == CAYMAN) {
6982 for (i = 0; i < 3; i++) {
6983 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6984
6985 alu.op = ALU_OP1_LOG_IEEE;
6986 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6987 r600_bytecode_src_set_abs(&alu.src[0]);
6988
6989 alu.dst.sel = ctx->temp_reg;
6990 alu.dst.chan = i;
6991 if (i == 1)
6992 alu.dst.write = 1;
6993 if (i == 2)
6994 alu.last = 1;
6995
6996 r = r600_bytecode_add_alu(ctx->bc, &alu);
6997 if (r)
6998 return r;
6999 }
7000 } else {
7001 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7002
7003 alu.op = ALU_OP1_LOG_IEEE;
7004 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7005 r600_bytecode_src_set_abs(&alu.src[0]);
7006
7007 alu.dst.sel = ctx->temp_reg;
7008 alu.dst.chan = 1;
7009 alu.dst.write = 1;
7010 alu.last = 1;
7011
7012 r = r600_bytecode_add_alu(ctx->bc, &alu);
7013 if (r)
7014 return r;
7015 }
7016
7017 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7018
7019 alu.op = ALU_OP1_FLOOR;
7020 alu.src[0].sel = ctx->temp_reg;
7021 alu.src[0].chan = 1;
7022
7023 alu.dst.sel = ctx->temp_reg;
7024 alu.dst.chan = 1;
7025 alu.dst.write = 1;
7026 alu.last = 1;
7027
7028 r = r600_bytecode_add_alu(ctx->bc, &alu);
7029 if (r)
7030 return r;
7031
7032 if (ctx->bc->chip_class == CAYMAN) {
7033 for (i = 0; i < 3; i++) {
7034 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7035 alu.op = ALU_OP1_EXP_IEEE;
7036 alu.src[0].sel = ctx->temp_reg;
7037 alu.src[0].chan = 1;
7038
7039 alu.dst.sel = ctx->temp_reg;
7040 alu.dst.chan = i;
7041 if (i == 1)
7042 alu.dst.write = 1;
7043 if (i == 2)
7044 alu.last = 1;
7045
7046 r = r600_bytecode_add_alu(ctx->bc, &alu);
7047 if (r)
7048 return r;
7049 }
7050 } else {
7051 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7052 alu.op = ALU_OP1_EXP_IEEE;
7053 alu.src[0].sel = ctx->temp_reg;
7054 alu.src[0].chan = 1;
7055
7056 alu.dst.sel = ctx->temp_reg;
7057 alu.dst.chan = 1;
7058 alu.dst.write = 1;
7059 alu.last = 1;
7060
7061 r = r600_bytecode_add_alu(ctx->bc, &alu);
7062 if (r)
7063 return r;
7064 }
7065
7066 if (ctx->bc->chip_class == CAYMAN) {
7067 for (i = 0; i < 3; i++) {
7068 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7069 alu.op = ALU_OP1_RECIP_IEEE;
7070 alu.src[0].sel = ctx->temp_reg;
7071 alu.src[0].chan = 1;
7072
7073 alu.dst.sel = ctx->temp_reg;
7074 alu.dst.chan = i;
7075 if (i == 1)
7076 alu.dst.write = 1;
7077 if (i == 2)
7078 alu.last = 1;
7079
7080 r = r600_bytecode_add_alu(ctx->bc, &alu);
7081 if (r)
7082 return r;
7083 }
7084 } else {
7085 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7086 alu.op = ALU_OP1_RECIP_IEEE;
7087 alu.src[0].sel = ctx->temp_reg;
7088 alu.src[0].chan = 1;
7089
7090 alu.dst.sel = ctx->temp_reg;
7091 alu.dst.chan = 1;
7092 alu.dst.write = 1;
7093 alu.last = 1;
7094
7095 r = r600_bytecode_add_alu(ctx->bc, &alu);
7096 if (r)
7097 return r;
7098 }
7099
7100 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7101
7102 alu.op = ALU_OP2_MUL;
7103
7104 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7105 r600_bytecode_src_set_abs(&alu.src[0]);
7106
7107 alu.src[1].sel = ctx->temp_reg;
7108 alu.src[1].chan = 1;
7109
7110 alu.dst.sel = ctx->temp_reg;
7111 alu.dst.chan = 1;
7112 alu.dst.write = 1;
7113 alu.last = 1;
7114
7115 r = r600_bytecode_add_alu(ctx->bc, &alu);
7116 if (r)
7117 return r;
7118 }
7119
7120 /* result.z = log2(|src|);*/
7121 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
7122 if (ctx->bc->chip_class == CAYMAN) {
7123 for (i = 0; i < 3; i++) {
7124 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7125
7126 alu.op = ALU_OP1_LOG_IEEE;
7127 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7128 r600_bytecode_src_set_abs(&alu.src[0]);
7129
7130 alu.dst.sel = ctx->temp_reg;
7131 if (i == 2)
7132 alu.dst.write = 1;
7133 alu.dst.chan = i;
7134 if (i == 2)
7135 alu.last = 1;
7136
7137 r = r600_bytecode_add_alu(ctx->bc, &alu);
7138 if (r)
7139 return r;
7140 }
7141 } else {
7142 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7143
7144 alu.op = ALU_OP1_LOG_IEEE;
7145 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7146 r600_bytecode_src_set_abs(&alu.src[0]);
7147
7148 alu.dst.sel = ctx->temp_reg;
7149 alu.dst.write = 1;
7150 alu.dst.chan = 2;
7151 alu.last = 1;
7152
7153 r = r600_bytecode_add_alu(ctx->bc, &alu);
7154 if (r)
7155 return r;
7156 }
7157 }
7158
7159 /* result.w = 1.0; */
7160 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
7161 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7162
7163 alu.op = ALU_OP1_MOV;
7164 alu.src[0].sel = V_SQ_ALU_SRC_1;
7165 alu.src[0].chan = 0;
7166
7167 alu.dst.sel = ctx->temp_reg;
7168 alu.dst.chan = 3;
7169 alu.dst.write = 1;
7170 alu.last = 1;
7171
7172 r = r600_bytecode_add_alu(ctx->bc, &alu);
7173 if (r)
7174 return r;
7175 }
7176
7177 return tgsi_helper_copy(ctx, inst);
7178 }
7179
7180 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
7181 {
7182 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7183 struct r600_bytecode_alu alu;
7184 int r;
7185 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7186 unsigned reg = inst->Dst[0].Register.Index > 0 ? ctx->bc->index_reg[inst->Dst[0].Register.Index - 1] : ctx->bc->ar_reg;
7187
7188 assert(inst->Dst[0].Register.Index < 3);
7189 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7190
7191 switch (inst->Instruction.Opcode) {
7192 case TGSI_OPCODE_ARL:
7193 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
7194 break;
7195 case TGSI_OPCODE_ARR:
7196 alu.op = ALU_OP1_FLT_TO_INT;
7197 break;
7198 case TGSI_OPCODE_UARL:
7199 alu.op = ALU_OP1_MOV;
7200 break;
7201 default:
7202 assert(0);
7203 return -1;
7204 }
7205
7206 for (i = 0; i <= lasti; ++i) {
7207 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7208 continue;
7209 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7210 alu.last = i == lasti;
7211 alu.dst.sel = reg;
7212 alu.dst.chan = i;
7213 alu.dst.write = 1;
7214 r = r600_bytecode_add_alu(ctx->bc, &alu);
7215 if (r)
7216 return r;
7217 }
7218
7219 if (inst->Dst[0].Register.Index > 0)
7220 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
7221 else
7222 ctx->bc->ar_loaded = 0;
7223
7224 return 0;
7225 }
7226 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
7227 {
7228 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7229 struct r600_bytecode_alu alu;
7230 int r;
7231 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7232
7233 switch (inst->Instruction.Opcode) {
7234 case TGSI_OPCODE_ARL:
7235 memset(&alu, 0, sizeof(alu));
7236 alu.op = ALU_OP1_FLOOR;
7237 alu.dst.sel = ctx->bc->ar_reg;
7238 alu.dst.write = 1;
7239 for (i = 0; i <= lasti; ++i) {
7240 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7241 alu.dst.chan = i;
7242 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7243 alu.last = i == lasti;
7244 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7245 return r;
7246 }
7247 }
7248
7249 memset(&alu, 0, sizeof(alu));
7250 alu.op = ALU_OP1_FLT_TO_INT;
7251 alu.src[0].sel = ctx->bc->ar_reg;
7252 alu.dst.sel = ctx->bc->ar_reg;
7253 alu.dst.write = 1;
7254 /* FLT_TO_INT is trans-only on r600/r700 */
7255 alu.last = TRUE;
7256 for (i = 0; i <= lasti; ++i) {
7257 alu.dst.chan = i;
7258 alu.src[0].chan = i;
7259 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7260 return r;
7261 }
7262 break;
7263 case TGSI_OPCODE_ARR:
7264 memset(&alu, 0, sizeof(alu));
7265 alu.op = ALU_OP1_FLT_TO_INT;
7266 alu.dst.sel = ctx->bc->ar_reg;
7267 alu.dst.write = 1;
7268 /* FLT_TO_INT is trans-only on r600/r700 */
7269 alu.last = TRUE;
7270 for (i = 0; i <= lasti; ++i) {
7271 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7272 alu.dst.chan = i;
7273 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7274 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7275 return r;
7276 }
7277 }
7278 break;
7279 case TGSI_OPCODE_UARL:
7280 memset(&alu, 0, sizeof(alu));
7281 alu.op = ALU_OP1_MOV;
7282 alu.dst.sel = ctx->bc->ar_reg;
7283 alu.dst.write = 1;
7284 for (i = 0; i <= lasti; ++i) {
7285 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7286 alu.dst.chan = i;
7287 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7288 alu.last = i == lasti;
7289 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7290 return r;
7291 }
7292 }
7293 break;
7294 default:
7295 assert(0);
7296 return -1;
7297 }
7298
7299 ctx->bc->ar_loaded = 0;
7300 return 0;
7301 }
7302
7303 static int tgsi_opdst(struct r600_shader_ctx *ctx)
7304 {
7305 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7306 struct r600_bytecode_alu alu;
7307 int i, r = 0;
7308
7309 for (i = 0; i < 4; i++) {
7310 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7311
7312 alu.op = ALU_OP2_MUL;
7313 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7314
7315 if (i == 0 || i == 3) {
7316 alu.src[0].sel = V_SQ_ALU_SRC_1;
7317 } else {
7318 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7319 }
7320
7321 if (i == 0 || i == 2) {
7322 alu.src[1].sel = V_SQ_ALU_SRC_1;
7323 } else {
7324 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
7325 }
7326 if (i == 3)
7327 alu.last = 1;
7328 r = r600_bytecode_add_alu(ctx->bc, &alu);
7329 if (r)
7330 return r;
7331 }
7332 return 0;
7333 }
7334
7335 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type)
7336 {
7337 struct r600_bytecode_alu alu;
7338 int r;
7339
7340 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7341 alu.op = opcode;
7342 alu.execute_mask = 1;
7343 alu.update_pred = 1;
7344
7345 alu.dst.sel = ctx->temp_reg;
7346 alu.dst.write = 1;
7347 alu.dst.chan = 0;
7348
7349 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7350 alu.src[1].sel = V_SQ_ALU_SRC_0;
7351 alu.src[1].chan = 0;
7352
7353 alu.last = 1;
7354
7355 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
7356 if (r)
7357 return r;
7358 return 0;
7359 }
7360
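/* Emit 'pops' stack pops. Where possible the pop is folded into the
 * preceding ALU clause by retyping it as ALU_POP_AFTER (one pop) or
 * ALU_POP2_AFTER (two pops), which saves a CF instruction; otherwise an
 * explicit CF_OP_POP is emitted. */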
7361 static int pops(struct r600_shader_ctx *ctx, int pops)
7362 {
7363 unsigned force_pop = ctx->bc->force_add_cf;
7364
7365 if (!force_pop) {
7366 int alu_pop = 3;
7367 if (ctx->bc->cf_last) {
7368 if (ctx->bc->cf_last->op == CF_OP_ALU)
7369 alu_pop = 0;
7370 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
7371 alu_pop = 1;
7372 }
7373 alu_pop += pops;
7374 if (alu_pop == 1) {
7375 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
7376 ctx->bc->force_add_cf = 1;
7377 } else if (alu_pop == 2) {
7378 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
7379 ctx->bc->force_add_cf = 1;
7380 } else {
7381 force_pop = 1;
7382 }
7383 }
7384
7385 if (force_pop) {
7386 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
7387 ctx->bc->cf_last->pop_count = pops;
7388 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
7389 }
7390
7391 return 0;
7392 }
7393
7394 static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx,
7395 unsigned reason)
7396 {
7397 struct r600_stack_info *stack = &ctx->bc->stack;
7398 unsigned elements, entries;
7399
7400 unsigned entry_size = stack->entry_size;
7401
7402 elements = (stack->loop + stack->push_wqm) * entry_size;
7403 elements += stack->push;
7404
7405 switch (ctx->bc->chip_class) {
7406 case R600:
7407 case R700:
7408 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
7409 * the stack must be reserved to hold the current active/continue
7410 * masks */
7411 if (reason == FC_PUSH_VPM) {
7412 elements += 2;
7413 }
7414 break;
7415
7416 case CAYMAN:
7417 /* r9xx: any stack operation on empty stack consumes 2 additional
7418 * elements */
7419 elements += 2;
7420
7421 /* fallthrough */
7422 /* FIXME: do the two elements added above cover the cases for the
7423 * r8xx+ below? */
7424
7425 case EVERGREEN:
7426 /* r8xx+: 2 extra elements are not always required, but one extra
7427 * element must be added for each of the following cases:
7428 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
7429 * stack usage.
7430 * (Currently we don't use ALU_ELSE_AFTER.)
7431 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
7432 * PUSH instruction executed.
7433 *
7434 * NOTE: it seems we also need to reserve additional element in some
7435 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
7436 * then STACK_SIZE should be 2 instead of 1 */
7437 if (reason == FC_PUSH_VPM) {
7438 elements += 1;
7439 }
7440 break;
7441
7442 default:
7443 assert(0);
7444 break;
7445 }
7446
7447 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
7448 * for all chips, so we use 4 in the final formula, not the real entry_size
7449 * for the chip */
7450 entry_size = 4;
7451
7452 entries = (elements + (entry_size - 1)) / entry_size;
7453
7454 if (entries > stack->max_entries)
7455 stack->max_entries = entries;
7456 }
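/* Worked example (illustrative, assuming stack->entry_size == 4): on
 * EVERGREEN with stack.loop = 2, stack.push_wqm = 0, stack.push = 1 and
 * reason == FC_PUSH_VPM, elements = (2 + 0) * 4 + 1 + 1 = 10, so
 * entries = (10 + 4 - 1) / 4 = 3. */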
7457
7458 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
7459 {
7460 switch(reason) {
7461 case FC_PUSH_VPM:
7462 --ctx->bc->stack.push;
7463 assert(ctx->bc->stack.push >= 0);
7464 break;
7465 case FC_PUSH_WQM:
7466 --ctx->bc->stack.push_wqm;
7467 assert(ctx->bc->stack.push_wqm >= 0);
7468 break;
7469 case FC_LOOP:
7470 --ctx->bc->stack.loop;
7471 assert(ctx->bc->stack.loop >= 0);
7472 break;
7473 default:
7474 assert(0);
7475 break;
7476 }
7477 }
7478
7479 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
7480 {
7481 switch (reason) {
7482 case FC_PUSH_VPM:
7483 ++ctx->bc->stack.push;
7484 break;
7485 case FC_PUSH_WQM:
7486 ++ctx->bc->stack.push_wqm;
break; /* callstack_pop() only undoes one counter per reason */
7487 case FC_LOOP:
7488 ++ctx->bc->stack.loop;
7489 break;
7490 default:
7491 assert(0);
7492 }
7493
7494 callstack_update_max_depth(ctx, reason);
7495 }
7496
7497 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
7498 {
7499 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
7500
7501 sp->mid = realloc((void *)sp->mid,
7502 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
7503 sp->mid[sp->num_mid] = ctx->bc->cf_last;
7504 sp->num_mid++;
7505 }
7506
7507 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
7508 {
7509 ctx->bc->fc_sp++;
7510 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
7511 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
7512 }
7513
7514 static void fc_poplevel(struct r600_shader_ctx *ctx)
7515 {
7516 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
7517 free(sp->mid);
7518 sp->mid = NULL;
7519 sp->num_mid = 0;
7520 sp->start = NULL;
7521 sp->type = 0;
7522 ctx->bc->fc_sp--;
7523 }
7524
7525 #if 0
7526 static int emit_return(struct r600_shader_ctx *ctx)
7527 {
7528 r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
7529 return 0;
7530 }
7531
7532 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
7533 {
7534
7535 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
7536 ctx->bc->cf_last->pop_count = pops;
7537 /* XXX work out offset */
7538 return 0;
7539 }
7540
7541 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
7542 {
7543 return 0;
7544 }
7545
7546 static void emit_testflag(struct r600_shader_ctx *ctx)
7547 {
7548
7549 }
7550
7551 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
7552 {
7553 emit_testflag(ctx);
7554 emit_jump_to_offset(ctx, 1, 4);
7555 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
7556 pops(ctx, ifidx + 1);
7557 emit_return(ctx);
7558 }
7559
7560 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
7561 {
7562 emit_testflag(ctx);
7563
7564 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
7565 ctx->bc->cf_last->pop_count = 1;
7566
7567 fc_set_mid(ctx, fc_sp);
7568
7569 pops(ctx, 1);
7570 }
7571 #endif
7572
7573 static int emit_if(struct r600_shader_ctx *ctx, int opcode)
7574 {
7575 int alu_type = CF_OP_ALU_PUSH_BEFORE;
7576
7577 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
7578 * LOOP_STARTxxx for nested loops may put the branch stack into a state
7579 * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
7580 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
7581 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
7582 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
7583 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
7584 alu_type = CF_OP_ALU;
7585 }
7586
7587 emit_logic_pred(ctx, opcode, alu_type);
7588
7589 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
7590
7591 fc_pushlevel(ctx, FC_IF);
7592
7593 callstack_push(ctx, FC_PUSH_VPM);
7594 return 0;
7595 }
7596
7597 static int tgsi_if(struct r600_shader_ctx *ctx)
7598 {
7599 return emit_if(ctx, ALU_OP2_PRED_SETNE);
7600 }
7601
7602 static int tgsi_uif(struct r600_shader_ctx *ctx)
7603 {
7604 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
7605 }
7606
7607 static int tgsi_else(struct r600_shader_ctx *ctx)
7608 {
7609 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
7610 ctx->bc->cf_last->pop_count = 1;
7611
7612 fc_set_mid(ctx, ctx->bc->fc_sp);
7613 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
7614 return 0;
7615 }
7616
7617 static int tgsi_endif(struct r600_shader_ctx *ctx)
7618 {
7619 pops(ctx, 1);
7620 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
7621 R600_ERR("if/endif unbalanced in shader\n");
7622 return -1;
7623 }
7624
7625 if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
7626 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
7627 ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
7628 } else {
7629 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
7630 }
7631 fc_poplevel(ctx);
7632
7633 callstack_pop(ctx, FC_PUSH_VPM);
7634 return 0;
7635 }
7636
7637 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
7638 {
7639 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
7640 * limited to 4096 iterations, like the other LOOP_* instructions. */
7641 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
7642
7643 fc_pushlevel(ctx, FC_LOOP);
7644
7645 /* check stack depth */
7646 callstack_push(ctx, FC_LOOP);
7647 return 0;
7648 }
7649
7650 static int tgsi_endloop(struct r600_shader_ctx *ctx)
7651 {
7652 int i;
7653
7654 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
7655
7656 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
7657 R600_ERR("loop/endloop in shader code are not paired.\n");
7658 return -EINVAL;
7659 }
7660
7661 /* fixup loop pointers - from r600isa
7662 LOOP END points to CF after LOOP START,
7663 LOOP START points to CF after LOOP END,
7664 BRK/CONT point to LOOP END CF
7665 */
7666 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
7667
7668 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
7669
7670 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
7671 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
7672 }
7673 /* XXX add LOOPRET support */
7674 fc_poplevel(ctx);
7675 callstack_pop(ctx, FC_LOOP);
7676 return 0;
7677 }
7678
static int tgsi_loop_breakc(struct r600_shader_ctx *ctx)
{
	int r;
	unsigned int fscp;

	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--) {
		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
			break;
	}
	if (fscp == 0) {
		R600_ERR("BREAKC not inside loop/endloop pair\n");
		return -EINVAL;
	}

	if (ctx->bc->chip_class == EVERGREEN &&
	    ctx->bc->family != CHIP_CYPRESS &&
	    ctx->bc->family != CHIP_JUNIPER) {
		/* HW bug: ALU_BREAK does not save the active mask correctly */
		r = tgsi_uif(ctx);
		if (r)
			return r;

		r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_BREAK);
		if (r)
			return r;
		fc_set_mid(ctx, fscp);

		return tgsi_endif(ctx);
	} else {
		r = emit_logic_pred(ctx, ALU_OP2_PRED_SETE_INT, CF_OP_ALU_BREAK);
		if (r)
			return r;
		fc_set_mid(ctx, fscp);
	}

	return 0;
}

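/* BRK and CONT are unconditional; they only need the enclosing loop so the
 * matching ENDLOOP can point them at its LOOP_END CF.
 */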
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
{
	unsigned int fscp;

	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--) {
		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
			break;
	}

	if (fscp == 0) {
		R600_ERR("Break not inside loop/endloop pair\n");
		return -EINVAL;
	}

	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);

	fc_set_mid(ctx, fscp);

	return 0;
}

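/* The stream argument of EMIT/ENDPRIM is read back from the literal
 * (immediate) pool; this code assumes it is always an immediate.
 */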
static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
	int r;

	if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
		emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);

	r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
	if (!r)
		ctx->bc->cf_last->count = stream; /* count field for CUT/EMIT_VERTEX selects the stream */
	return r;
}

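/* There is no single unsigned integer MAD op here, so UMAD
 * (dst = src0 * src1 + src2) is lowered to MULLO_UINT into a temp followed
 * by ADD_INT; on Cayman the multiply is replicated across all four vector
 * slots with only the needed channel written.
 */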
static int tgsi_umad(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, k, r;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);

	/* src0 * src1 */
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;

		if (ctx->bc->chip_class == CAYMAN) {
			for (j = 0; j < 4; j++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));

				alu.op = ALU_OP2_MULLO_UINT;
				for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
					r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
				}
				alu.dst.chan = j;
				alu.dst.sel = ctx->temp_reg;
				alu.dst.write = (j == i);
				if (j == 3)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));

			alu.dst.chan = i;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.write = 1;

			alu.op = ALU_OP2_MULLO_UINT;
			for (j = 0; j < 2; j++) {
				r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
			}

			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* + src2 */
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

		alu.op = ALU_OP2_ADD_INT;

		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;

		r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
		if (i == lasti) {
			alu.last = 1;
		}
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

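/* Per-family dispatch tables: each entry maps a TGSI opcode to the r600 ALU
 * or fetch opcode it lowers to, plus the emit callback that performs the
 * translation. Entries are indexed by TGSI opcode via designated
 * initializers; bare numeric indices are opcode slots without a current
 * TGSI name.
 */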
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_r600_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},

	/* XXX:
	 * For state trackers other than OpenGL, we'll want to use
	 * _RECIP_IEEE instead.
	 */
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_CLAMPED, tgsi_trans_srcx_replicate},

	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
	[TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_r600_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2_trans},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[104] = { ALU_OP0_NOP, tgsi_unsupported},
	[105] = { ALU_OP0_NOP, tgsi_unsupported},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[112] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_loop_breakc},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	[118] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2_swap},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_r600_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_unsupported},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_unsupported},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_unsupported},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_unsupported},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_unsupported},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_unsupported},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_unsupported},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_unsupported},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

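/* Evergreen variant: relative to R600, the float<->int conversions and
 * shifts run in the vector slots (tgsi_f2i, plain tgsi_op2), fine
 * derivatives, TG4/LODQ and the bitfield ops are wired up, and the 64-bit
 * (double) opcodes are implemented.
 */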
static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
	[TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[104] = { ALU_OP0_NOP, tgsi_unsupported},
	[105] = { ALU_OP0_NOP, tgsi_unsupported},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[112] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	[118] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_f2i},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

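/* Cayman variant: the transcendentals (RCP, RSQ, SQRT, EX2, LG2, SIN, COS,
 * POW) and the 32-bit integer multiplies have no t-slot and are broadcast
 * across the vector slots instead (the cayman_* helpers).
 */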
static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
	[TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
	[TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[104] = { ALU_OP0_NOP, tgsi_unsupported},
	[105] = { ALU_OP0_NOP, tgsi_unsupported},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[112] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	[118] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};