r600: fix sampler indexing with texture buffers sampling
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_sq.h"
24 #include "r600_formats.h"
25 #include "r600_opcodes.h"
26 #include "r600_shader.h"
27 #include "r600d.h"
28
29 #include "sb/sb_public.h"
30
31 #include "pipe/p_shader_tokens.h"
32 #include "tgsi/tgsi_info.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_scan.h"
35 #include "tgsi/tgsi_dump.h"
36 #include "util/u_bitcast.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include <stdio.h>
40 #include <errno.h>
41
42 /* CAYMAN notes
43 Why CAYMAN got loops for lots of instructions is explained here.
44
45 -These 8xx t-slot only ops are implemented in all vector slots.
46 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
47 These 8xx t-slot only opcodes become vector ops, with all four
48 slots expecting the arguments on sources a and b. Result is
49 broadcast to all channels.
50 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
51 These 8xx t-slot only opcodes become vector ops in the z, y, and
52 x slots.
53 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
54 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
55 SQRT_IEEE/_64
56 SIN/COS
57 The w slot may have an independent co-issued operation, or if the
58 result is required to be in the w slot, the opcode above may be
59 issued in the w slot as well.
60 The compiler must issue the source argument to slots z, y, and x
61 */
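/* A minimal sketch of the pattern this produces, with src0/src1, dst_sel and
 * dst_chan standing in for the operands; single_alu_op2() further down emits
 * the real CAYMAN loop for MULLO_INT. The t-slot-only op is issued once per
 * vector slot with identical sources, and only the requested channel is
 * written:
 */
#if 0
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(alu));
		alu.op = ALU_OP2_MULLO_INT;		/* t-slot only on 8xx */
		alu.src[0] = src0;			/* same operands in every slot */
		alu.src[1] = src1;
		alu.dst.sel = dst_sel;
		alu.dst.chan = i;
		alu.dst.write = (i == dst_chan);	/* result is broadcast; keep one copy */
		alu.last = (i == 3);
		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;
	}
#endif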
62
63 /* Contents of r0 on entry to various shaders
64
65 VS - .x = VertexID
66 .y = RelVertexID (??)
67 .w = InstanceID
68
69 GS - r0.xyw, r1.xyz = per-vertex offsets
70 r0.z = PrimitiveID
71
72 TCS - .x = PatchID
73 .y = RelPatchID (??)
74 .z = InvocationID
75 .w = tess factor base.
76
77 TES - .x = TessCoord.x
78 - .y = TessCoord.y
79 - .z = RelPatchID (??)
80 - .w = PrimitiveID
81
82 PS - face_gpr.z = SampleMask
83 face_gpr.w = SampleID
84 */
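/* For illustration, with dst_gpr standing in for a caller-allocated GPR:
 * given the layout above, a VS reads InstanceID by sourcing GPR0 channel w,
 * which is what the TGSI_SEMANTIC_INSTANCEID case in tgsi_src() below does:
 */
#if 0
	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP1_MOV;
	alu.src[0].sel = 0;	/* r0 */
	alu.src[0].chan = 3;	/* .w = InstanceID */
	alu.dst.sel = dst_gpr;
	alu.dst.write = 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
#endif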
85 #define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
86 static int r600_shader_from_tgsi(struct r600_context *rctx,
87 struct r600_pipe_shader *pipeshader,
88 union r600_shader_key key);
89
90 static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
91 int size, unsigned comp_mask) {
92
93 if (!size)
94 return;
95
96 if (ps->num_arrays == ps->max_arrays) {
97 ps->max_arrays += 64;
98 ps->arrays = realloc(ps->arrays, ps->max_arrays *
99 sizeof(struct r600_shader_array));
100 }
101
102 int n = ps->num_arrays;
103 ++ps->num_arrays;
104
105 ps->arrays[n].comp_mask = comp_mask;
106 ps->arrays[n].gpr_start = start_gpr;
107 ps->arrays[n].gpr_count = size;
108 }
109
110 static void r600_dump_streamout(struct pipe_stream_output_info *so)
111 {
112 unsigned i;
113
114 fprintf(stderr, "STREAMOUT\n");
115 for (i = 0; i < so->num_outputs; i++) {
116 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
117 so->output[i].start_component;
118 fprintf(stderr, " %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
119 i,
120 so->output[i].stream,
121 so->output[i].output_buffer,
122 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
123 so->output[i].register_index,
124 mask & 1 ? "x" : "",
125 mask & 2 ? "y" : "",
126 mask & 4 ? "z" : "",
127 mask & 8 ? "w" : "",
128 so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
129 }
130 }
131
132 static int store_shader(struct pipe_context *ctx,
133 struct r600_pipe_shader *shader)
134 {
135 struct r600_context *rctx = (struct r600_context *)ctx;
136 uint32_t *ptr, i;
137
138 if (shader->bo == NULL) {
139 shader->bo = (struct r600_resource*)
140 pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
141 if (shader->bo == NULL) {
142 return -ENOMEM;
143 }
144 ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
145 if (R600_BIG_ENDIAN) {
146 for (i = 0; i < shader->shader.bc.ndw; ++i) {
147 ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
148 }
149 } else {
150 memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
151 }
152 rctx->b.ws->buffer_unmap(shader->bo->buf);
153 }
154
155 return 0;
156 }
157
158 int r600_pipe_shader_create(struct pipe_context *ctx,
159 struct r600_pipe_shader *shader,
160 union r600_shader_key key)
161 {
162 struct r600_context *rctx = (struct r600_context *)ctx;
163 struct r600_pipe_shader_selector *sel = shader->selector;
164 int r;
165 bool dump = r600_can_dump_shader(&rctx->screen->b,
166 tgsi_get_processor_type(sel->tokens));
167 unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
168 unsigned sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
169 unsigned export_shader;
170
171 shader->shader.bc.isa = rctx->isa;
172
173 if (dump) {
174 fprintf(stderr, "--------------------------------------------------------------\n");
175 tgsi_dump(sel->tokens, 0);
176
177 if (sel->so.num_outputs) {
178 r600_dump_streamout(&sel->so);
179 }
180 }
181 r = r600_shader_from_tgsi(rctx, shader, key);
182 if (r) {
183 R600_ERR("translation from TGSI failed !\n");
184 goto error;
185 }
186 if (shader->shader.processor_type == PIPE_SHADER_VERTEX) {
187 /* only disable for vertex shaders in tess paths */
188 if (key.vs.as_ls)
189 use_sb = 0;
190 }
191 use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_CTRL);
192 use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_EVAL);
193 use_sb &= (shader->shader.processor_type != PIPE_SHADER_COMPUTE);
194
195 /* disable SB for shaders using doubles */
196 use_sb &= !shader->shader.uses_doubles;
197
198 use_sb &= !shader->shader.uses_atomics;
199 use_sb &= !shader->shader.uses_images;
200
201 /* Check if the bytecode has already been built. */
202 if (!shader->shader.bc.bytecode) {
203 r = r600_bytecode_build(&shader->shader.bc);
204 if (r) {
205 R600_ERR("building bytecode failed !\n");
206 goto error;
207 }
208 }
209
210 if (dump && !sb_disasm) {
211 fprintf(stderr, "--------------------------------------------------------------\n");
212 r600_bytecode_disasm(&shader->shader.bc);
213 fprintf(stderr, "______________________________________________________________\n");
214 } else if ((dump && sb_disasm) || use_sb) {
215 r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
216 dump, use_sb);
217 if (r) {
218 R600_ERR("r600_sb_bytecode_process failed !\n");
219 goto error;
220 }
221 }
222
223 if (shader->gs_copy_shader) {
224 if (dump) {
225 // dump copy shader
226 r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
227 &shader->gs_copy_shader->shader, dump, 0);
228 if (r)
229 goto error;
230 }
231
232 if ((r = store_shader(ctx, shader->gs_copy_shader)))
233 goto error;
234 }
235
236 /* Store the shader in a buffer. */
237 if ((r = store_shader(ctx, shader)))
238 goto error;
239
240 /* Build state. */
241 switch (shader->shader.processor_type) {
242 case PIPE_SHADER_TESS_CTRL:
243 evergreen_update_hs_state(ctx, shader);
244 break;
245 case PIPE_SHADER_TESS_EVAL:
246 if (key.tes.as_es)
247 evergreen_update_es_state(ctx, shader);
248 else
249 evergreen_update_vs_state(ctx, shader);
250 break;
251 case PIPE_SHADER_GEOMETRY:
252 if (rctx->b.chip_class >= EVERGREEN) {
253 evergreen_update_gs_state(ctx, shader);
254 evergreen_update_vs_state(ctx, shader->gs_copy_shader);
255 } else {
256 r600_update_gs_state(ctx, shader);
257 r600_update_vs_state(ctx, shader->gs_copy_shader);
258 }
259 break;
260 case PIPE_SHADER_VERTEX:
261 export_shader = key.vs.as_es;
262 if (rctx->b.chip_class >= EVERGREEN) {
263 if (key.vs.as_ls)
264 evergreen_update_ls_state(ctx, shader);
265 else if (key.vs.as_es)
266 evergreen_update_es_state(ctx, shader);
267 else
268 evergreen_update_vs_state(ctx, shader);
269 } else {
270 if (export_shader)
271 r600_update_es_state(ctx, shader);
272 else
273 r600_update_vs_state(ctx, shader);
274 }
275 break;
276 case PIPE_SHADER_FRAGMENT:
277 if (rctx->b.chip_class >= EVERGREEN) {
278 evergreen_update_ps_state(ctx, shader);
279 } else {
280 r600_update_ps_state(ctx, shader);
281 }
282 break;
283 case PIPE_SHADER_COMPUTE:
284 evergreen_update_ls_state(ctx, shader);
285 break;
286 default:
287 r = -EINVAL;
288 goto error;
289 }
290 return 0;
291
292 error:
293 r600_pipe_shader_destroy(ctx, shader);
294 return r;
295 }
296
297 void r600_pipe_shader_destroy(struct pipe_context *ctx UNUSED, struct r600_pipe_shader *shader)
298 {
299 r600_resource_reference(&shader->bo, NULL);
300 r600_bytecode_clear(&shader->shader.bc);
301 r600_release_command_buffer(&shader->command_buffer);
302 }
303
304 /*
305 * tgsi -> r600 shader
306 */
307 struct r600_shader_tgsi_instruction;
308
309 struct r600_shader_src {
310 unsigned sel;
311 unsigned swizzle[4];
312 unsigned neg;
313 unsigned abs;
314 unsigned rel;
315 unsigned kc_bank;
316 boolean kc_rel; /* true if cache bank is indexed */
317 uint32_t value[4];
318 };
319
320 struct eg_interp {
321 boolean enabled;
322 unsigned ij_index;
323 };
324
325 struct r600_shader_ctx {
326 struct tgsi_shader_info info;
327 struct tgsi_parse_context parse;
328 const struct tgsi_token *tokens;
329 unsigned type;
330 unsigned file_offset[TGSI_FILE_COUNT];
331 unsigned temp_reg;
332 const struct r600_shader_tgsi_instruction *inst_info;
333 struct r600_bytecode *bc;
334 struct r600_shader *shader;
335 struct r600_shader_src src[4];
336 uint32_t *literals;
337 uint32_t nliterals;
338 uint32_t max_driver_temp_used;
339 /* needed for evergreen interpolation */
340 struct eg_interp eg_interpolators[6]; // indexed by Persp/Linear * 3 + sample/center/centroid
341 /* evergreen/cayman also store sample mask in face register */
342 int face_gpr;
343 /* sample id is .w component stored in fixed point position register */
344 int fixed_pt_position_gpr;
345 int colors_used;
346 boolean clip_vertex_write;
347 unsigned cv_output;
348 unsigned edgeflag_output;
349 int cs_block_size_reg;
350 int cs_grid_size_reg;
351 bool cs_block_size_loaded, cs_grid_size_loaded;
352 int fragcoord_input;
353 int next_ring_offset;
354 int gs_out_ring_offset;
355 int gs_next_vertex;
356 struct r600_shader *gs_for_vs;
357 int gs_export_gpr_tregs[4];
358 int gs_rotated_input[2];
359 const struct pipe_stream_output_info *gs_stream_output_info;
360 unsigned enabled_stream_buffers_mask;
361 unsigned tess_input_info; /* temp with tess input offsets */
 362 unsigned tess_output_info; /* temp with tess output offsets */
363 unsigned thread_id_gpr; /* temp with thread id calculated for images */
364 bool thread_id_gpr_loaded;
365 };
366
367 struct r600_shader_tgsi_instruction {
368 unsigned op;
369 int (*process)(struct r600_shader_ctx *ctx);
370 };
371
372 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
373 static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
374 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
375 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
376 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
377 static int tgsi_else(struct r600_shader_ctx *ctx);
378 static int tgsi_endif(struct r600_shader_ctx *ctx);
379 static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
380 static int tgsi_endloop(struct r600_shader_ctx *ctx);
381 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
382 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
383 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
384 unsigned int dst_reg);
385 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
386 const struct r600_shader_src *shader_src,
387 unsigned chan);
388 static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
389 unsigned dst_reg, unsigned mask);
390
391 static int tgsi_last_instruction(unsigned writemask)
392 {
393 int i, lasti = 0;
394
395 for (i = 0; i < 4; i++) {
396 if (writemask & (1 << i)) {
397 lasti = i;
398 }
399 }
400 return lasti;
401 }
402
403 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
404 {
405 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
406 unsigned j;
407
408 if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
409 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
410 return -EINVAL;
411 }
412 #if 0
413 if (i->Instruction.Label) {
414 R600_ERR("label unsupported\n");
415 return -EINVAL;
416 }
417 #endif
418 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
419 if (i->Src[j].Register.Dimension) {
420 switch (i->Src[j].Register.File) {
421 case TGSI_FILE_CONSTANT:
422 case TGSI_FILE_HW_ATOMIC:
423 break;
424 case TGSI_FILE_INPUT:
425 if (ctx->type == PIPE_SHADER_GEOMETRY ||
426 ctx->type == PIPE_SHADER_TESS_CTRL ||
427 ctx->type == PIPE_SHADER_TESS_EVAL)
428 break;
429 case TGSI_FILE_OUTPUT:
430 if (ctx->type == PIPE_SHADER_TESS_CTRL)
431 break;
432 default:
433 R600_ERR("unsupported src %d (file %d, dimension %d)\n", j,
434 i->Src[j].Register.File,
435 i->Src[j].Register.Dimension);
436 return -EINVAL;
437 }
438 }
439 }
440 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
441 if (i->Dst[j].Register.Dimension) {
442 if (ctx->type == PIPE_SHADER_TESS_CTRL)
443 continue;
444 R600_ERR("unsupported dst (dimension)\n");
445 return -EINVAL;
446 }
447 }
448 return 0;
449 }
450
451 int eg_get_interpolator_index(unsigned interpolate, unsigned location)
452 {
453 if (interpolate == TGSI_INTERPOLATE_COLOR ||
454 interpolate == TGSI_INTERPOLATE_LINEAR ||
455 interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
456 {
457 int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
458 int loc;
459
460 switch(location) {
461 case TGSI_INTERPOLATE_LOC_CENTER:
462 loc = 1;
463 break;
464 case TGSI_INTERPOLATE_LOC_CENTROID:
465 loc = 2;
466 break;
467 case TGSI_INTERPOLATE_LOC_SAMPLE:
468 default:
469 loc = 0; break;
470 }
471
472 return is_linear * 3 + loc;
473 }
474
475 return -1;
476 }
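/* Illustrative spot checks of the mapping (TGSI_INTERPOLATE_COLOR counts as
 * perspective here, i.e. is_linear == 0):
 */
#if 0
	assert(eg_get_interpolator_index(TGSI_INTERPOLATE_PERSPECTIVE,
					 TGSI_INTERPOLATE_LOC_SAMPLE) == 0);	/* 0*3 + 0 */
	assert(eg_get_interpolator_index(TGSI_INTERPOLATE_PERSPECTIVE,
					 TGSI_INTERPOLATE_LOC_CENTER) == 1);	/* 0*3 + 1 */
	assert(eg_get_interpolator_index(TGSI_INTERPOLATE_LINEAR,
					 TGSI_INTERPOLATE_LOC_CENTROID) == 5);	/* 1*3 + 2 */
	assert(eg_get_interpolator_index(TGSI_INTERPOLATE_CONSTANT, 0) == -1);	/* flat: no ij pair */
#endif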
477
478 static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
479 int input)
480 {
481 int i = eg_get_interpolator_index(
482 ctx->shader->input[input].interpolate,
483 ctx->shader->input[input].interpolate_location);
484 assert(i >= 0);
485 ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
486 }
487
488 static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
489 {
490 int i, r;
491 struct r600_bytecode_alu alu;
492 int gpr = 0, base_chan = 0;
493 int ij_index = ctx->shader->input[input].ij_index;
494
495 /* work out gpr and base_chan from index */
496 gpr = ij_index / 2;
497 base_chan = (2 * (ij_index % 2)) + 1;
498
499 for (i = 0; i < 8; i++) {
500 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
501
502 if (i < 4)
503 alu.op = ALU_OP2_INTERP_ZW;
504 else
505 alu.op = ALU_OP2_INTERP_XY;
506
507 if ((i > 1) && (i < 6)) {
508 alu.dst.sel = ctx->shader->input[input].gpr;
509 alu.dst.write = 1;
510 }
511
512 alu.dst.chan = i % 4;
513
514 alu.src[0].sel = gpr;
515 alu.src[0].chan = (base_chan - (i % 2));
516
517 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
518
519 alu.bank_swizzle_force = SQ_ALU_VEC_210;
520 if ((i % 4) == 3)
521 alu.last = 1;
522 r = r600_bytecode_add_alu(ctx->bc, &alu);
523 if (r)
524 return r;
525 }
526 return 0;
527 }
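/* Worked example of the ij packing above: ij pairs live two per GPR, so
 * ij_index 0 -> gpr 0, base_chan 1; ij_index 1 -> gpr 0, base_chan 3;
 * ij_index 2 -> gpr 1, base_chan 1. Each INTERP pair then reads channels
 * base_chan and base_chan - 1 of that GPR.
 */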
528
529 static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
530 {
531 int i, r;
532 struct r600_bytecode_alu alu;
533
534 for (i = 0; i < 4; i++) {
535 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
536
537 alu.op = ALU_OP1_INTERP_LOAD_P0;
538
539 alu.dst.sel = ctx->shader->input[input].gpr;
540 alu.dst.write = 1;
541
542 alu.dst.chan = i;
543
544 alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
545 alu.src[0].chan = i;
546
547 if (i == 3)
548 alu.last = 1;
549 r = r600_bytecode_add_alu(ctx->bc, &alu);
550 if (r)
551 return r;
552 }
553 return 0;
554 }
555
556 /*
557 * Special export handling in shaders
558 *
559 * shader export ARRAY_BASE for EXPORT_POS:
560 * 60 is position
561 * 61 is misc vector
562 * 62, 63 are clip distance vectors
563 *
564 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
565 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
566 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
567 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
568 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
569 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
570 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
571 * exclusive from render target index)
572 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
573 *
574 *
575 * shader export ARRAY_BASE for EXPORT_PIXEL:
576 * 0-7 CB targets
577 * 61 computed Z vector
578 *
579 * The use of the values exported in the computed Z vector are controlled
580 * by DB_SHADER_CONTROL:
581 * Z_EXPORT_ENABLE - Z as a float in RED
582 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
583 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
584 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
585 * DB_SOURCE_FORMAT - export control restrictions
586 *
587 */
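/* A minimal sketch of an export using these bases, with position_gpr
 * standing in for the GPR holding the final position (the real exports are
 * built at the end of r600_shader_from_tgsi()):
 */
#if 0
	struct r600_bytecode_output output;
	memset(&output, 0, sizeof(output));
	output.gpr = position_gpr;
	output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
	output.array_base = 60;		/* 60 = position */
	output.op = CF_OP_EXPORT;
	r = r600_bytecode_add_output(ctx->bc, &output);
#endif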
588
589
590 /* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
591 static int r600_spi_sid(struct r600_shader_io * io)
592 {
593 int index, name = io->name;
594
595 /* These params are handled differently, they don't need
596 * semantic indices, so we'll use 0 for them.
597 */
598 if (name == TGSI_SEMANTIC_POSITION ||
599 name == TGSI_SEMANTIC_PSIZE ||
600 name == TGSI_SEMANTIC_EDGEFLAG ||
601 name == TGSI_SEMANTIC_FACE ||
602 name == TGSI_SEMANTIC_SAMPLEMASK)
603 index = 0;
604 else {
605 if (name == TGSI_SEMANTIC_GENERIC) {
606 /* For generic params simply use sid from tgsi */
607 index = io->sid;
608 } else {
609 /* For non-generic params - pack name and sid into 8 bits */
610 index = 0x80 | (name<<3) | (io->sid);
611 }
612
613 /* Make sure that all really used indices have nonzero value, so
614 * we can just compare it to 0 later instead of comparing the name
615 * with different values to detect special cases. */
616 index++;
617 }
618
619 return index;
620 };
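/* Worked examples: GENERIC keeps its TGSI sid, everything else packs name
 * and sid into 8 bits, and both take the final +1 so that 0 always means
 * "no semantic index needed":
 *   GENERIC, sid 5            -> 5 + 1 = 6
 *   non-generic name n, sid s -> (0x80 | (n << 3) | s) + 1
 *   POSITION/PSIZE/FACE/...   -> 0
 */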
621
622 /* we need this to get a common lds index for vs/tcs/tes input/outputs */
623 int r600_get_lds_unique_index(unsigned semantic_name, unsigned index)
624 {
625 switch (semantic_name) {
626 case TGSI_SEMANTIC_POSITION:
627 return 0;
628 case TGSI_SEMANTIC_PSIZE:
629 return 1;
630 case TGSI_SEMANTIC_CLIPDIST:
631 assert(index <= 1);
632 return 2 + index;
633 case TGSI_SEMANTIC_GENERIC:
634 if (index <= 63-4)
 635 return 4 + index;
636 else
637 /* same explanation as in the default statement,
638 * the only user hitting this is st/nine.
639 */
640 return 0;
641
642 /* patch indices are completely separate and thus start from 0 */
643 case TGSI_SEMANTIC_TESSOUTER:
644 return 0;
645 case TGSI_SEMANTIC_TESSINNER:
646 return 1;
647 case TGSI_SEMANTIC_PATCH:
648 return 2 + index;
649
650 default:
651 /* Don't fail here. The result of this function is only used
652 * for LS, TCS, TES, and GS, where legacy GL semantics can't
653 * occur, but this function is called for all vertex shaders
654 * before it's known whether LS will be compiled or not.
655 */
656 return 0;
657 }
658 }
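/* Illustrative spot checks of the slot mapping above: */
#if 0
	assert(r600_get_lds_unique_index(TGSI_SEMANTIC_POSITION, 0) == 0);
	assert(r600_get_lds_unique_index(TGSI_SEMANTIC_CLIPDIST, 1) == 3);	/* 2 + 1 */
	assert(r600_get_lds_unique_index(TGSI_SEMANTIC_GENERIC, 7) == 11);	/* 4 + 7 */
	assert(r600_get_lds_unique_index(TGSI_SEMANTIC_PATCH, 1) == 3);		/* 2 + 1; patch space is separate */
#endif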
659
660 /* turn input into interpolate on EG */
661 static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
662 {
663 int r = 0;
664
665 if (ctx->shader->input[index].spi_sid) {
666 ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
667 if (ctx->shader->input[index].interpolate > 0) {
668 evergreen_interp_assign_ij_index(ctx, index);
669 r = evergreen_interp_alu(ctx, index);
670 } else {
671 r = evergreen_interp_flat(ctx, index);
672 }
673 }
674 return r;
675 }
676
677 static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
678 {
679 struct r600_bytecode_alu alu;
680 int i, r;
681 int gpr_front = ctx->shader->input[front].gpr;
682 int gpr_back = ctx->shader->input[back].gpr;
683
684 for (i = 0; i < 4; i++) {
685 memset(&alu, 0, sizeof(alu));
686 alu.op = ALU_OP3_CNDGT;
687 alu.is_op3 = 1;
688 alu.dst.write = 1;
689 alu.dst.sel = gpr_front;
690 alu.src[0].sel = ctx->face_gpr;
691 alu.src[1].sel = gpr_front;
692 alu.src[2].sel = gpr_back;
693
694 alu.dst.chan = i;
695 alu.src[1].chan = i;
696 alu.src[2].chan = i;
697 alu.last = (i==3);
698
699 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
700 return r;
701 }
702
703 return 0;
704 }
705
706 /* execute a single slot ALU calculation */
707 static int single_alu_op2(struct r600_shader_ctx *ctx, int op,
708 int dst_sel, int dst_chan,
709 int src0_sel, unsigned src0_chan_val,
710 int src1_sel, unsigned src1_chan_val)
711 {
712 struct r600_bytecode_alu alu;
713 int r, i;
714
715 if (ctx->bc->chip_class == CAYMAN && op == ALU_OP2_MULLO_INT) {
716 for (i = 0; i < 4; i++) {
717 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
718 alu.op = op;
719 alu.src[0].sel = src0_sel;
720 if (src0_sel == V_SQ_ALU_SRC_LITERAL)
721 alu.src[0].value = src0_chan_val;
722 else
723 alu.src[0].chan = src0_chan_val;
724 alu.src[1].sel = src1_sel;
725 if (src1_sel == V_SQ_ALU_SRC_LITERAL)
726 alu.src[1].value = src1_chan_val;
727 else
728 alu.src[1].chan = src1_chan_val;
729 alu.dst.sel = dst_sel;
730 alu.dst.chan = i;
731 alu.dst.write = i == dst_chan;
732 alu.last = (i == 3);
733 r = r600_bytecode_add_alu(ctx->bc, &alu);
734 if (r)
735 return r;
736 }
737 return 0;
738 }
739
740 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
741 alu.op = op;
742 alu.src[0].sel = src0_sel;
743 if (src0_sel == V_SQ_ALU_SRC_LITERAL)
744 alu.src[0].value = src0_chan_val;
745 else
746 alu.src[0].chan = src0_chan_val;
747 alu.src[1].sel = src1_sel;
748 if (src1_sel == V_SQ_ALU_SRC_LITERAL)
749 alu.src[1].value = src1_chan_val;
750 else
751 alu.src[1].chan = src1_chan_val;
752 alu.dst.sel = dst_sel;
753 alu.dst.chan = dst_chan;
754 alu.dst.write = 1;
755 alu.last = 1;
756 r = r600_bytecode_add_alu(ctx->bc, &alu);
757 if (r)
758 return r;
759 return 0;
760 }
761
762 /* execute a single slot ALU calculation */
763 static int single_alu_op3(struct r600_shader_ctx *ctx, int op,
764 int dst_sel, int dst_chan,
765 int src0_sel, unsigned src0_chan_val,
766 int src1_sel, unsigned src1_chan_val,
767 int src2_sel, unsigned src2_chan_val)
768 {
769 struct r600_bytecode_alu alu;
770 int r;
771
772 /* validate this for other ops */
773 assert(op == ALU_OP3_MULADD_UINT24 || op == ALU_OP3_CNDE_INT);
774 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
775 alu.op = op;
776 alu.src[0].sel = src0_sel;
777 if (src0_sel == V_SQ_ALU_SRC_LITERAL)
778 alu.src[0].value = src0_chan_val;
779 else
780 alu.src[0].chan = src0_chan_val;
781 alu.src[1].sel = src1_sel;
782 if (src1_sel == V_SQ_ALU_SRC_LITERAL)
783 alu.src[1].value = src1_chan_val;
784 else
785 alu.src[1].chan = src1_chan_val;
786 alu.src[2].sel = src2_sel;
787 if (src2_sel == V_SQ_ALU_SRC_LITERAL)
788 alu.src[2].value = src2_chan_val;
789 else
790 alu.src[2].chan = src2_chan_val;
791 alu.dst.sel = dst_sel;
792 alu.dst.chan = dst_chan;
793 alu.is_op3 = 1;
794 alu.last = 1;
795 r = r600_bytecode_add_alu(ctx->bc, &alu);
796 if (r)
797 return r;
798 return 0;
799 }
800
801 /* put it in temp_reg.x */
802 static int get_lds_offset0(struct r600_shader_ctx *ctx,
803 int rel_patch_chan,
804 int temp_reg, bool is_patch_var)
805 {
806 int r;
807
808 /* MUL temp.x, patch_stride (input_vals.x), rel_patch_id (r0.y (tcs)) */
809 /* ADD
810 Dimension - patch0_offset (input_vals.z),
811 Non-dim - patch0_data_offset (input_vals.w)
812 */
813 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
814 temp_reg, 0,
815 ctx->tess_output_info, 0,
816 0, rel_patch_chan,
817 ctx->tess_output_info, is_patch_var ? 3 : 2);
818 if (r)
819 return r;
820 return 0;
821 }
822
823 static inline int get_address_file_reg(struct r600_shader_ctx *ctx, int index)
824 {
825 return index > 0 ? ctx->bc->index_reg[index - 1] : ctx->bc->ar_reg;
826 }
827
828 static int r600_get_temp(struct r600_shader_ctx *ctx)
829 {
830 return ctx->temp_reg + ctx->max_driver_temp_used++;
831 }
832
833 static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
834 {
835 int i;
836 i = ctx->shader->noutput++;
837 ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
838 ctx->shader->output[i].sid = 0;
839 ctx->shader->output[i].gpr = 0;
840 ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
841 ctx->shader->output[i].write_mask = 0x4;
842 ctx->shader->output[i].spi_sid = prim_id_sid;
843
844 return 0;
845 }
846
847 static int tgsi_barrier(struct r600_shader_ctx *ctx)
848 {
849 struct r600_bytecode_alu alu;
850 int r;
851
852 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
853 alu.op = ctx->inst_info->op;
854 alu.last = 1;
855
856 r = r600_bytecode_add_alu(ctx->bc, &alu);
857 if (r)
858 return r;
859 return 0;
860 }
861
862 static int tgsi_declaration(struct r600_shader_ctx *ctx)
863 {
864 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
865 int r, i, j, count = d->Range.Last - d->Range.First + 1;
866
867 switch (d->Declaration.File) {
868 case TGSI_FILE_INPUT:
869 for (j = 0; j < count; j++) {
870 i = ctx->shader->ninput + j;
871 assert(i < ARRAY_SIZE(ctx->shader->input));
872 ctx->shader->input[i].name = d->Semantic.Name;
873 ctx->shader->input[i].sid = d->Semantic.Index + j;
874 ctx->shader->input[i].interpolate = d->Interp.Interpolate;
875 ctx->shader->input[i].interpolate_location = d->Interp.Location;
876 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
877 if (ctx->type == PIPE_SHADER_FRAGMENT) {
878 ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
879 switch (ctx->shader->input[i].name) {
880 case TGSI_SEMANTIC_FACE:
881 if (ctx->face_gpr != -1)
882 ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
883 else
884 ctx->face_gpr = ctx->shader->input[i].gpr;
885 break;
886 case TGSI_SEMANTIC_COLOR:
887 ctx->colors_used++;
888 break;
889 case TGSI_SEMANTIC_POSITION:
890 ctx->fragcoord_input = i;
891 break;
892 case TGSI_SEMANTIC_PRIMID:
893 /* set this for now */
894 ctx->shader->gs_prim_id_input = true;
895 ctx->shader->ps_prim_id_input = i;
896 break;
897 }
898 if (ctx->bc->chip_class >= EVERGREEN) {
899 if ((r = evergreen_interp_input(ctx, i)))
900 return r;
901 }
902 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
903 /* FIXME probably skip inputs if they aren't passed in the ring */
904 ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
905 ctx->next_ring_offset += 16;
906 if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
907 ctx->shader->gs_prim_id_input = true;
908 }
909 }
910 ctx->shader->ninput += count;
911 break;
912 case TGSI_FILE_OUTPUT:
913 for (j = 0; j < count; j++) {
914 i = ctx->shader->noutput + j;
915 assert(i < ARRAY_SIZE(ctx->shader->output));
916 ctx->shader->output[i].name = d->Semantic.Name;
917 ctx->shader->output[i].sid = d->Semantic.Index + j;
918 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
919 ctx->shader->output[i].interpolate = d->Interp.Interpolate;
920 ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
921 if (ctx->type == PIPE_SHADER_VERTEX ||
922 ctx->type == PIPE_SHADER_GEOMETRY ||
923 ctx->type == PIPE_SHADER_TESS_EVAL) {
924 ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
925 switch (d->Semantic.Name) {
926 case TGSI_SEMANTIC_CLIPDIST:
927 break;
928 case TGSI_SEMANTIC_PSIZE:
929 ctx->shader->vs_out_misc_write = 1;
930 ctx->shader->vs_out_point_size = 1;
931 break;
932 case TGSI_SEMANTIC_EDGEFLAG:
933 ctx->shader->vs_out_misc_write = 1;
934 ctx->shader->vs_out_edgeflag = 1;
935 ctx->edgeflag_output = i;
936 break;
937 case TGSI_SEMANTIC_VIEWPORT_INDEX:
938 ctx->shader->vs_out_misc_write = 1;
939 ctx->shader->vs_out_viewport = 1;
940 break;
941 case TGSI_SEMANTIC_LAYER:
942 ctx->shader->vs_out_misc_write = 1;
943 ctx->shader->vs_out_layer = 1;
944 break;
945 case TGSI_SEMANTIC_CLIPVERTEX:
946 ctx->clip_vertex_write = TRUE;
947 ctx->cv_output = i;
948 break;
949 }
950 if (ctx->type == PIPE_SHADER_GEOMETRY) {
951 ctx->gs_out_ring_offset += 16;
952 }
953 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
954 switch (d->Semantic.Name) {
955 case TGSI_SEMANTIC_COLOR:
956 ctx->shader->nr_ps_max_color_exports++;
957 break;
958 }
959 }
960 }
961 ctx->shader->noutput += count;
962 break;
963 case TGSI_FILE_TEMPORARY:
964 if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
965 if (d->Array.ArrayID) {
966 r600_add_gpr_array(ctx->shader,
967 ctx->file_offset[TGSI_FILE_TEMPORARY] +
968 d->Range.First,
969 d->Range.Last - d->Range.First + 1, 0x0F);
970 }
971 }
972 break;
973
974 case TGSI_FILE_CONSTANT:
975 case TGSI_FILE_SAMPLER:
976 case TGSI_FILE_SAMPLER_VIEW:
977 case TGSI_FILE_ADDRESS:
978 case TGSI_FILE_BUFFER:
979 case TGSI_FILE_IMAGE:
980 case TGSI_FILE_MEMORY:
981 break;
982
983 case TGSI_FILE_HW_ATOMIC:
984 i = ctx->shader->nhwatomic_ranges;
985 ctx->shader->atomics[i].start = d->Range.First;
986 ctx->shader->atomics[i].end = d->Range.Last;
987 ctx->shader->atomics[i].hw_idx = ctx->shader->atomic_base + ctx->shader->nhwatomic;
988 ctx->shader->atomics[i].array_id = d->Array.ArrayID;
989 ctx->shader->atomics[i].buffer_id = d->Dim.Index2D;
990 ctx->shader->nhwatomic_ranges++;
991 ctx->shader->nhwatomic += count;
992 break;
993
994 case TGSI_FILE_SYSTEM_VALUE:
995 if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
996 d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
997 d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
998 break; /* Already handled from allocate_system_value_inputs */
999 } else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
1000 break;
1001 } else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
1002 break;
1003 else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
1004 break;
1005 else if (d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ||
1006 d->Semantic.Name == TGSI_SEMANTIC_TESSOUTER) {
1007 int param = r600_get_lds_unique_index(d->Semantic.Name, 0);
1008 int dreg = d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ? 3 : 2;
1009 unsigned temp_reg = r600_get_temp(ctx);
1010
1011 r = get_lds_offset0(ctx, 2, temp_reg, true);
1012 if (r)
1013 return r;
1014
1015 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1016 temp_reg, 0,
1017 temp_reg, 0,
1018 V_SQ_ALU_SRC_LITERAL, param * 16);
1019 if (r)
1020 return r;
1021
1022 do_lds_fetch_values(ctx, temp_reg, dreg, 0xf);
1023 }
1024 else if (d->Semantic.Name == TGSI_SEMANTIC_TESSCOORD) {
1025 /* MOV r1.x, r0.x;
1026 MOV r1.y, r0.y;
1027 */
1028 for (i = 0; i < 2; i++) {
1029 struct r600_bytecode_alu alu;
1030 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1031 alu.op = ALU_OP1_MOV;
1032 alu.src[0].sel = 0;
1033 alu.src[0].chan = 0 + i;
1034 alu.dst.sel = 1;
1035 alu.dst.chan = 0 + i;
1036 alu.dst.write = 1;
1037 alu.last = (i == 1) ? 1 : 0;
1038 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1039 return r;
1040 }
1041 /* ADD r1.z, 1.0f, -r0.x */
1042 struct r600_bytecode_alu alu;
1043 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1044 alu.op = ALU_OP2_ADD;
1045 alu.src[0].sel = V_SQ_ALU_SRC_1;
1046 alu.src[1].sel = 1;
1047 alu.src[1].chan = 0;
1048 alu.src[1].neg = 1;
1049 alu.dst.sel = 1;
1050 alu.dst.chan = 2;
1051 alu.dst.write = 1;
1052 alu.last = 1;
1053 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1054 return r;
1055
1056 /* ADD r1.z, r1.z, -r1.y */
1057 alu.op = ALU_OP2_ADD;
1058 alu.src[0].sel = 1;
1059 alu.src[0].chan = 2;
1060 alu.src[1].sel = 1;
1061 alu.src[1].chan = 1;
1062 alu.src[1].neg = 1;
1063 alu.dst.sel = 1;
1064 alu.dst.chan = 2;
1065 alu.dst.write = 1;
1066 alu.last = 1;
1067 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1068 return r;
1069 break;
1070 }
1071 break;
1072 default:
1073 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
1074 return -EINVAL;
1075 }
1076 return 0;
1077 }
1078
1079 static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
1080 {
1081 struct tgsi_parse_context parse;
1082 struct {
1083 boolean enabled;
1084 int *reg;
1085 unsigned name, alternate_name;
1086 } inputs[2] = {
1087 { false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */
1088
1089 { false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
1090 };
1091 int num_regs = 0;
1092 unsigned k, i;
1093
1094 if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
1095 return 0;
1096 }
1097
1098 /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
1099 while (!tgsi_parse_end_of_tokens(&parse)) {
1100 tgsi_parse_token(&parse);
1101
1102 if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
1103 const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
1104 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
1105 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
1106 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
1107 {
1108 int interpolate, location, k;
1109
1110 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
1111 location = TGSI_INTERPOLATE_LOC_CENTER;
1112 inputs[1].enabled = true; /* needs SAMPLEID */
1113 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
1114 location = TGSI_INTERPOLATE_LOC_CENTER;
1115 /* Needs sample positions, currently those are always available */
1116 } else {
1117 location = TGSI_INTERPOLATE_LOC_CENTROID;
1118 }
1119
1120 interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
1121 k = eg_get_interpolator_index(interpolate, location);
1122 if (k >= 0)
1123 ctx->eg_interpolators[k].enabled = true;
1124 }
1125 } else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
1126 struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
1127 if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
1128 for (k = 0; k < ARRAY_SIZE(inputs); k++) {
1129 if (d->Semantic.Name == inputs[k].name ||
1130 d->Semantic.Name == inputs[k].alternate_name) {
1131 inputs[k].enabled = true;
1132 }
1133 }
1134 }
1135 }
1136 }
1137
1138 tgsi_parse_free(&parse);
1139
1140 for (i = 0; i < ARRAY_SIZE(inputs); i++) {
1141 boolean enabled = inputs[i].enabled;
1142 int *reg = inputs[i].reg;
1143 unsigned name = inputs[i].name;
1144
1145 if (enabled) {
1146 int gpr = gpr_offset + num_regs++;
1147 ctx->shader->nsys_inputs++;
1148
1149 // add to inputs, allocate a gpr
1150 k = ctx->shader->ninput++;
1151 ctx->shader->input[k].name = name;
1152 ctx->shader->input[k].sid = 0;
1153 ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
1154 ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
1155 *reg = ctx->shader->input[k].gpr = gpr;
1156 }
1157 }
1158
1159 return gpr_offset + num_regs;
1160 }
1161
1162 /*
1163 * for evergreen we need to scan the shader to find the number of GPRs we need to
1164 * reserve for interpolation and system values
1165 *
1166 * we need to know if we are going to emit
1167 * any sample or centroid inputs
1168 * if perspective and linear are required
1169 */
1170 static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
1171 {
1172 unsigned i;
1173 int num_baryc;
1174 struct tgsi_parse_context parse;
1175
1176 memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));
1177
1178 for (i = 0; i < ctx->info.num_inputs; i++) {
1179 int k;
1180 /* skip position/face/mask/sampleid */
1181 if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
1182 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
1183 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
1184 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
1185 continue;
1186
1187 k = eg_get_interpolator_index(
1188 ctx->info.input_interpolate[i],
1189 ctx->info.input_interpolate_loc[i]);
1190 if (k >= 0)
1191 ctx->eg_interpolators[k].enabled = TRUE;
1192 }
1193
1194 if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
1195 return 0;
1196 }
1197
1198 /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
1199 while (!tgsi_parse_end_of_tokens(&parse)) {
1200 tgsi_parse_token(&parse);
1201
1202 if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
1203 const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
1204 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
1205 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
1206 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
1207 {
1208 int interpolate, location, k;
1209
1210 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
1211 location = TGSI_INTERPOLATE_LOC_CENTER;
1212 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
1213 location = TGSI_INTERPOLATE_LOC_CENTER;
1214 } else {
1215 location = TGSI_INTERPOLATE_LOC_CENTROID;
1216 }
1217
1218 interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
1219 k = eg_get_interpolator_index(interpolate, location);
1220 if (k >= 0)
1221 ctx->eg_interpolators[k].enabled = true;
1222 }
1223 }
1224 }
1225
1226 tgsi_parse_free(&parse);
1227
1228 /* assign gpr to each interpolator according to priority */
1229 num_baryc = 0;
1230 for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
1231 if (ctx->eg_interpolators[i].enabled) {
1232 ctx->eg_interpolators[i].ij_index = num_baryc;
1233 num_baryc ++;
1234 }
1235 }
1236
1237 /* XXX PULL MODEL and LINE STIPPLE */
1238
1239 num_baryc = (num_baryc + 1) >> 1;
1240 return allocate_system_value_inputs(ctx, num_baryc);
1241 }
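/* Worked example: three enabled interpolators get ij_index 0..2; since ij
 * pairs are packed two per GPR, (3 + 1) >> 1 = 2 GPRs are reserved, and the
 * system-value inputs (sample mask/id) are placed right after them.
 */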
1242
1243 /* sample_id_sel == NULL means fetch for current sample */
1244 static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
1245 {
1246 struct r600_bytecode_vtx vtx;
1247 int r, t1;
1248
1249 assert(ctx->fixed_pt_position_gpr != -1);
1250
1251 t1 = r600_get_temp(ctx);
1252
1253 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
1254 vtx.op = FETCH_OP_VFETCH;
1255 vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
1256 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1257 if (sample_id == NULL) {
1258 vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
1259 vtx.src_sel_x = 3;
1260 }
1261 else {
1262 struct r600_bytecode_alu alu;
1263
1264 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1265 alu.op = ALU_OP1_MOV;
1266 r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
1267 alu.dst.sel = t1;
1268 alu.dst.write = 1;
1269 alu.last = 1;
1270 r = r600_bytecode_add_alu(ctx->bc, &alu);
1271 if (r)
1272 return r;
1273
1274 vtx.src_gpr = t1;
1275 vtx.src_sel_x = 0;
1276 }
1277 vtx.mega_fetch_count = 16;
1278 vtx.dst_gpr = t1;
1279 vtx.dst_sel_x = 0;
1280 vtx.dst_sel_y = 1;
1281 vtx.dst_sel_z = 2;
1282 vtx.dst_sel_w = 3;
1283 vtx.data_format = FMT_32_32_32_32_FLOAT;
1284 vtx.num_format_all = 2;
1285 vtx.format_comp_all = 1;
1286 vtx.use_const_fields = 0;
1287 vtx.offset = 0;
1288 vtx.endian = r600_endian_swap(32);
1289 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
1290
1291 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
1292 if (r)
1293 return r;
1294
1295 return t1;
1296 }
1297
1298 static int load_block_grid_size(struct r600_shader_ctx *ctx, bool load_block)
1299 {
1300 struct r600_bytecode_vtx vtx;
1301 int r, t1;
1302
 1303 	if (load_block && ctx->cs_block_size_loaded)
 1304 		return ctx->cs_block_size_reg;
 1305 	if (!load_block && ctx->cs_grid_size_loaded)
 1306 		return ctx->cs_grid_size_reg;
1307
1308 t1 = load_block ? ctx->cs_block_size_reg : ctx->cs_grid_size_reg;
1309 struct r600_bytecode_alu alu;
1310 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1311 alu.op = ALU_OP1_MOV;
1312 alu.src[0].sel = V_SQ_ALU_SRC_0;
1313 alu.dst.sel = t1;
1314 alu.dst.write = 1;
1315 alu.last = 1;
1316 r = r600_bytecode_add_alu(ctx->bc, &alu);
1317 if (r)
1318 return r;
1319
1320 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
1321 vtx.op = FETCH_OP_VFETCH;
1322 vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
1323 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1324 vtx.src_gpr = t1;
1325 vtx.src_sel_x = 0;
1326
1327 vtx.mega_fetch_count = 16;
1328 vtx.dst_gpr = t1;
1329 vtx.dst_sel_x = 0;
1330 vtx.dst_sel_y = 1;
1331 vtx.dst_sel_z = 2;
1332 vtx.dst_sel_w = 7;
1333 vtx.data_format = FMT_32_32_32_32;
1334 vtx.num_format_all = 1;
1335 vtx.format_comp_all = 0;
1336 vtx.use_const_fields = 0;
 1337 vtx.offset = load_block ? 0 : 16; // block size at byte 0, grid size at byte 16
1338 vtx.endian = r600_endian_swap(32);
1339 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
1340
1341 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
1342 if (r)
1343 return r;
1344
1345 if (load_block)
1346 ctx->cs_block_size_loaded = true;
1347 else
1348 ctx->cs_grid_size_loaded = true;
1349 return t1;
1350 }
1351
1352 static void tgsi_src(struct r600_shader_ctx *ctx,
1353 const struct tgsi_full_src_register *tgsi_src,
1354 struct r600_shader_src *r600_src)
1355 {
1356 memset(r600_src, 0, sizeof(*r600_src));
1357 r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
1358 r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
1359 r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
1360 r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
1361 r600_src->neg = tgsi_src->Register.Negate;
1362 r600_src->abs = tgsi_src->Register.Absolute;
1363
1364 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
1365 int index;
1366 if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
1367 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
1368 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
1369
1370 index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
1371 r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg, r600_src->abs);
1372 if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
1373 return;
1374 }
1375 index = tgsi_src->Register.Index;
1376 r600_src->sel = V_SQ_ALU_SRC_LITERAL;
1377 memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
1378 } else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
1379 if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
1380 r600_src->swizzle[0] = 2; // Z value
1381 r600_src->swizzle[1] = 2;
1382 r600_src->swizzle[2] = 2;
1383 r600_src->swizzle[3] = 2;
1384 r600_src->sel = ctx->face_gpr;
1385 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
1386 r600_src->swizzle[0] = 3; // W value
1387 r600_src->swizzle[1] = 3;
1388 r600_src->swizzle[2] = 3;
1389 r600_src->swizzle[3] = 3;
1390 r600_src->sel = ctx->fixed_pt_position_gpr;
1391 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
1392 r600_src->swizzle[0] = 0;
1393 r600_src->swizzle[1] = 1;
1394 r600_src->swizzle[2] = 4;
1395 r600_src->swizzle[3] = 4;
1396 r600_src->sel = load_sample_position(ctx, NULL, -1);
1397 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
1398 r600_src->swizzle[0] = 3;
1399 r600_src->swizzle[1] = 3;
1400 r600_src->swizzle[2] = 3;
1401 r600_src->swizzle[3] = 3;
1402 r600_src->sel = 0;
1403 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
1404 r600_src->swizzle[0] = 0;
1405 r600_src->swizzle[1] = 0;
1406 r600_src->swizzle[2] = 0;
1407 r600_src->swizzle[3] = 0;
1408 r600_src->sel = 0;
1409 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_THREAD_ID) {
1410 r600_src->sel = 0;
1411 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_ID) {
1412 r600_src->sel = 1;
1413 } else if (ctx->type != PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
1414 r600_src->swizzle[0] = 3;
1415 r600_src->swizzle[1] = 3;
1416 r600_src->swizzle[2] = 3;
1417 r600_src->swizzle[3] = 3;
1418 r600_src->sel = 1;
1419 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
1420 r600_src->swizzle[0] = 2;
1421 r600_src->swizzle[1] = 2;
1422 r600_src->swizzle[2] = 2;
1423 r600_src->swizzle[3] = 2;
1424 r600_src->sel = 0;
1425 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSCOORD) {
1426 r600_src->sel = 1;
1427 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSINNER) {
1428 r600_src->sel = 3;
1429 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSOUTER) {
1430 r600_src->sel = 2;
1431 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTICESIN) {
1432 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
1433 r600_src->sel = ctx->tess_input_info;
1434 r600_src->swizzle[0] = 2;
1435 r600_src->swizzle[1] = 2;
1436 r600_src->swizzle[2] = 2;
1437 r600_src->swizzle[3] = 2;
1438 } else {
1439 r600_src->sel = ctx->tess_input_info;
1440 r600_src->swizzle[0] = 3;
1441 r600_src->swizzle[1] = 3;
1442 r600_src->swizzle[2] = 3;
1443 r600_src->swizzle[3] = 3;
1444 }
1445 } else if (ctx->type == PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
1446 r600_src->sel = 0;
1447 r600_src->swizzle[0] = 0;
1448 r600_src->swizzle[1] = 0;
1449 r600_src->swizzle[2] = 0;
1450 r600_src->swizzle[3] = 0;
1451 } else if (ctx->type == PIPE_SHADER_TESS_EVAL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
1452 r600_src->sel = 0;
1453 r600_src->swizzle[0] = 3;
1454 r600_src->swizzle[1] = 3;
1455 r600_src->swizzle[2] = 3;
1456 r600_src->swizzle[3] = 3;
1457 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_GRID_SIZE) {
1458 r600_src->sel = load_block_grid_size(ctx, false);
1459 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_SIZE) {
1460 r600_src->sel = load_block_grid_size(ctx, true);
1461 }
1462 } else {
1463 if (tgsi_src->Register.Indirect)
1464 r600_src->rel = V_SQ_REL_RELATIVE;
1465 r600_src->sel = tgsi_src->Register.Index;
1466 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
1467 }
1468 if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
1469 if (tgsi_src->Register.Dimension) {
1470 r600_src->kc_bank = tgsi_src->Dimension.Index;
1471 if (tgsi_src->Dimension.Indirect) {
1472 r600_src->kc_rel = 1;
1473 }
1474 }
1475 }
1476 }
1477
1478 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
1479 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
1480 unsigned int dst_reg)
1481 {
1482 struct r600_bytecode_vtx vtx;
1483 unsigned int ar_reg;
1484 int r;
1485
1486 if (offset) {
1487 struct r600_bytecode_alu alu;
1488
1489 memset(&alu, 0, sizeof(alu));
1490
1491 alu.op = ALU_OP2_ADD_INT;
1492 alu.src[0].sel = ctx->bc->ar_reg;
1493 alu.src[0].chan = ar_chan;
1494
1495 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1496 alu.src[1].value = offset;
1497
1498 alu.dst.sel = dst_reg;
1499 alu.dst.chan = ar_chan;
1500 alu.dst.write = 1;
1501 alu.last = 1;
1502
1503 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1504 return r;
1505
1506 ar_reg = dst_reg;
1507 } else {
1508 ar_reg = ctx->bc->ar_reg;
1509 }
1510
1511 memset(&vtx, 0, sizeof(vtx));
1512 vtx.buffer_id = cb_idx;
1513 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1514 vtx.src_gpr = ar_reg;
1515 vtx.src_sel_x = ar_chan;
1516 vtx.mega_fetch_count = 16;
1517 vtx.dst_gpr = dst_reg;
1518 vtx.dst_sel_x = 0; /* SEL_X */
1519 vtx.dst_sel_y = 1; /* SEL_Y */
1520 vtx.dst_sel_z = 2; /* SEL_Z */
1521 vtx.dst_sel_w = 3; /* SEL_W */
1522 vtx.data_format = FMT_32_32_32_32_FLOAT;
1523 vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
1524 vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
1525 vtx.endian = r600_endian_swap(32);
1526 vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;
1527
1528 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1529 return r;
1530
1531 return 0;
1532 }
1533
1534 static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1535 {
1536 struct r600_bytecode_vtx vtx;
1537 int r;
1538 unsigned index = src->Register.Index;
1539 unsigned vtx_id = src->Dimension.Index;
1540 int offset_reg = ctx->gs_rotated_input[vtx_id / 3];
1541 int offset_chan = vtx_id % 3;
1542 int t2 = 0;
1543
1544 /* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
1545 * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */
1546
1547 if (offset_reg == ctx->gs_rotated_input[0] && offset_chan == 2)
1548 offset_chan = 3;
1549
1550 if (src->Dimension.Indirect || src->Register.Indirect)
1551 t2 = r600_get_temp(ctx);
1552
1553 if (src->Dimension.Indirect) {
1554 int treg[3];
1555 struct r600_bytecode_alu alu;
1556 int r, i;
1557 unsigned addr_reg;
1558 addr_reg = get_address_file_reg(ctx, src->DimIndirect.Index);
1559 if (src->DimIndirect.Index > 0) {
1560 r = single_alu_op2(ctx, ALU_OP1_MOV,
1561 ctx->bc->ar_reg, 0,
1562 addr_reg, 0,
1563 0, 0);
1564 if (r)
1565 return r;
1566 }
1567 /*
1568 we have to put the R0.x/y/w into Rt.x Rt+1.x Rt+2.x then index reg from Rt.
1569 at least this is what fglrx seems to do. */
1570 for (i = 0; i < 3; i++) {
1571 treg[i] = r600_get_temp(ctx);
1572 }
1573 r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);
1574
1575 for (i = 0; i < 3; i++) {
1576 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1577 alu.op = ALU_OP1_MOV;
1578 alu.src[0].sel = ctx->gs_rotated_input[0];
1579 alu.src[0].chan = i == 2 ? 3 : i;
1580 alu.dst.sel = treg[i];
1581 alu.dst.chan = 0;
1582 alu.dst.write = 1;
1583 alu.last = 1;
1584 r = r600_bytecode_add_alu(ctx->bc, &alu);
1585 if (r)
1586 return r;
1587 }
1588 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1589 alu.op = ALU_OP1_MOV;
1590 alu.src[0].sel = treg[0];
1591 alu.src[0].rel = 1;
1592 alu.dst.sel = t2;
1593 alu.dst.write = 1;
1594 alu.last = 1;
1595 r = r600_bytecode_add_alu(ctx->bc, &alu);
1596 if (r)
1597 return r;
1598 offset_reg = t2;
1599 offset_chan = 0;
1600 }
1601
1602 if (src->Register.Indirect) {
1603 int addr_reg;
1604 unsigned first = ctx->info.input_array_first[src->Indirect.ArrayID];
1605
1606 addr_reg = get_address_file_reg(ctx, src->Indirect.Index);
1607
1608 /* pull the value from index_reg */
1609 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1610 t2, 1,
1611 addr_reg, 0,
1612 V_SQ_ALU_SRC_LITERAL, first);
1613 if (r)
1614 return r;
1615 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
1616 t2, 0,
1617 t2, 1,
1618 V_SQ_ALU_SRC_LITERAL, 4,
1619 offset_reg, offset_chan);
1620 if (r)
1621 return r;
1622 offset_reg = t2;
1623 offset_chan = 0;
1624 index = src->Register.Index - first;
1625 }
1626
1627 memset(&vtx, 0, sizeof(vtx));
1628 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
1629 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1630 vtx.src_gpr = offset_reg;
1631 vtx.src_sel_x = offset_chan;
1632 vtx.offset = index * 16; /*bytes*/
1633 vtx.mega_fetch_count = 16;
1634 vtx.dst_gpr = dst_reg;
1635 vtx.dst_sel_x = 0; /* SEL_X */
1636 vtx.dst_sel_y = 1; /* SEL_Y */
1637 vtx.dst_sel_z = 2; /* SEL_Z */
1638 vtx.dst_sel_w = 3; /* SEL_W */
1639 if (ctx->bc->chip_class >= EVERGREEN) {
1640 vtx.use_const_fields = 1;
1641 } else {
1642 vtx.data_format = FMT_32_32_32_32_FLOAT;
1643 }
1644
1645 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1646 return r;
1647
1648 return 0;
1649 }
1650
1651 static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
1652 {
1653 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1654 unsigned i;
1655
1656 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1657 struct tgsi_full_src_register *src = &inst->Src[i];
1658
1659 if (src->Register.File == TGSI_FILE_INPUT) {
1660 if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
1661 /* primitive id is in R0.z */
1662 ctx->src[i].sel = 0;
1663 ctx->src[i].swizzle[0] = 2;
1664 }
1665 }
1666 if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
1667 int treg = r600_get_temp(ctx);
1668
1669 fetch_gs_input(ctx, src, treg);
1670 ctx->src[i].sel = treg;
1671 ctx->src[i].rel = 0;
1672 }
1673 }
1674 return 0;
1675 }
1676
1677
1678 /* Tessellation shaders pass outputs to the next shader using LDS.
1679 *
1680 * LS outputs = TCS(HS) inputs
1681 * TCS(HS) outputs = TES(DS) inputs
1682 *
1683 * The LDS layout is:
1684 * - TCS inputs for patch 0
1685 * - TCS inputs for patch 1
1686 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
1687 * - ...
1688 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
1689 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
1690 * - TCS outputs for patch 1
1691 * - Per-patch TCS outputs for patch 1
1692 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
1693 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
1694 * - ...
1695 *
1696 * All three shaders VS(LS), TCS, TES share the same LDS space.
1697 */
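/* Illustrative address computation, matching get_lds_offset0() and
 * r600_get_byte_address() below (strides in bytes: 16 per vec4 slot,
 * 4 per dword):
 *
 *   base  = rel_patch_id * patch_stride + patch0_offset
 *           (patch0_data_offset instead, for per-patch variables)
 *   addr  = base + vertex_index * vertex_stride	(2-dimensional regs)
 *   addr += r600_get_lds_unique_index(name, sid) * 16
 *   addr += 4 * channel
 */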
 1698 /* this will return with the byte address in temp_reg.x */
1699 static int r600_get_byte_address(struct r600_shader_ctx *ctx, int temp_reg,
1700 const struct tgsi_full_dst_register *dst,
1701 const struct tgsi_full_src_register *src,
1702 int stride_bytes_reg, int stride_bytes_chan)
1703 {
1704 struct tgsi_full_dst_register reg;
1705 ubyte *name, *index, *array_first;
1706 int r;
1707 int param;
1708 struct tgsi_shader_info *info = &ctx->info;
1709 /* Set the register description. The address computation is the same
1710 * for sources and destinations. */
1711 if (src) {
1712 reg.Register.File = src->Register.File;
1713 reg.Register.Index = src->Register.Index;
1714 reg.Register.Indirect = src->Register.Indirect;
1715 reg.Register.Dimension = src->Register.Dimension;
1716 reg.Indirect = src->Indirect;
1717 reg.Dimension = src->Dimension;
1718 reg.DimIndirect = src->DimIndirect;
1719 } else
1720 reg = *dst;
1721
1722 /* If the register is 2-dimensional (e.g. an array of vertices
1723 * in a primitive), calculate the base address of the vertex. */
1724 if (reg.Register.Dimension) {
1725 int sel, chan;
1726 if (reg.Dimension.Indirect) {
1727 unsigned addr_reg;
1728 assert (reg.DimIndirect.File == TGSI_FILE_ADDRESS);
1729
1730 addr_reg = get_address_file_reg(ctx, reg.DimIndirect.Index);
1731 /* pull the value from index_reg */
1732 sel = addr_reg;
1733 chan = 0;
1734 } else {
1735 sel = V_SQ_ALU_SRC_LITERAL;
1736 chan = reg.Dimension.Index;
1737 }
1738
1739 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
1740 temp_reg, 0,
1741 stride_bytes_reg, stride_bytes_chan,
1742 sel, chan,
1743 temp_reg, 0);
1744 if (r)
1745 return r;
1746 }
1747
1748 if (reg.Register.File == TGSI_FILE_INPUT) {
1749 name = info->input_semantic_name;
1750 index = info->input_semantic_index;
1751 array_first = info->input_array_first;
1752 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
1753 name = info->output_semantic_name;
1754 index = info->output_semantic_index;
1755 array_first = info->output_array_first;
1756 } else {
1757 assert(0);
1758 return -1;
1759 }
1760 if (reg.Register.Indirect) {
1761 int addr_reg;
1762 int first;
1763 /* Add the relative address of the element. */
1764 if (reg.Indirect.ArrayID)
1765 first = array_first[reg.Indirect.ArrayID];
1766 else
1767 first = reg.Register.Index;
1768
1769 addr_reg = get_address_file_reg(ctx, reg.Indirect.Index);
1770
1771 /* pull the value from index_reg */
1772 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
1773 temp_reg, 0,
1774 V_SQ_ALU_SRC_LITERAL, 16,
1775 addr_reg, 0,
1776 temp_reg, 0);
1777 if (r)
1778 return r;
1779
1780 param = r600_get_lds_unique_index(name[first],
1781 index[first]);
1782
1783 } else {
1784 param = r600_get_lds_unique_index(name[reg.Register.Index],
1785 index[reg.Register.Index]);
1786 }
1787
1788 /* add to base_addr - passed in temp_reg.x */
1789 if (param) {
1790 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1791 temp_reg, 0,
1792 temp_reg, 0,
1793 V_SQ_ALU_SRC_LITERAL, param * 16);
1794 if (r)
1795 return r;
1796
1797 }
1798 return 0;
1799 }
1800
1801 static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
1802 unsigned dst_reg, unsigned mask)
1803 {
1804 struct r600_bytecode_alu alu;
1805 int r, i, lasti;
1806
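	/* the reads below each take an ALU slot; force a new CF/ALU
	 * clause if the current one is close to full */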
1807 	if ((ctx->bc->cf_last->ndw >> 1) >= 0x60)
1808 ctx->bc->force_add_cf = 1;
1809
1810 lasti = tgsi_last_instruction(mask);
1811 for (i = 1; i <= lasti; i++) {
1812 if (!(mask & (1 << i)))
1813 continue;
1814
1815 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1816 temp_reg, i,
1817 temp_reg, 0,
1818 V_SQ_ALU_SRC_LITERAL, 4 * i);
1819 if (r)
1820 return r;
1821 }
1822 for (i = 0; i <= lasti; i++) {
1823 if (!(mask & (1 << i)))
1824 continue;
1825
1826 /* emit an LDS_READ_RET */
1827 memset(&alu, 0, sizeof(alu));
1828 alu.op = LDS_OP1_LDS_READ_RET;
1829 alu.src[0].sel = temp_reg;
1830 alu.src[0].chan = i;
1831 alu.src[1].sel = V_SQ_ALU_SRC_0;
1832 alu.src[2].sel = V_SQ_ALU_SRC_0;
1833 alu.dst.chan = 0;
1834 alu.is_lds_idx_op = true;
1835 alu.last = 1;
1836 r = r600_bytecode_add_alu(ctx->bc, &alu);
1837 if (r)
1838 return r;
1839 }
1840 for (i = 0; i <= lasti; i++) {
1841 if (!(mask & (1 << i)))
1842 continue;
1843
1844 /* then read from LDS_OQ_A_POP */
1845 memset(&alu, 0, sizeof(alu));
1846
1847 alu.op = ALU_OP1_MOV;
1848 alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
1849 alu.src[0].chan = 0;
1850 alu.dst.sel = dst_reg;
1851 alu.dst.chan = i;
1852 alu.dst.write = 1;
1853 alu.last = 1;
1854 r = r600_bytecode_add_alu(ctx->bc, &alu);
1855 if (r)
1856 return r;
1857 }
1858 return 0;
1859 }
1860
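/* Build a mask of the channels a source swizzle actually reads,
 * e.g. a .xxzz swizzle gives 0b0101, so only dwords 0 and 2 of the
 * attribute need to be fetched from LDS. */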
1861 static int fetch_mask(struct tgsi_src_register *reg)
1862 {
1863 int mask = 0;
1864 mask |= 1 << reg->SwizzleX;
1865 mask |= 1 << reg->SwizzleY;
1866 mask |= 1 << reg->SwizzleZ;
1867 mask |= 1 << reg->SwizzleW;
1868 return mask;
1869 }
1870
1871 static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1872 {
1873 int r;
1874 unsigned temp_reg = r600_get_temp(ctx);
1875
1876 	r = get_lds_offset0(ctx, 2, temp_reg,
1877 			    !src->Register.Dimension);
1878 if (r)
1879 return r;
1880
1881 /* the base address is now in temp.x */
1882 r = r600_get_byte_address(ctx, temp_reg,
1883 NULL, src, ctx->tess_output_info, 1);
1884 if (r)
1885 return r;
1886
1887 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
1888 if (r)
1889 return r;
1890 return 0;
1891 }
1892
1893 static int fetch_tcs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1894 {
1895 int r;
1896 unsigned temp_reg = r600_get_temp(ctx);
1897
1898 	/* t.x = ips (input patch stride, tess_input_info.x) * RelPatchID (r0.y) */
1899 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
1900 temp_reg, 0,
1901 ctx->tess_input_info, 0,
1902 0, 1);
1903
1904 if (r)
1905 return r;
1906
1907 /* the base address is now in temp.x */
1908 r = r600_get_byte_address(ctx, temp_reg,
1909 NULL, src, ctx->tess_input_info, 1);
1910 if (r)
1911 return r;
1912
1913 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
1914 if (r)
1915 return r;
1916 return 0;
1917 }
1918
1919 static int fetch_tcs_output(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1920 {
1921 int r;
1922 unsigned temp_reg = r600_get_temp(ctx);
1923
1924 	r = get_lds_offset0(ctx, 1, temp_reg,
1925 			    !src->Register.Dimension);
1926 if (r)
1927 return r;
1928 /* the base address is now in temp.x */
1929 r = r600_get_byte_address(ctx, temp_reg,
1930 NULL, src,
1931 ctx->tess_output_info, 1);
1932 if (r)
1933 return r;
1934
1935 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
1936 if (r)
1937 return r;
1938 return 0;
1939 }
1940
1941 static int tgsi_split_lds_inputs(struct r600_shader_ctx *ctx)
1942 {
1943 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1944 unsigned i;
1945
1946 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1947 struct tgsi_full_src_register *src = &inst->Src[i];
1948
1949 if (ctx->type == PIPE_SHADER_TESS_EVAL && src->Register.File == TGSI_FILE_INPUT) {
1950 int treg = r600_get_temp(ctx);
1951 fetch_tes_input(ctx, src, treg);
1952 ctx->src[i].sel = treg;
1953 ctx->src[i].rel = 0;
1954 }
1955 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_INPUT) {
1956 int treg = r600_get_temp(ctx);
1957 fetch_tcs_input(ctx, src, treg);
1958 ctx->src[i].sel = treg;
1959 ctx->src[i].rel = 0;
1960 }
1961 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_OUTPUT) {
1962 int treg = r600_get_temp(ctx);
1963 fetch_tcs_output(ctx, src, treg);
1964 ctx->src[i].sel = treg;
1965 ctx->src[i].rel = 0;
1966 }
1967 }
1968 return 0;
1969 }
1970
1971 static int tgsi_split_constant(struct r600_shader_ctx *ctx)
1972 {
1973 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1974 struct r600_bytecode_alu alu;
1975 int i, j, k, nconst, r;
1976
1977 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
1978 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
1979 nconst++;
1980 }
1981 tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
1982 }
1983 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
1984 if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
1985 continue;
1986 }
1987
1988 if (ctx->src[i].rel) {
1989 int chan = inst->Src[i].Indirect.Swizzle;
1990 int treg = r600_get_temp(ctx);
1991 if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
1992 return r;
1993
1994 ctx->src[i].kc_bank = 0;
1995 ctx->src[i].kc_rel = 0;
1996 ctx->src[i].sel = treg;
1997 ctx->src[i].rel = 0;
1998 j--;
1999 } else if (j > 0) {
2000 int treg = r600_get_temp(ctx);
2001 for (k = 0; k < 4; k++) {
2002 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2003 alu.op = ALU_OP1_MOV;
2004 alu.src[0].sel = ctx->src[i].sel;
2005 alu.src[0].chan = k;
2006 alu.src[0].rel = ctx->src[i].rel;
2007 alu.src[0].kc_bank = ctx->src[i].kc_bank;
2008 alu.src[0].kc_rel = ctx->src[i].kc_rel;
2009 alu.dst.sel = treg;
2010 alu.dst.chan = k;
2011 alu.dst.write = 1;
2012 if (k == 3)
2013 alu.last = 1;
2014 r = r600_bytecode_add_alu(ctx->bc, &alu);
2015 if (r)
2016 return r;
2017 }
2018 ctx->src[i].sel = treg;
2019 			ctx->src[i].rel = 0;
2020 j--;
2021 }
2022 }
2023 return 0;
2024 }
2025
2026 /* need to move any immediates into temps - e.g. the trig functions use literals for their PI-derived constants */
2027 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
2028 {
2029 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2030 struct r600_bytecode_alu alu;
2031 int i, j, k, nliteral, r;
2032
2033 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
2034 if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
2035 nliteral++;
2036 }
2037 }
2038 for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
2039 if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
2040 int treg = r600_get_temp(ctx);
2041 for (k = 0; k < 4; k++) {
2042 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2043 alu.op = ALU_OP1_MOV;
2044 alu.src[0].sel = ctx->src[i].sel;
2045 alu.src[0].chan = k;
2046 alu.src[0].value = ctx->src[i].value[k];
2047 alu.dst.sel = treg;
2048 alu.dst.chan = k;
2049 alu.dst.write = 1;
2050 if (k == 3)
2051 alu.last = 1;
2052 r = r600_bytecode_add_alu(ctx->bc, &alu);
2053 if (r)
2054 return r;
2055 }
2056 ctx->src[i].sel = treg;
2057 j--;
2058 }
2059 }
2060 return 0;
2061 }
2062
2063 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
2064 {
2065 int i, r, count = ctx->shader->ninput;
2066
2067 for (i = 0; i < count; i++) {
2068 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
2069 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
2070 if (r)
2071 return r;
2072 }
2073 }
2074 return 0;
2075 }
2076
2077 static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
2078 int stream, unsigned *stream_item_size UNUSED)
2079 {
2080 unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
2081 unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
2082 int j, r;
2083 unsigned i;
2084
2085 /* Sanity checking. */
2086 if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
2087 R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
2088 r = -EINVAL;
2089 goto out_err;
2090 }
2091 for (i = 0; i < so->num_outputs; i++) {
2092 if (so->output[i].output_buffer >= 4) {
2093 R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
2094 so->output[i].output_buffer);
2095 r = -EINVAL;
2096 goto out_err;
2097 }
2098 }
2099
2100 /* Initialize locations where the outputs are stored. */
2101 for (i = 0; i < so->num_outputs; i++) {
2102
2103 so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
2104 start_comp[i] = so->output[i].start_component;
2105 /* Lower outputs with dst_offset < start_component.
2106 *
2107 * We can only output 4D vectors with a write mask, e.g. we can
2108 * only output the W component at offset 3, etc. If we want
2109 * to store Y, Z, or W at buffer offset 0, we need to use MOV
2110 * to move it to X and output X. */
2111 if (so->output[i].dst_offset < so->output[i].start_component) {
2112 unsigned tmp = r600_get_temp(ctx);
2113
2114 for (j = 0; j < so->output[i].num_components; j++) {
2115 struct r600_bytecode_alu alu;
2116 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2117 alu.op = ALU_OP1_MOV;
2118 alu.src[0].sel = so_gpr[i];
2119 alu.src[0].chan = so->output[i].start_component + j;
2120
2121 alu.dst.sel = tmp;
2122 alu.dst.chan = j;
2123 alu.dst.write = 1;
2124 if (j == so->output[i].num_components - 1)
2125 alu.last = 1;
2126 r = r600_bytecode_add_alu(ctx->bc, &alu);
2127 if (r)
2128 return r;
2129 }
2130 start_comp[i] = 0;
2131 so_gpr[i] = tmp;
2132 }
2133 }
2134
2135 /* Write outputs to buffers. */
2136 for (i = 0; i < so->num_outputs; i++) {
2137 struct r600_bytecode_output output;
2138
2139 if (stream != -1 && stream != so->output[i].output_buffer)
2140 continue;
2141
2142 memset(&output, 0, sizeof(struct r600_bytecode_output));
2143 output.gpr = so_gpr[i];
2144 output.elem_size = so->output[i].num_components - 1;
2145 if (output.elem_size == 2)
2146 			output.elem_size = 3; // 3-dword bursts aren't supported, write 4 with junk at the end
2147 output.array_base = so->output[i].dst_offset - start_comp[i];
2148 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2149 output.burst_count = 1;
2150 /* array_size is an upper limit for the burst_count
2151 * with MEM_STREAM instructions */
2152 output.array_size = 0xFFF;
2153 output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];
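		/* e.g. two components starting at .y: ((1 << 2) - 1) << 1 = 0b0110 */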
2154
2155 if (ctx->bc->chip_class >= EVERGREEN) {
2156 switch (so->output[i].output_buffer) {
2157 case 0:
2158 output.op = CF_OP_MEM_STREAM0_BUF0;
2159 break;
2160 case 1:
2161 output.op = CF_OP_MEM_STREAM0_BUF1;
2162 break;
2163 case 2:
2164 output.op = CF_OP_MEM_STREAM0_BUF2;
2165 break;
2166 case 3:
2167 output.op = CF_OP_MEM_STREAM0_BUF3;
2168 break;
2169 }
2170 output.op += so->output[i].stream * 4;
2171 assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
2172 ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
2173 } else {
2174 switch (so->output[i].output_buffer) {
2175 case 0:
2176 output.op = CF_OP_MEM_STREAM0;
2177 break;
2178 case 1:
2179 output.op = CF_OP_MEM_STREAM1;
2180 break;
2181 case 2:
2182 output.op = CF_OP_MEM_STREAM2;
2183 break;
2184 case 3:
2185 output.op = CF_OP_MEM_STREAM3;
2186 break;
2187 }
2188 ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
2189 }
2190 r = r600_bytecode_add_output(ctx->bc, &output);
2191 if (r)
2192 goto out_err;
2193 }
2194 return 0;
2195 out_err:
2196 return r;
2197 }
2198
2199 static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
2200 {
2201 struct r600_bytecode_alu alu;
2202 unsigned reg;
2203
2204 if (!ctx->shader->vs_out_edgeflag)
2205 return;
2206
2207 reg = ctx->shader->output[ctx->edgeflag_output].gpr;
2208
2209 /* clamp(x, 0, 1) */
2210 memset(&alu, 0, sizeof(alu));
2211 alu.op = ALU_OP1_MOV;
2212 alu.src[0].sel = reg;
2213 alu.dst.sel = reg;
2214 alu.dst.write = 1;
2215 alu.dst.clamp = 1;
2216 alu.last = 1;
2217 r600_bytecode_add_alu(ctx->bc, &alu);
2218
2219 memset(&alu, 0, sizeof(alu));
2220 alu.op = ALU_OP1_FLT_TO_INT;
2221 alu.src[0].sel = reg;
2222 alu.dst.sel = reg;
2223 alu.dst.write = 1;
2224 alu.last = 1;
2225 r600_bytecode_add_alu(ctx->bc, &alu);
2226 }
2227
2228 static int generate_gs_copy_shader(struct r600_context *rctx,
2229 struct r600_pipe_shader *gs,
2230 struct pipe_stream_output_info *so)
2231 {
2232 struct r600_shader_ctx ctx = {};
2233 struct r600_shader *gs_shader = &gs->shader;
2234 struct r600_pipe_shader *cshader;
2235 unsigned ocnt = gs_shader->noutput;
2236 struct r600_bytecode_alu alu;
2237 struct r600_bytecode_vtx vtx;
2238 struct r600_bytecode_output output;
2239 struct r600_bytecode_cf *cf_jump, *cf_pop,
2240 *last_exp_pos = NULL, *last_exp_param = NULL;
2241 int next_clip_pos = 61, next_param = 0;
2242 unsigned i, j;
2243 int ring;
2244 bool only_ring_0 = true;
2245 cshader = calloc(1, sizeof(struct r600_pipe_shader));
2246 if (!cshader)
2247 return 0;
2248
2249 memcpy(cshader->shader.output, gs_shader->output, ocnt *
2250 sizeof(struct r600_shader_io));
2251
2252 cshader->shader.noutput = ocnt;
2253
2254 ctx.shader = &cshader->shader;
2255 ctx.bc = &ctx.shader->bc;
2256 ctx.type = ctx.bc->type = PIPE_SHADER_VERTEX;
2257
2258 r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
2259 rctx->screen->has_compressed_msaa_texturing);
2260
2261 ctx.bc->isa = rctx->isa;
2262
2263 cf_jump = NULL;
2264 memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));
2265
2266 	/* R0.x = R0.x & 0x3fffffff (clear the stream ID bits) */
2267 memset(&alu, 0, sizeof(alu));
2268 alu.op = ALU_OP2_AND_INT;
2269 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2270 alu.src[1].value = 0x3fffffff;
2271 alu.dst.write = 1;
2272 r600_bytecode_add_alu(ctx.bc, &alu);
2273
2274 	/* R0.y = R0.x >> 30 (the stream ID lives in the top two bits) */
2275 memset(&alu, 0, sizeof(alu));
2276 alu.op = ALU_OP2_LSHR_INT;
2277 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2278 alu.src[1].value = 0x1e;
2279 alu.dst.chan = 1;
2280 alu.dst.write = 1;
2281 alu.last = 1;
2282 r600_bytecode_add_alu(ctx.bc, &alu);
2283
2284 /* fetch vertex data from GSVS ring */
2285 for (i = 0; i < ocnt; ++i) {
2286 struct r600_shader_io *out = &ctx.shader->output[i];
2287
2288 out->gpr = i + 1;
2289 out->ring_offset = i * 16;
2290
2291 memset(&vtx, 0, sizeof(vtx));
2292 vtx.op = FETCH_OP_VFETCH;
2293 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
2294 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2295 vtx.mega_fetch_count = 16;
2296 vtx.offset = out->ring_offset;
2297 vtx.dst_gpr = out->gpr;
2298 vtx.src_gpr = 0;
2299 vtx.dst_sel_x = 0;
2300 vtx.dst_sel_y = 1;
2301 vtx.dst_sel_z = 2;
2302 vtx.dst_sel_w = 3;
2303 if (rctx->b.chip_class >= EVERGREEN) {
2304 vtx.use_const_fields = 1;
2305 } else {
2306 vtx.data_format = FMT_32_32_32_32_FLOAT;
2307 }
2308
2309 r600_bytecode_add_vtx(ctx.bc, &vtx);
2310 }
2311 ctx.temp_reg = i + 1;
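	/* emit one predicated block per ring: compare the stream ID in
	 * R0.y against the ring and stream the vertex out only on a
	 * match; the ring 0 block is always emitted */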
2312 for (ring = 3; ring >= 0; --ring) {
2313 bool enabled = false;
2314 for (i = 0; i < so->num_outputs; i++) {
2315 if (so->output[i].stream == ring) {
2316 enabled = true;
2317 if (ring > 0)
2318 only_ring_0 = false;
2319 break;
2320 }
2321 }
2322 if (ring != 0 && !enabled) {
2323 cshader->shader.ring_item_sizes[ring] = 0;
2324 continue;
2325 }
2326
2327 if (cf_jump) {
2328 // Patch up jump label
2329 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2330 cf_pop = ctx.bc->cf_last;
2331
2332 cf_jump->cf_addr = cf_pop->id + 2;
2333 cf_jump->pop_count = 1;
2334 cf_pop->cf_addr = cf_pop->id + 2;
2335 cf_pop->pop_count = 1;
2336 }
2337
2338 /* PRED_SETE_INT __, R0.y, ring */
2339 memset(&alu, 0, sizeof(alu));
2340 alu.op = ALU_OP2_PRED_SETE_INT;
2341 alu.src[0].chan = 1;
2342 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2343 alu.src[1].value = ring;
2344 alu.execute_mask = 1;
2345 alu.update_pred = 1;
2346 alu.last = 1;
2347 r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);
2348
2349 r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
2350 cf_jump = ctx.bc->cf_last;
2351
2352 if (enabled)
2353 emit_streamout(&ctx, so, only_ring_0 ? -1 : ring, &cshader->shader.ring_item_sizes[ring]);
2354 cshader->shader.ring_item_sizes[ring] = ocnt * 16;
2355 }
2356
2357 	/* the bytecode builder inserts NOPs on R600 - mirror that here */
2358 if (ctx.bc->chip_class == R600) {
2359 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2360 alu.op = ALU_OP0_NOP;
2361 alu.last = 1;
2362 r600_bytecode_add_alu(ctx.bc, &alu);
2363
2364 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2365 }
2366
2367 /* export vertex data */
2368 /* XXX factor out common code with r600_shader_from_tgsi ? */
2369 for (i = 0; i < ocnt; ++i) {
2370 struct r600_shader_io *out = &ctx.shader->output[i];
2371 bool instream0 = true;
2372 if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
2373 continue;
2374
2375 for (j = 0; j < so->num_outputs; j++) {
2376 if (so->output[j].register_index == i) {
2377 if (so->output[j].stream == 0)
2378 break;
2379 if (so->output[j].stream > 0)
2380 instream0 = false;
2381 }
2382 }
2383 if (!instream0)
2384 continue;
2385 memset(&output, 0, sizeof(output));
2386 output.gpr = out->gpr;
2387 output.elem_size = 3;
2388 output.swizzle_x = 0;
2389 output.swizzle_y = 1;
2390 output.swizzle_z = 2;
2391 output.swizzle_w = 3;
2392 output.burst_count = 1;
2393 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2394 output.op = CF_OP_EXPORT;
2395 switch (out->name) {
2396 case TGSI_SEMANTIC_POSITION:
2397 output.array_base = 60;
2398 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2399 break;
2400
2401 case TGSI_SEMANTIC_PSIZE:
2402 output.array_base = 61;
2403 if (next_clip_pos == 61)
2404 next_clip_pos = 62;
2405 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2406 output.swizzle_y = 7;
2407 output.swizzle_z = 7;
2408 output.swizzle_w = 7;
2409 ctx.shader->vs_out_misc_write = 1;
2410 ctx.shader->vs_out_point_size = 1;
2411 break;
2412 case TGSI_SEMANTIC_LAYER:
2413 if (out->spi_sid) {
2414 /* duplicate it as PARAM to pass to the pixel shader */
2415 output.array_base = next_param++;
2416 r600_bytecode_add_output(ctx.bc, &output);
2417 last_exp_param = ctx.bc->cf_last;
2418 }
2419 output.array_base = 61;
2420 if (next_clip_pos == 61)
2421 next_clip_pos = 62;
2422 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2423 output.swizzle_x = 7;
2424 output.swizzle_y = 7;
2425 output.swizzle_z = 0;
2426 output.swizzle_w = 7;
2427 ctx.shader->vs_out_misc_write = 1;
2428 ctx.shader->vs_out_layer = 1;
2429 break;
2430 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2431 if (out->spi_sid) {
2432 /* duplicate it as PARAM to pass to the pixel shader */
2433 output.array_base = next_param++;
2434 r600_bytecode_add_output(ctx.bc, &output);
2435 last_exp_param = ctx.bc->cf_last;
2436 }
2437 output.array_base = 61;
2438 if (next_clip_pos == 61)
2439 next_clip_pos = 62;
2440 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2441 ctx.shader->vs_out_misc_write = 1;
2442 ctx.shader->vs_out_viewport = 1;
2443 output.swizzle_x = 7;
2444 output.swizzle_y = 7;
2445 output.swizzle_z = 7;
2446 output.swizzle_w = 0;
2447 break;
2448 case TGSI_SEMANTIC_CLIPDIST:
2449 /* spi_sid is 0 for clipdistance outputs that were generated
2450 * for clipvertex - we don't need to pass them to PS */
2451 ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
2452 ctx.shader->cull_dist_write = gs->shader.cull_dist_write;
2453 ctx.shader->cc_dist_mask = gs->shader.cc_dist_mask;
2454 if (out->spi_sid) {
2455 /* duplicate it as PARAM to pass to the pixel shader */
2456 output.array_base = next_param++;
2457 r600_bytecode_add_output(ctx.bc, &output);
2458 last_exp_param = ctx.bc->cf_last;
2459 }
2460 output.array_base = next_clip_pos++;
2461 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2462 break;
2463 case TGSI_SEMANTIC_FOG:
2464 output.swizzle_y = 4; /* 0 */
2465 output.swizzle_z = 4; /* 0 */
2466 output.swizzle_w = 5; /* 1 */
2467 break;
2468 default:
2469 output.array_base = next_param++;
2470 break;
2471 }
2472 r600_bytecode_add_output(ctx.bc, &output);
2473 if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
2474 last_exp_param = ctx.bc->cf_last;
2475 else
2476 last_exp_pos = ctx.bc->cf_last;
2477 }
2478
2479 if (!last_exp_pos) {
2480 memset(&output, 0, sizeof(output));
2481 output.gpr = 0;
2482 output.elem_size = 3;
2483 output.swizzle_x = 7;
2484 output.swizzle_y = 7;
2485 output.swizzle_z = 7;
2486 output.swizzle_w = 7;
2487 output.burst_count = 1;
2489 output.op = CF_OP_EXPORT;
2490 output.array_base = 60;
2491 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2492 r600_bytecode_add_output(ctx.bc, &output);
2493 last_exp_pos = ctx.bc->cf_last;
2494 }
2495
2496 if (!last_exp_param) {
2497 memset(&output, 0, sizeof(output));
2498 output.gpr = 0;
2499 output.elem_size = 3;
2500 output.swizzle_x = 7;
2501 output.swizzle_y = 7;
2502 output.swizzle_z = 7;
2503 output.swizzle_w = 7;
2504 output.burst_count = 1;
2506 output.op = CF_OP_EXPORT;
2507 output.array_base = next_param++;
2508 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2509 r600_bytecode_add_output(ctx.bc, &output);
2510 last_exp_param = ctx.bc->cf_last;
2511 }
2512
2513 last_exp_pos->op = CF_OP_EXPORT_DONE;
2514 last_exp_param->op = CF_OP_EXPORT_DONE;
2515
2516 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2517 cf_pop = ctx.bc->cf_last;
2518
2519 cf_jump->cf_addr = cf_pop->id + 2;
2520 cf_jump->pop_count = 1;
2521 cf_pop->cf_addr = cf_pop->id + 2;
2522 cf_pop->pop_count = 1;
2523
2524 if (ctx.bc->chip_class == CAYMAN)
2525 cm_bytecode_add_cf_end(ctx.bc);
2526 else {
2527 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2528 ctx.bc->cf_last->end_of_program = 1;
2529 }
2530
2531 gs->gs_copy_shader = cshader;
2532 cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2533
2534 ctx.bc->nstack = 1;
2535
2536 return r600_bytecode_build(ctx.bc);
2537 }
2538
2539 static int emit_inc_ring_offset(struct r600_shader_ctx *ctx, int idx, bool ind)
2540 {
2541 if (ind) {
2542 struct r600_bytecode_alu alu;
2543 int r;
2544
2545 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2546 alu.op = ALU_OP2_ADD_INT;
2547 alu.src[0].sel = ctx->gs_export_gpr_tregs[idx];
2548 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2549 alu.src[1].value = ctx->gs_out_ring_offset >> 4;
2550 alu.dst.sel = ctx->gs_export_gpr_tregs[idx];
2551 alu.dst.write = 1;
2552 alu.last = 1;
2553 r = r600_bytecode_add_alu(ctx->bc, &alu);
2554 if (r)
2555 return r;
2556 }
2557 return 0;
2558 }
2559
2560 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so UNUSED, int stream, bool ind)
2561 {
2562 struct r600_bytecode_output output;
2563 int ring_offset;
2564 unsigned i, k;
2565 int effective_stream = stream == -1 ? 0 : stream;
2566 int idx = 0;
2567
2568 for (i = 0; i < ctx->shader->noutput; i++) {
2569 if (ctx->gs_for_vs) {
2570 			/* for ES we need to look up the ring offset the GS expects
2571 			 * (map this output to a GS input by name and sid) */
2572 /* FIXME precompute offsets */
2573 ring_offset = -1;
2574 for(k = 0; k < ctx->gs_for_vs->ninput; ++k) {
2575 struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
2576 struct r600_shader_io *out = &ctx->shader->output[i];
2577 if (in->name == out->name && in->sid == out->sid)
2578 ring_offset = in->ring_offset;
2579 }
2580
2581 if (ring_offset == -1)
2582 continue;
2583 } else {
2584 ring_offset = idx * 16;
2585 idx++;
2586 }
2587
2588 if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
2589 continue;
2590 		/* next_ring_offset, filled while parsing the input decls, holds the
2591 		 * total size of one vertex's data; gs_next_vertex is the current vertex index */
2592 if (!ind)
2593 ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;
2594
2595 memset(&output, 0, sizeof(struct r600_bytecode_output));
2596 output.gpr = ctx->shader->output[i].gpr;
2597 output.elem_size = 3;
2598 output.comp_mask = 0xF;
2599 output.burst_count = 1;
2600
2601 if (ind)
2602 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
2603 else
2604 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2605
2606 switch (stream) {
2607 default:
2608 case 0:
2609 output.op = CF_OP_MEM_RING; break;
2610 case 1:
2611 output.op = CF_OP_MEM_RING1; break;
2612 case 2:
2613 output.op = CF_OP_MEM_RING2; break;
2614 case 3:
2615 output.op = CF_OP_MEM_RING3; break;
2616 }
2617
2618 		output.array_base = ring_offset >> 2; /* in dwords */
2619 		if (ind) {
2620 			output.array_size = 0xfff;
2621 			output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
2622 		}
2624 r600_bytecode_add_output(ctx->bc, &output);
2625 }
2626
2627 ++ctx->gs_next_vertex;
2628 return 0;
2629 }
2630
2631
2632 static int r600_fetch_tess_io_info(struct r600_shader_ctx *ctx)
2633 {
2634 int r;
2635 struct r600_bytecode_vtx vtx;
2636 int temp_val = ctx->temp_reg;
2637 	/* zero a temp to use as the VTX fetch index below */
2638 r = single_alu_op2(ctx, ALU_OP1_MOV,
2639 temp_val, 0,
2640 V_SQ_ALU_SRC_LITERAL, 0,
2641 0, 0);
2642 if (r)
2643 return r;
2644
2645 /* used by VS/TCS */
2646 if (ctx->tess_input_info) {
2647 		/* fetch tcs input values into reserved space */
2648 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2649 vtx.op = FETCH_OP_VFETCH;
2650 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2651 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2652 vtx.mega_fetch_count = 16;
2653 vtx.data_format = FMT_32_32_32_32;
2654 vtx.num_format_all = 2;
2655 vtx.format_comp_all = 1;
2656 vtx.use_const_fields = 0;
2657 vtx.endian = r600_endian_swap(32);
2658 vtx.srf_mode_all = 1;
2659 vtx.offset = 0;
2660 vtx.dst_gpr = ctx->tess_input_info;
2661 vtx.dst_sel_x = 0;
2662 vtx.dst_sel_y = 1;
2663 vtx.dst_sel_z = 2;
2664 vtx.dst_sel_w = 3;
2665 vtx.src_gpr = temp_val;
2666 vtx.src_sel_x = 0;
2667
2668 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2669 if (r)
2670 return r;
2671 }
2672
2673 /* used by TCS/TES */
2674 if (ctx->tess_output_info) {
2675 		/* fetch tcs output values into reserved space */
2676 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2677 vtx.op = FETCH_OP_VFETCH;
2678 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2679 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2680 vtx.mega_fetch_count = 16;
2681 vtx.data_format = FMT_32_32_32_32;
2682 vtx.num_format_all = 2;
2683 vtx.format_comp_all = 1;
2684 vtx.use_const_fields = 0;
2685 vtx.endian = r600_endian_swap(32);
2686 vtx.srf_mode_all = 1;
2687 vtx.offset = 16;
2688 vtx.dst_gpr = ctx->tess_output_info;
2689 vtx.dst_sel_x = 0;
2690 vtx.dst_sel_y = 1;
2691 vtx.dst_sel_z = 2;
2692 vtx.dst_sel_w = 3;
2693 vtx.src_gpr = temp_val;
2694 vtx.src_sel_x = 0;
2695
2696 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2697 if (r)
2698 return r;
2699 }
2700 return 0;
2701 }
2702
2703 static int emit_lds_vs_writes(struct r600_shader_ctx *ctx)
2704 {
2705 int j, r;
2706 int temp_reg;
2707 unsigned i;
2708
2709 	/* fetch tcs input values into tess_input_info */
2710 ctx->tess_input_info = r600_get_temp(ctx);
2711 ctx->tess_output_info = 0;
2712 r = r600_fetch_tess_io_info(ctx);
2713 if (r)
2714 return r;
2715
2716 temp_reg = r600_get_temp(ctx);
2717 	/* temp_reg.x = vertexID * vertex_dw_stride,
2718 	 * the LDS base address for this vertex's outputs */
2719 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
2720 temp_reg, 0,
2721 ctx->tess_input_info, 1,
2722 0, 1); /* rel id in r0.y? */
2723 if (r)
2724 return r;
2725
2726 for (i = 0; i < ctx->shader->noutput; i++) {
2727 struct r600_bytecode_alu alu;
2728 int param = r600_get_lds_unique_index(ctx->shader->output[i].name, ctx->shader->output[i].sid);
2729
2730 if (param) {
2731 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2732 temp_reg, 1,
2733 temp_reg, 0,
2734 V_SQ_ALU_SRC_LITERAL, param * 16);
2735 if (r)
2736 return r;
2737 }
2738
2739 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2740 temp_reg, 2,
2741 temp_reg, param ? 1 : 0,
2742 V_SQ_ALU_SRC_LITERAL, 8);
2743 if (r)
2744 return r;
2745
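		/* each output vec4 goes out as two 64-bit LDS_WRITE_REL ops:
		 * .xy at the address in temp and .zw at that address + 8 */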
2747 for (j = 0; j < 2; j++) {
2748 int chan = (j == 1) ? 2 : (param ? 1 : 0);
2749 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2750 alu.op = LDS_OP3_LDS_WRITE_REL;
2751 alu.src[0].sel = temp_reg;
2752 alu.src[0].chan = chan;
2753 alu.src[1].sel = ctx->shader->output[i].gpr;
2754 alu.src[1].chan = j * 2;
2755 alu.src[2].sel = ctx->shader->output[i].gpr;
2756 alu.src[2].chan = (j * 2) + 1;
2757 alu.last = 1;
2758 alu.dst.chan = 0;
2759 alu.lds_idx = 1;
2760 alu.is_lds_idx_op = true;
2761 r = r600_bytecode_add_alu(ctx->bc, &alu);
2762 if (r)
2763 return r;
2764 }
2765 }
2766 return 0;
2767 }
2768
2769 static int r600_store_tcs_output(struct r600_shader_ctx *ctx)
2770 {
2771 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2772 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
2773 int i, r, lasti;
2774 int temp_reg = r600_get_temp(ctx);
2775 struct r600_bytecode_alu alu;
2776 unsigned write_mask = dst->Register.WriteMask;
2777
2778 if (inst->Dst[0].Register.File != TGSI_FILE_OUTPUT)
2779 return 0;
2780
2781 	r = get_lds_offset0(ctx, 1, temp_reg, !dst->Register.Dimension);
2782 if (r)
2783 return r;
2784
2785 /* the base address is now in temp.x */
2786 r = r600_get_byte_address(ctx, temp_reg,
2787 &inst->Dst[0], NULL, ctx->tess_output_info, 1);
2788 if (r)
2789 return r;
2790
2791 /* LDS write */
2792 lasti = tgsi_last_instruction(write_mask);
2793 for (i = 1; i <= lasti; i++) {
2794
2795 if (!(write_mask & (1 << i)))
2796 continue;
2797 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2798 temp_reg, i,
2799 temp_reg, 0,
2800 V_SQ_ALU_SRC_LITERAL, 4 * i);
2801 if (r)
2802 return r;
2803 }
2804
2805 for (i = 0; i <= lasti; i++) {
2806 if (!(write_mask & (1 << i)))
2807 continue;
2808
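		/* when both channels of an .xy or .zw pair are written, emit
		 * a single 64-bit LDS_WRITE_REL and skip the second channel */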
2809 if ((i == 0 && ((write_mask & 3) == 3)) ||
2810 (i == 2 && ((write_mask & 0xc) == 0xc))) {
2811 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2812 alu.op = LDS_OP3_LDS_WRITE_REL;
2813 alu.src[0].sel = temp_reg;
2814 alu.src[0].chan = i;
2815
2816 alu.src[1].sel = dst->Register.Index;
2817 alu.src[1].sel += ctx->file_offset[dst->Register.File];
2818 alu.src[1].chan = i;
2819
2820 alu.src[2].sel = dst->Register.Index;
2821 alu.src[2].sel += ctx->file_offset[dst->Register.File];
2822 alu.src[2].chan = i + 1;
2823 alu.lds_idx = 1;
2824 alu.dst.chan = 0;
2825 alu.last = 1;
2826 alu.is_lds_idx_op = true;
2827 r = r600_bytecode_add_alu(ctx->bc, &alu);
2828 if (r)
2829 return r;
2830 i += 1;
2831 continue;
2832 }
2833 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2834 alu.op = LDS_OP2_LDS_WRITE;
2835 alu.src[0].sel = temp_reg;
2836 alu.src[0].chan = i;
2837
2838 alu.src[1].sel = dst->Register.Index;
2839 alu.src[1].sel += ctx->file_offset[dst->Register.File];
2840 alu.src[1].chan = i;
2841
2842 alu.src[2].sel = V_SQ_ALU_SRC_0;
2843 alu.dst.chan = 0;
2844 alu.last = 1;
2845 alu.is_lds_idx_op = true;
2846 r = r600_bytecode_add_alu(ctx->bc, &alu);
2847 if (r)
2848 return r;
2849 }
2850 return 0;
2851 }
2852
2853 static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
2854 int output_idx, int nc)
2855 {
2856 int param;
2857 unsigned temp_reg = r600_get_temp(ctx);
2858 unsigned name = ctx->shader->output[output_idx].name;
2859 int dreg = ctx->shader->output[output_idx].gpr;
2860 int r;
2861
2862 param = r600_get_lds_unique_index(name, 0);
2863 r = get_lds_offset0(ctx, 1, temp_reg, true);
2864 if (r)
2865 return r;
2866
2867 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2868 temp_reg, 0,
2869 temp_reg, 0,
2870 V_SQ_ALU_SRC_LITERAL, param * 16);
2871 if (r)
2872 return r;
2873
2874 do_lds_fetch_values(ctx, temp_reg, dreg, ((1u << nc) - 1));
2875 return 0;
2876 }
2877
2878 static int r600_emit_tess_factor(struct r600_shader_ctx *ctx)
2879 {
2880 int stride, outer_comps, inner_comps;
2881 int tessinner_idx = -1, tessouter_idx = -1;
2882 int i, r;
2883 unsigned j;
2884 int temp_reg = r600_get_temp(ctx);
2885 int treg[3] = {-1, -1, -1};
2886 struct r600_bytecode_alu alu;
2887 struct r600_bytecode_cf *cf_jump, *cf_pop;
2888
2889 /* only execute factor emission for invocation 0 */
2890 	/* PRED_SETE_INT __, R0.z, 0 */
2891 memset(&alu, 0, sizeof(alu));
2892 alu.op = ALU_OP2_PRED_SETE_INT;
2893 alu.src[0].chan = 2;
2894 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2895 alu.execute_mask = 1;
2896 alu.update_pred = 1;
2897 alu.last = 1;
2898 r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_PUSH_BEFORE);
2899
2900 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
2901 cf_jump = ctx->bc->cf_last;
2902
2903 treg[0] = r600_get_temp(ctx);
2904 switch (ctx->shader->tcs_prim_mode) {
2905 case PIPE_PRIM_LINES:
2906 stride = 8; /* 2 dwords, 1 vec2 store */
2907 outer_comps = 2;
2908 inner_comps = 0;
2909 break;
2910 case PIPE_PRIM_TRIANGLES:
2911 stride = 16; /* 4 dwords, 1 vec4 store */
2912 outer_comps = 3;
2913 inner_comps = 1;
2914 treg[1] = r600_get_temp(ctx);
2915 break;
2916 case PIPE_PRIM_QUADS:
2917 stride = 24; /* 6 dwords, 2 stores (vec4 + vec2) */
2918 outer_comps = 4;
2919 inner_comps = 2;
2920 treg[1] = r600_get_temp(ctx);
2921 treg[2] = r600_get_temp(ctx);
2922 break;
2923 default:
2924 assert(0);
2925 return -1;
2926 }
2927
2928 	/* R0.x = PatchID, R0.y = RelPatchID, R0.z = InvocationID, R0.w = tf_base */
2929 /* TF_WRITE takes index in R.x, value in R.y */
2930 for (j = 0; j < ctx->shader->noutput; j++) {
2931 if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSINNER)
2932 tessinner_idx = j;
2933 if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSOUTER)
2934 tessouter_idx = j;
2935 }
2936
2937 if (tessouter_idx == -1)
2938 return -1;
2939
2940 if (tessinner_idx == -1 && inner_comps)
2941 return -1;
2942
2943 if (tessouter_idx != -1) {
2944 r = r600_tess_factor_read(ctx, tessouter_idx, outer_comps);
2945 if (r)
2946 return r;
2947 }
2948
2949 if (tessinner_idx != -1) {
2950 r = r600_tess_factor_read(ctx, tessinner_idx, inner_comps);
2951 if (r)
2952 return r;
2953 }
2954
2955 	/* temp.x = tf_base (r0.w) + RelPatchID (r0.y) * tf_stride,
2956 	 * computed with a single MULADD */
2960 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
2961 temp_reg, 0,
2962 0, 1,
2963 V_SQ_ALU_SRC_LITERAL, stride,
2964 0, 3);
2965 if (r)
2966 return r;
2967
2968 for (i = 0; i < outer_comps + inner_comps; i++) {
2969 int out_idx = i >= outer_comps ? tessinner_idx : tessouter_idx;
2970 int out_comp = i >= outer_comps ? i - outer_comps : i;
2971
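		/* isolines: the hw expects the two outer factors in the
		 * opposite order, so swap components 0 and 1 */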
2972 if (ctx->shader->tcs_prim_mode == PIPE_PRIM_LINES) {
2973 if (out_comp == 1)
2974 out_comp = 0;
2975 else if (out_comp == 0)
2976 out_comp = 1;
2977 }
2978
2979 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2980 treg[i / 2], (2 * (i % 2)),
2981 temp_reg, 0,
2982 V_SQ_ALU_SRC_LITERAL, 4 * i);
2983 if (r)
2984 return r;
2985 r = single_alu_op2(ctx, ALU_OP1_MOV,
2986 treg[i / 2], 1 + (2 * (i%2)),
2987 ctx->shader->output[out_idx].gpr, out_comp,
2988 0, 0);
2989 if (r)
2990 return r;
2991 }
2992 for (i = 0; i < outer_comps + inner_comps; i++) {
2993 struct r600_bytecode_gds gds;
2994
2995 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
2996 gds.src_gpr = treg[i / 2];
2997 gds.src_sel_x = 2 * (i % 2);
2998 gds.src_sel_y = 1 + (2 * (i % 2));
2999 gds.src_sel_z = 4;
3000 gds.dst_sel_x = 7;
3001 gds.dst_sel_y = 7;
3002 gds.dst_sel_z = 7;
3003 gds.dst_sel_w = 7;
3004 gds.op = FETCH_OP_TF_WRITE;
3005 r = r600_bytecode_add_gds(ctx->bc, &gds);
3006 if (r)
3007 return r;
3008 }
3009
3010 // Patch up jump label
3011 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
3012 cf_pop = ctx->bc->cf_last;
3013
3014 cf_jump->cf_addr = cf_pop->id + 2;
3015 cf_jump->pop_count = 1;
3016 cf_pop->cf_addr = cf_pop->id + 2;
3017 cf_pop->pop_count = 1;
3018
3019 return 0;
3020 }
3021
3022 /*
3023  * We have to work out the thread ID for load and atomic
3024  * operations, which store the returned value to an index
3025  * in an intermediate buffer.
3026  * The index is derived from the thread ID the MBCNT
3027  * instructions provide within a wave: the shader engine ID
3028  * is multiplied by 256, the wave ID is added, the result is
3029  * multiplied by 64 and the thread ID is added, i.e.
3030  *
3031  *   index = (SE_ID * 256 + HW_WAVE_ID) * 64 + thread_in_wave
3032  */
3033 static int load_thread_id_gpr(struct r600_shader_ctx *ctx)
3034 {
3035 struct r600_bytecode_alu alu;
3036 int r;
3037
3038 if (ctx->thread_id_gpr_loaded)
3039 return 0;
3040
3041 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3042 alu.op = ALU_OP1_MBCNT_32LO_ACCUM_PREV_INT;
3043 alu.dst.sel = ctx->temp_reg;
3044 alu.dst.chan = 0;
3045 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3046 alu.src[0].value = 0xffffffff;
3047 alu.dst.write = 1;
3048 r = r600_bytecode_add_alu(ctx->bc, &alu);
3049 if (r)
3050 return r;
3051
3052 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3053 alu.op = ALU_OP1_MBCNT_32HI_INT;
3054 alu.dst.sel = ctx->temp_reg;
3055 alu.dst.chan = 1;
3056 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3057 alu.src[0].value = 0xffffffff;
3058 alu.dst.write = 1;
3059 r = r600_bytecode_add_alu(ctx->bc, &alu);
3060 if (r)
3061 return r;
3062
3063 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3064 alu.op = ALU_OP3_MULADD_UINT24;
3065 alu.dst.sel = ctx->temp_reg;
3066 alu.dst.chan = 2;
3067 alu.src[0].sel = EG_V_SQ_ALU_SRC_SE_ID;
3068 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3069 alu.src[1].value = 256;
3070 alu.src[2].sel = EG_V_SQ_ALU_SRC_HW_WAVE_ID;
3071 alu.dst.write = 1;
3072 alu.is_op3 = 1;
3073 alu.last = 1;
3074 r = r600_bytecode_add_alu(ctx->bc, &alu);
3075 if (r)
3076 return r;
3077
3078 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
3079 ctx->thread_id_gpr, 1,
3080 ctx->temp_reg, 2,
3081 V_SQ_ALU_SRC_LITERAL, 0x40,
3082 ctx->temp_reg, 0);
3083 if (r)
3084 return r;
3085 ctx->thread_id_gpr_loaded = true;
3086 return 0;
3087 }
3088
3089 static int r600_shader_from_tgsi(struct r600_context *rctx,
3090 struct r600_pipe_shader *pipeshader,
3091 union r600_shader_key key)
3092 {
3093 struct r600_screen *rscreen = rctx->screen;
3094 struct r600_shader *shader = &pipeshader->shader;
3095 struct tgsi_token *tokens = pipeshader->selector->tokens;
3096 struct pipe_stream_output_info so = pipeshader->selector->so;
3097 struct tgsi_full_immediate *immediate;
3098 struct r600_shader_ctx ctx;
3099 struct r600_bytecode_output output[ARRAY_SIZE(shader->output)];
3100 unsigned output_done, noutput;
3101 unsigned opcode;
3102 int j, k, r = 0;
3103 unsigned i;
3104 int next_param_base = 0, next_clip_base;
3105 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
3106 bool indirect_gprs;
3107 bool ring_outputs = false;
3108 bool lds_outputs = false;
3109 bool lds_inputs = false;
3110 bool pos_emitted = false;
3111
3112 ctx.bc = &shader->bc;
3113 ctx.shader = shader;
3114
3115 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
3116 rscreen->has_compressed_msaa_texturing);
3117 ctx.tokens = tokens;
3118 tgsi_scan_shader(tokens, &ctx.info);
3119 shader->indirect_files = ctx.info.indirect_files;
3120
3121 shader->uses_doubles = ctx.info.uses_doubles;
3122 shader->uses_atomics = ctx.info.file_mask[TGSI_FILE_HW_ATOMIC];
3123 shader->nsys_inputs = 0;
3124
3125 shader->uses_images = ctx.info.file_count[TGSI_FILE_IMAGE] > 0 ||
3126 ctx.info.file_count[TGSI_FILE_BUFFER] > 0;
3127 indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
3128 tgsi_parse_init(&ctx.parse, tokens);
3129 ctx.type = ctx.info.processor;
3130 shader->processor_type = ctx.type;
3131 ctx.bc->type = shader->processor_type;
3132
3133 switch (ctx.type) {
3134 case PIPE_SHADER_VERTEX:
3135 shader->vs_as_gs_a = key.vs.as_gs_a;
3136 shader->vs_as_es = key.vs.as_es;
3137 shader->vs_as_ls = key.vs.as_ls;
3138 shader->atomic_base = key.vs.first_atomic_counter;
3139 if (shader->vs_as_es)
3140 ring_outputs = true;
3141 if (shader->vs_as_ls)
3142 lds_outputs = true;
3143 break;
3144 case PIPE_SHADER_GEOMETRY:
3145 ring_outputs = true;
3146 shader->atomic_base = key.gs.first_atomic_counter;
3147 shader->gs_tri_strip_adj_fix = key.gs.tri_strip_adj_fix;
3148 break;
3149 case PIPE_SHADER_TESS_CTRL:
3150 shader->tcs_prim_mode = key.tcs.prim_mode;
3151 shader->atomic_base = key.tcs.first_atomic_counter;
3152 lds_outputs = true;
3153 lds_inputs = true;
3154 break;
3155 case PIPE_SHADER_TESS_EVAL:
3156 shader->tes_as_es = key.tes.as_es;
3157 shader->atomic_base = key.tes.first_atomic_counter;
3158 lds_inputs = true;
3159 if (shader->tes_as_es)
3160 ring_outputs = true;
3161 break;
3162 case PIPE_SHADER_FRAGMENT:
3163 shader->two_side = key.ps.color_two_side;
3164 shader->atomic_base = key.ps.first_atomic_counter;
3165 shader->rat_base = key.ps.nr_cbufs;
3166 shader->image_size_const_offset = key.ps.image_size_const_offset;
3167 break;
3168 case PIPE_SHADER_COMPUTE:
3169 shader->rat_base = 0;
3170 shader->image_size_const_offset = 0;
3171 break;
3172 default:
3173 break;
3174 }
3175
3176 if (shader->vs_as_es || shader->tes_as_es) {
3177 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
3178 } else {
3179 ctx.gs_for_vs = NULL;
3180 }
3181
3182 ctx.next_ring_offset = 0;
3183 ctx.gs_out_ring_offset = 0;
3184 ctx.gs_next_vertex = 0;
3185 ctx.gs_stream_output_info = &so;
3186
3187 ctx.face_gpr = -1;
3188 ctx.fixed_pt_position_gpr = -1;
3189 ctx.fragcoord_input = -1;
3190 ctx.colors_used = 0;
3191 ctx.clip_vertex_write = 0;
3192 ctx.thread_id_gpr_loaded = false;
3193
3194 ctx.cs_block_size_reg = -1;
3195 ctx.cs_grid_size_reg = -1;
3196 ctx.cs_block_size_loaded = false;
3197 ctx.cs_grid_size_loaded = false;
3198
3199 shader->nr_ps_color_exports = 0;
3200 shader->nr_ps_max_color_exports = 0;
3201
3202
3203 /* register allocations */
3204 /* Values [0,127] correspond to GPR[0..127].
3205 * Values [128,159] correspond to constant buffer bank 0
3206 * Values [160,191] correspond to constant buffer bank 1
3207 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
3208 * Values [256,287] correspond to constant buffer bank 2 (EG)
3209 * Values [288,319] correspond to constant buffer bank 3 (EG)
3210 * Other special values are shown in the list below.
3211 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
3212 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
3213 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
3214 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
3215 * 248 SQ_ALU_SRC_0: special constant 0.0.
3216 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
3217 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
3218 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
3219 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
3220 * 253 SQ_ALU_SRC_LITERAL: literal constant.
3221 * 254 SQ_ALU_SRC_PV: previous vector result.
3222 * 255 SQ_ALU_SRC_PS: previous scalar result.
3223 */
3224 for (i = 0; i < TGSI_FILE_COUNT; i++) {
3225 ctx.file_offset[i] = 0;
3226 }
3227
3228 if (ctx.type == PIPE_SHADER_VERTEX) {
3229
3230 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3231 if (ctx.info.num_inputs)
3232 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
3233 }
3234 if (ctx.type == PIPE_SHADER_FRAGMENT) {
3235 if (ctx.bc->chip_class >= EVERGREEN)
3236 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
3237 else
3238 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
3239 }
3240 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3241 		/* FIXME 1 would be enough in some cases (3 or fewer input vertices) */
3242 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3243 }
3244 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3245 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3246 if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3247 bool add_tesscoord = false, add_tess_inout = false;
3248 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3249 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3250 /* if we have tesscoord save one reg */
3251 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSCOORD)
3252 add_tesscoord = true;
3253 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSINNER ||
3254 ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSOUTER)
3255 add_tess_inout = true;
3256 }
3257 if (add_tesscoord || add_tess_inout)
3258 ctx.file_offset[TGSI_FILE_INPUT]++;
3259 if (add_tess_inout)
3260 			ctx.file_offset[TGSI_FILE_INPUT] += 2;
3261 }
3262 if (ctx.type == PIPE_SHADER_COMPUTE) {
3263 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3264 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3265 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_GRID_SIZE)
3266 ctx.cs_grid_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3267 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_BLOCK_SIZE)
3268 ctx.cs_block_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3269 }
3270 }
3271
3272 ctx.file_offset[TGSI_FILE_OUTPUT] =
3273 ctx.file_offset[TGSI_FILE_INPUT] +
3274 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3275 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
3276 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
3277
3278 /* Outside the GPR range. This will be translated to one of the
3279 * kcache banks later. */
3280 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
3281
3282 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
3283 ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
3284 ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
3285 ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
3286 ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
3287
3288 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3289 ctx.tess_input_info = ctx.bc->ar_reg + 3;
3290 ctx.tess_output_info = ctx.bc->ar_reg + 4;
3291 ctx.temp_reg = ctx.bc->ar_reg + 5;
3292 } else if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3293 ctx.tess_input_info = 0;
3294 ctx.tess_output_info = ctx.bc->ar_reg + 3;
3295 ctx.temp_reg = ctx.bc->ar_reg + 4;
3296 } else if (ctx.type == PIPE_SHADER_GEOMETRY) {
3297 ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
3298 ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
3299 ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
3300 ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
3301 ctx.temp_reg = ctx.bc->ar_reg + 7;
3302 if (ctx.shader->gs_tri_strip_adj_fix) {
3303 ctx.gs_rotated_input[0] = ctx.bc->ar_reg + 7;
3304 ctx.gs_rotated_input[1] = ctx.bc->ar_reg + 8;
3305 ctx.temp_reg += 2;
3306 } else {
3307 ctx.gs_rotated_input[0] = 0;
3308 ctx.gs_rotated_input[1] = 1;
3309 }
3310 } else {
3311 ctx.temp_reg = ctx.bc->ar_reg + 3;
3312 }
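	/* to recap: after the TGSI temporaries come ar_reg, the two index
	 * registers, the per-stage scratch regs reserved above (tess io
	 * info or the GS export tregs), and finally the driver temps
	 * starting at ctx.temp_reg */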
3313
3314 if (shader->uses_images) {
3315 ctx.thread_id_gpr = ctx.temp_reg++;
3316 ctx.thread_id_gpr_loaded = false;
3317 }
3318
3319 shader->max_arrays = 0;
3320 shader->num_arrays = 0;
3321 if (indirect_gprs) {
3322
3323 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
3324 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
3325 ctx.file_offset[TGSI_FILE_OUTPUT] -
3326 ctx.file_offset[TGSI_FILE_INPUT],
3327 0x0F);
3328 }
3329 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
3330 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
3331 ctx.file_offset[TGSI_FILE_TEMPORARY] -
3332 ctx.file_offset[TGSI_FILE_OUTPUT],
3333 0x0F);
3334 }
3335 }
3336
3337 ctx.nliterals = 0;
3338 ctx.literals = NULL;
3339
3340 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
3341 ctx.info.colors_written == 1;
3342 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
3343 shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT];
3344
3345 if (ctx.type == PIPE_SHADER_VERTEX ||
3346 ctx.type == PIPE_SHADER_GEOMETRY ||
3347 ctx.type == PIPE_SHADER_TESS_EVAL) {
3348 shader->cc_dist_mask = (1 << (ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED] +
3349 ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED])) - 1;
3350 shader->clip_dist_write = (1 << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]) - 1;
3351 shader->cull_dist_write = ((1 << ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED]) - 1) << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED];
3352 }
3353
3354 if (shader->vs_as_gs_a)
3355 vs_add_primid_output(&ctx, key.vs.prim_id_out);
3356
3357 if (ctx.type == PIPE_SHADER_TESS_EVAL)
3358 r600_fetch_tess_io_info(&ctx);
3359
3360 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3361 tgsi_parse_token(&ctx.parse);
3362 switch (ctx.parse.FullToken.Token.Type) {
3363 case TGSI_TOKEN_TYPE_IMMEDIATE:
3364 immediate = &ctx.parse.FullToken.FullImmediate;
3365 			ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16); /* 16 bytes per vec4 immediate */
3366 			if (ctx.literals == NULL) {
3367 r = -ENOMEM;
3368 goto out_err;
3369 }
3370 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
3371 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
3372 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
3373 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
3374 ctx.nliterals++;
3375 break;
3376 case TGSI_TOKEN_TYPE_DECLARATION:
3377 r = tgsi_declaration(&ctx);
3378 if (r)
3379 goto out_err;
3380 break;
3381 case TGSI_TOKEN_TYPE_INSTRUCTION:
3382 case TGSI_TOKEN_TYPE_PROPERTY:
3383 break;
3384 default:
3385 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
3386 r = -EINVAL;
3387 goto out_err;
3388 }
3389 }
3390
3391 shader->ring_item_sizes[0] = ctx.next_ring_offset;
3392 shader->ring_item_sizes[1] = 0;
3393 shader->ring_item_sizes[2] = 0;
3394 shader->ring_item_sizes[3] = 0;
3395
3396 /* Process two side if needed */
3397 if (shader->two_side && ctx.colors_used) {
3398 int i, count = ctx.shader->ninput;
3399 unsigned next_lds_loc = ctx.shader->nlds;
3400
3401 		/* The additional inputs are allocated right after the existing ones.
3402 		 * They are not needed once the color selection is done, so we don't
3403 		 * have to reserve these gprs for the rest of the shader code or to
3404 		 * adjust output offsets etc. */
3405 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
3406 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3407
3408 		/* if two sided and neither the face nor the sample mask is used by the shader, ensure face_gpr is emitted */
3409 if (ctx.face_gpr == -1) {
3410 i = ctx.shader->ninput++;
3411 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
3412 ctx.shader->input[i].spi_sid = 0;
3413 ctx.shader->input[i].gpr = gpr++;
3414 ctx.face_gpr = ctx.shader->input[i].gpr;
3415 }
3416
3417 for (i = 0; i < count; i++) {
3418 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
3419 int ni = ctx.shader->ninput++;
3420 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
3421 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
3422 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
3423 ctx.shader->input[ni].gpr = gpr++;
3424 // TGSI to LLVM needs to know the lds position of inputs.
3425 				// Non LLVM path computes it later (in process_twoside_color_inputs)
3426 ctx.shader->input[ni].lds_pos = next_lds_loc++;
3427 ctx.shader->input[i].back_color_input = ni;
3428 if (ctx.bc->chip_class >= EVERGREEN) {
3429 if ((r = evergreen_interp_input(&ctx, ni)))
3430 return r;
3431 }
3432 }
3433 }
3434 }
3435
3436 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
3437 shader->nr_ps_max_color_exports = 8;
3438
3439 if (ctx.fragcoord_input >= 0) {
3440 if (ctx.bc->chip_class == CAYMAN) {
3441 for (j = 0 ; j < 4; j++) {
3442 struct r600_bytecode_alu alu;
3443 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3444 alu.op = ALU_OP1_RECIP_IEEE;
3445 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3446 alu.src[0].chan = 3;
3447
3448 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3449 alu.dst.chan = j;
3450 alu.dst.write = (j == 3);
3451 alu.last = 1;
3452 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3453 return r;
3454 }
3455 } else {
3456 struct r600_bytecode_alu alu;
3457 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3458 alu.op = ALU_OP1_RECIP_IEEE;
3459 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3460 alu.src[0].chan = 3;
3461
3462 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3463 alu.dst.chan = 3;
3464 alu.dst.write = 1;
3465 alu.last = 1;
3466 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3467 return r;
3468 }
3469 }
3470
3471 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3472 struct r600_bytecode_alu alu;
3473 int r;
3474
3475 		/* workaround for a GS thread with no output - emit a cut at the start of the GS */
3476 if (ctx.bc->chip_class == R600)
3477 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CUT_VERTEX);
3478
3479 for (j = 0; j < 4; j++) {
3480 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3481 alu.op = ALU_OP1_MOV;
3482 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3483 alu.src[0].value = 0;
3484 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
3485 alu.dst.write = 1;
3486 alu.last = 1;
3487 r = r600_bytecode_add_alu(ctx.bc, &alu);
3488 if (r)
3489 return r;
3490 }
3491
3492 if (ctx.shader->gs_tri_strip_adj_fix) {
3493 r = single_alu_op2(&ctx, ALU_OP2_AND_INT,
3494 ctx.gs_rotated_input[0], 2,
3495 0, 2,
3496 V_SQ_ALU_SRC_LITERAL, 1);
3497 if (r)
3498 return r;
3499
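			/* per component, CNDE_INT picks the original or the
			 * rotated ((i + 4) % 6) vertex offset based on the
			 * parity bit computed above; channel 2 of reg 0 is
			 * remapped to 3 because r0.z holds the PrimitiveID */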
3500 for (i = 0; i < 6; i++) {
3501 int rotated = (i + 4) % 6;
3502 int offset_reg = i / 3;
3503 int offset_chan = i % 3;
3504 int rotated_offset_reg = rotated / 3;
3505 int rotated_offset_chan = rotated % 3;
3506
3507 if (offset_reg == 0 && offset_chan == 2)
3508 offset_chan = 3;
3509 if (rotated_offset_reg == 0 && rotated_offset_chan == 2)
3510 rotated_offset_chan = 3;
3511
3512 r = single_alu_op3(&ctx, ALU_OP3_CNDE_INT,
3513 ctx.gs_rotated_input[offset_reg], offset_chan,
3514 ctx.gs_rotated_input[0], 2,
3515 offset_reg, offset_chan,
3516 rotated_offset_reg, rotated_offset_chan);
3517 if (r)
3518 return r;
3519 }
3520 }
3521 }
3522
3523 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3524 r600_fetch_tess_io_info(&ctx);
3525
3526 if (shader->two_side && ctx.colors_used) {
3527 if ((r = process_twoside_color_inputs(&ctx)))
3528 return r;
3529 }
3530
3531 tgsi_parse_init(&ctx.parse, tokens);
3532 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3533 tgsi_parse_token(&ctx.parse);
3534 switch (ctx.parse.FullToken.Token.Type) {
3535 case TGSI_TOKEN_TYPE_INSTRUCTION:
3536 r = tgsi_is_supported(&ctx);
3537 if (r)
3538 goto out_err;
3539 ctx.max_driver_temp_used = 0;
3540 /* reserve first tmp for everyone */
3541 r600_get_temp(&ctx);
3542
3543 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
3544 if ((r = tgsi_split_constant(&ctx)))
3545 goto out_err;
3546 if ((r = tgsi_split_literal_constant(&ctx)))
3547 goto out_err;
3548 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3549 if ((r = tgsi_split_gs_inputs(&ctx)))
3550 goto out_err;
3551 } else if (lds_inputs) {
3552 if ((r = tgsi_split_lds_inputs(&ctx)))
3553 goto out_err;
3554 }
3555 if (ctx.bc->chip_class == CAYMAN)
3556 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
3557 else if (ctx.bc->chip_class >= EVERGREEN)
3558 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
3559 else
3560 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
3561 r = ctx.inst_info->process(&ctx);
3562 if (r)
3563 goto out_err;
3564
3565 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3566 r = r600_store_tcs_output(&ctx);
3567 if (r)
3568 goto out_err;
3569 }
3570 break;
3571 default:
3572 break;
3573 }
3574 }
3575
3576 /* Reset the temporary register counter. */
3577 ctx.max_driver_temp_used = 0;
3578
3579 noutput = shader->noutput;
3580
3581 if (!ring_outputs && ctx.clip_vertex_write) {
3582 unsigned clipdist_temp[2];
3583
3584 clipdist_temp[0] = r600_get_temp(&ctx);
3585 clipdist_temp[1] = r600_get_temp(&ctx);
3586
3587 		/* convert the clipvertex write into clipdistance writes and stop
3588 		   exporting the clip vertex itself */
3589
3590 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
3591 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3592 shader->output[noutput].gpr = clipdist_temp[0];
3593 noutput++;
3594 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3595 shader->output[noutput].gpr = clipdist_temp[1];
3596 noutput++;
3597
3598 /* reset spi_sid for clipvertex output to avoid confusing spi */
3599 shader->output[ctx.cv_output].spi_sid = 0;
3600
3601 shader->clip_dist_write = 0xFF;
3602 shader->cc_dist_mask = 0xFF;
3603
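		/* each clipdist[i] is DOT4(clipvertex, ucp[i]); the user clip
		 * planes sit in the BUFFER_INFO constant buffer at kcache
		 * address 512 + i */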
3604 for (i = 0; i < 8; i++) {
3605 int oreg = i >> 2;
3606 int ochan = i & 3;
3607
3608 for (j = 0; j < 4; j++) {
3609 struct r600_bytecode_alu alu;
3610 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3611 alu.op = ALU_OP2_DOT4;
3612 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
3613 alu.src[0].chan = j;
3614
3615 alu.src[1].sel = 512 + i;
3616 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
3617 alu.src[1].chan = j;
3618
3619 alu.dst.sel = clipdist_temp[oreg];
3620 alu.dst.chan = j;
3621 alu.dst.write = (j == ochan);
3622 if (j == 3)
3623 alu.last = 1;
3624 r = r600_bytecode_add_alu(ctx.bc, &alu);
3625 if (r)
3626 return r;
3627 }
3628 }
3629 }
3630
3631 /* Add stream outputs. */
3632 if (so.num_outputs) {
3633 bool emit = false;
3634 if (!lds_outputs && !ring_outputs && ctx.type == PIPE_SHADER_VERTEX)
3635 emit = true;
3636 if (!ring_outputs && ctx.type == PIPE_SHADER_TESS_EVAL)
3637 emit = true;
3638 if (emit)
3639 emit_streamout(&ctx, &so, -1, NULL);
3640 }
3641 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
3642 convert_edgeflag_to_int(&ctx);
3643
3644 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3645 r600_emit_tess_factor(&ctx);
3646
3647 if (lds_outputs) {
3648 if (ctx.type == PIPE_SHADER_VERTEX) {
3649 if (ctx.shader->noutput)
3650 emit_lds_vs_writes(&ctx);
3651 }
3652 } else if (ring_outputs) {
3653 if (shader->vs_as_es || shader->tes_as_es) {
3654 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
3655 ctx.gs_export_gpr_tregs[1] = -1;
3656 ctx.gs_export_gpr_tregs[2] = -1;
3657 ctx.gs_export_gpr_tregs[3] = -1;
3658
3659 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
3660 }
3661 } else {
3662 /* Export output */
3663 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
3664
3665 for (i = 0, j = 0; i < noutput; i++, j++) {
3666 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3667 output[j].gpr = shader->output[i].gpr;
3668 output[j].elem_size = 3;
3669 output[j].swizzle_x = 0;
3670 output[j].swizzle_y = 1;
3671 output[j].swizzle_z = 2;
3672 output[j].swizzle_w = 3;
3673 output[j].burst_count = 1;
3674 output[j].type = 0xffffffff;
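/* 0xffffffff is a sentinel: any export whose type is still unset after
 * the switch below is turned into a generic PARAM export. */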
3675 output[j].op = CF_OP_EXPORT;
3676 switch (ctx.type) {
3677 case PIPE_SHADER_VERTEX:
3678 case PIPE_SHADER_TESS_EVAL:
3679 switch (shader->output[i].name) {
3680 case TGSI_SEMANTIC_POSITION:
3681 output[j].array_base = 60;
3682 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3683 pos_emitted = true;
3684 break;
3685
3686 case TGSI_SEMANTIC_PSIZE:
3687 output[j].array_base = 61;
3688 output[j].swizzle_y = 7;
3689 output[j].swizzle_z = 7;
3690 output[j].swizzle_w = 7;
3691 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3692 pos_emitted = true;
3693 break;
3694 case TGSI_SEMANTIC_EDGEFLAG:
3695 output[j].array_base = 61;
3696 output[j].swizzle_x = 7;
3697 output[j].swizzle_y = 0;
3698 output[j].swizzle_z = 7;
3699 output[j].swizzle_w = 7;
3700 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3701 pos_emitted = true;
3702 break;
3703 case TGSI_SEMANTIC_LAYER:
3704 /* spi_sid is 0 for outputs that are
3705 * not consumed by PS */
3706 if (shader->output[i].spi_sid) {
3707 output[j].array_base = next_param_base++;
3708 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3709 j++;
3710 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
3711 }
3712 output[j].array_base = 61;
3713 output[j].swizzle_x = 7;
3714 output[j].swizzle_y = 7;
3715 output[j].swizzle_z = 0;
3716 output[j].swizzle_w = 7;
3717 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3718 pos_emitted = true;
3719 break;
3720 case TGSI_SEMANTIC_VIEWPORT_INDEX:
3721 /* spi_sid is 0 for outputs that are
3722 * not consumed by PS */
3723 if (shader->output[i].spi_sid) {
3724 output[j].array_base = next_param_base++;
3725 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3726 j++;
3727 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
3728 }
3729 output[j].array_base = 61;
3730 output[j].swizzle_x = 7;
3731 output[j].swizzle_y = 7;
3732 output[j].swizzle_z = 7;
3733 output[j].swizzle_w = 0;
3734 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3735 pos_emitted = true;
3736 break;
3737 case TGSI_SEMANTIC_CLIPVERTEX:
3738 j--;
3739 break;
3740 case TGSI_SEMANTIC_CLIPDIST:
3741 output[j].array_base = next_clip_base++;
3742 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3743 pos_emitted = true;
3744 /* spi_sid is 0 for clipdistance outputs that were generated
3745 * for clipvertex - we don't need to pass them to PS */
3746 if (shader->output[i].spi_sid) {
3747 j++;
3748 /* duplicate it as PARAM to pass to the pixel shader */
3749 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
3750 output[j].array_base = next_param_base++;
3751 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3752 }
3753 break;
3754 case TGSI_SEMANTIC_FOG:
3755 output[j].swizzle_y = 4; /* 0 */
3756 output[j].swizzle_z = 4; /* 0 */
3757 output[j].swizzle_w = 5; /* 1 */
3758 break;
3759 case TGSI_SEMANTIC_PRIMID:
3760 output[j].swizzle_x = 2;
3761 output[j].swizzle_y = 4; /* 0 */
3762 output[j].swizzle_z = 4; /* 0 */
3763 output[j].swizzle_w = 4; /* 0 */
3764 break;
3765 }
3766
3767 break;
3768 case PIPE_SHADER_FRAGMENT:
3769 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
3770 /* never export more colors than the number of CBs */
3771 if (shader->output[i].sid >= max_color_exports) {
3772 /* skip export */
3773 j--;
3774 continue;
3775 }
3776 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
3777 output[j].array_base = shader->output[i].sid;
3778 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3779 shader->nr_ps_color_exports++;
3780 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
3781 for (k = 1; k < max_color_exports; k++) {
3782 j++;
3783 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3784 output[j].gpr = shader->output[i].gpr;
3785 output[j].elem_size = 3;
3786 output[j].swizzle_x = 0;
3787 output[j].swizzle_y = 1;
3788 output[j].swizzle_z = 2;
3789 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
3790 output[j].burst_count = 1;
3791 output[j].array_base = k;
3792 output[j].op = CF_OP_EXPORT;
3793 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3794 shader->nr_ps_color_exports++;
3795 }
3796 }
3797 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
3798 output[j].array_base = 61;
3799 output[j].swizzle_x = 2;
3800 output[j].swizzle_y = 7;
3801 output[j].swizzle_z = output[j].swizzle_w = 7;
3802 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3803 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
3804 output[j].array_base = 61;
3805 output[j].swizzle_x = 7;
3806 output[j].swizzle_y = 1;
3807 output[j].swizzle_z = output[j].swizzle_w = 7;
3808 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3809 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
3810 output[j].array_base = 61;
3811 output[j].swizzle_x = 7;
3812 output[j].swizzle_y = 7;
3813 output[j].swizzle_z = 0;
3814 output[j].swizzle_w = 7;
3815 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3816 } else {
3817 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
3818 r = -EINVAL;
3819 goto out_err;
3820 }
3821 break;
3822 case PIPE_SHADER_TESS_CTRL:
3823 break;
3824 default:
3825 R600_ERR("unsupported processor type %d\n", ctx.type);
3826 r = -EINVAL;
3827 goto out_err;
3828 }
3829
3830 if (output[j].type == 0xffffffff) {
3831 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3832 output[j].array_base = next_param_base++;
3833 }
3834 }
3835
3836 /* add fake position export */
3837 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && pos_emitted == false) {
3838 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3839 output[j].gpr = 0;
3840 output[j].elem_size = 3;
3841 output[j].swizzle_x = 7;
3842 output[j].swizzle_y = 7;
3843 output[j].swizzle_z = 7;
3844 output[j].swizzle_w = 7;
3845 output[j].burst_count = 1;
3846 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3847 output[j].array_base = 60;
3848 output[j].op = CF_OP_EXPORT;
3849 j++;
3850 }
3851
3852 /* add fake param output for vertex shader if no param is exported */
3853 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && next_param_base == 0) {
3854 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3855 output[j].gpr = 0;
3856 output[j].elem_size = 3;
3857 output[j].swizzle_x = 7;
3858 output[j].swizzle_y = 7;
3859 output[j].swizzle_z = 7;
3860 output[j].swizzle_w = 7;
3861 output[j].burst_count = 1;
3862 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3863 output[j].array_base = 0;
3864 output[j].op = CF_OP_EXPORT;
3865 j++;
3866 }
3867
3868 /* add fake pixel export */
3869 if (ctx.type == PIPE_SHADER_FRAGMENT && shader->nr_ps_color_exports == 0) {
3870 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3871 output[j].gpr = 0;
3872 output[j].elem_size = 3;
3873 output[j].swizzle_x = 7;
3874 output[j].swizzle_y = 7;
3875 output[j].swizzle_z = 7;
3876 output[j].swizzle_w = 7;
3877 output[j].burst_count = 1;
3878 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3879 output[j].array_base = 0;
3880 output[j].op = CF_OP_EXPORT;
3881 j++;
3882 shader->nr_ps_color_exports++;
3883 }
3884
3885 noutput = j;
3886
3887 /* set export done on last export of each type */
3888 for (k = noutput - 1, output_done = 0; k >= 0; k--) {
3889 if (!(output_done & (1 << output[k].type))) {
3890 output_done |= (1 << output[k].type);
3891 output[k].op = CF_OP_EXPORT_DONE;
3892 }
3893 }
3894 /* add output to bytecode */
3895 for (i = 0; i < noutput; i++) {
3896 r = r600_bytecode_add_output(ctx.bc, &output[i]);
3897 if (r)
3898 goto out_err;
3899 }
3900 }
3901
3902 /* add program end */
3903 if (ctx.bc->chip_class == CAYMAN)
3904 cm_bytecode_add_cf_end(ctx.bc);
3905 else {
3906 const struct cf_op_info *last = NULL;
3907
3908 if (ctx.bc->cf_last)
3909 last = r600_isa_cf(ctx.bc->cf_last->op);
3910
3911 /* ALU clause instructions (and LOOP_END/POP) can't carry the end-of-program bit, so add a NOP */
3912 if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_POP)
3913 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
3914
3915 ctx.bc->cf_last->end_of_program = 1;
3916 }
3917
3918 /* check GPR limit - we have 124 = 128 - 4
3919 * (4 are reserved as alu clause temporary registers) */
3920 if (ctx.bc->ngpr > 124) {
3921 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
3922 r = -ENOMEM;
3923 goto out_err;
3924 }
3925
3926 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3927 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
3928 return r;
3929 }
3930
3931 free(ctx.literals);
3932 tgsi_parse_free(&ctx.parse);
3933 return 0;
3934 out_err:
3935 free(ctx.literals);
3936 tgsi_parse_free(&ctx.parse);
3937 return r;
3938 }
3939
3940 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
3941 {
3942 const unsigned tgsi_opcode =
3943 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
3944 R600_ERR("%s tgsi opcode unsupported\n",
3945 tgsi_get_opcode_name(tgsi_opcode));
3946 return -EINVAL;
3947 }
3948
3949 static int tgsi_end(struct r600_shader_ctx *ctx UNUSED)
3950 {
3951 return 0;
3952 }
3953
3954 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
3955 const struct r600_shader_src *shader_src,
3956 unsigned chan)
3957 {
3958 bc_src->sel = shader_src->sel;
3959 bc_src->chan = shader_src->swizzle[chan];
3960 bc_src->neg = shader_src->neg;
3961 bc_src->abs = shader_src->abs;
3962 bc_src->rel = shader_src->rel;
3963 bc_src->value = shader_src->value[bc_src->chan];
3964 bc_src->kc_bank = shader_src->kc_bank;
3965 bc_src->kc_rel = shader_src->kc_rel;
3966 }
3967
3968 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
3969 {
3970 bc_src->abs = 1;
3971 bc_src->neg = 0;
3972 }
3973
3974 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
3975 {
3976 bc_src->neg = !bc_src->neg;
3977 }
3978
3979 static void tgsi_dst(struct r600_shader_ctx *ctx,
3980 const struct tgsi_full_dst_register *tgsi_dst,
3981 unsigned swizzle,
3982 struct r600_bytecode_alu_dst *r600_dst)
3983 {
3984 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3985
3986 r600_dst->sel = tgsi_dst->Register.Index;
3987 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
3988 r600_dst->chan = swizzle;
3989 r600_dst->write = 1;
3990 if (inst->Instruction.Saturate) {
3991 r600_dst->clamp = 1;
3992 }
3993 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
3994 if (tgsi_dst->Register.File == TGSI_FILE_OUTPUT) {
3995 return;
3996 }
3997 }
3998 if (tgsi_dst->Register.Indirect)
3999 r600_dst->rel = V_SQ_REL_RELATIVE;
4000
4001 }
4002
4003 static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap, int dest_temp, int op_override)
4004 {
4005 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4006 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4007 struct r600_bytecode_alu alu;
4008 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4009 int use_tmp = 0;
4010 int swizzle_x = inst->Src[0].Register.SwizzleX;
4011
4012 if (singledest) {
4013 switch (write_mask) {
4014 case 0x1:
4015 if (swizzle_x == 2) {
4016 write_mask = 0xc;
4017 use_tmp = 3;
4018 } else
4019 write_mask = 0x3;
4020 break;
4021 case 0x2:
4022 if (swizzle_x == 2) {
4023 write_mask = 0xc;
4024 use_tmp = 3;
4025 } else {
4026 write_mask = 0x3;
4027 use_tmp = 1;
4028 }
4029 break;
4030 case 0x4:
4031 if (swizzle_x == 0) {
4032 write_mask = 0x3;
4033 use_tmp = 1;
4034 } else
4035 write_mask = 0xc;
4036 break;
4037 case 0x8:
4038 if (swizzle_x == 0) {
4039 write_mask = 0x3;
4040 use_tmp = 1;
4041 } else {
4042 write_mask = 0xc;
4043 use_tmp = 3;
4044 }
4045 break;
4046 }
4047 }
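/* For a single 64-bit result the TGSI write mask names one 32-bit channel,
 * so it is widened above to the .xy or .zw pair the double occupies;
 * use_tmp routes the result through the temp register when the pair it is
 * computed in is not where it finally has to land. */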
4048
4049 lasti = tgsi_last_instruction(write_mask);
4050 for (i = 0; i <= lasti; i++) {
4051
4052 if (!(write_mask & (1 << i)))
4053 continue;
4054
4055 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4056
4057 if (singledest) {
4058 if (use_tmp || dest_temp) {
4059 alu.dst.sel = use_tmp ? ctx->temp_reg : dest_temp;
4060 alu.dst.chan = i;
4061 alu.dst.write = 1;
4062 } else {
4063 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4064 }
4065 if (i == 1 || i == 3)
4066 alu.dst.write = 0;
4067 } else
4068 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4069
4070 alu.op = op_override ? op_override : ctx->inst_info->op;
4071 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
4072 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4073 } else if (!swap) {
4074 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4075 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4076 }
4077 } else {
4078 r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
4079 r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
4080 }
4081
4082 /* handle some special cases */
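/* the sign bit of a double lives in the high dword (channels y/w), so
 * abs only needs to be applied there */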
4083 if (i == 1 || i == 3) {
4084 switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
4085 case TGSI_OPCODE_DABS:
4086 r600_bytecode_src_set_abs(&alu.src[0]);
4087 break;
4088 default:
4089 break;
4090 }
4091 }
4092 if (i == lasti) {
4093 alu.last = 1;
4094 }
4095 r = r600_bytecode_add_alu(ctx->bc, &alu);
4096 if (r)
4097 return r;
4098 }
4099
4100 if (use_tmp) {
4101 write_mask = inst->Dst[0].Register.WriteMask;
4102
4103 lasti = tgsi_last_instruction(write_mask);
4104 /* move result from temp to dst */
4105 for (i = 0; i <= lasti; i++) {
4106 if (!(write_mask & (1 << i)))
4107 continue;
4108
4109 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4110 alu.op = ALU_OP1_MOV;
4111
4112 if (dest_temp) {
4113 alu.dst.sel = dest_temp;
4114 alu.dst.chan = i;
4115 alu.dst.write = 1;
4116 } else
4117 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4118 alu.src[0].sel = ctx->temp_reg;
4119 alu.src[0].chan = use_tmp - 1;
4120 alu.last = (i == lasti);
4121
4122 r = r600_bytecode_add_alu(ctx->bc, &alu);
4123 if (r)
4124 return r;
4125 }
4126 }
4127 return 0;
4128 }
4129
4130 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
4131 {
4132 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4133 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4134 /* validate the write mask: at least one whole .xy or .zw pair must be written */
4135 if ((write_mask & 0x3) != 0x3 &&
4136 (write_mask & 0xc) != 0xc) {
4137 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
4138 return -1;
4139 }
4140 return tgsi_op2_64_params(ctx, false, false, 0, 0);
4141 }
4142
4143 static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
4144 {
4145 return tgsi_op2_64_params(ctx, true, false, 0, 0);
4146 }
4147
4148 static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
4149 {
4150 return tgsi_op2_64_params(ctx, true, true, 0, 0);
4151 }
4152
4153 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
4154 {
4155 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4156 struct r600_bytecode_alu alu;
4157 int i, j, r;
4158 int lasti = 3;
4159 int tmp = r600_get_temp(ctx);
4160
4161 for (i = 0; i < lasti + 1; i++) {
4162
4163 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4164 alu.op = ctx->inst_info->op;
4165 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4166 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
4167 }
4168
4169 if (inst->Dst[0].Register.WriteMask & (1 << i))
4170 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4171 else
4172 alu.dst.sel = tmp;
4173
4174 alu.dst.chan = i;
4175 alu.is_op3 = 1;
4176 if (i == lasti) {
4177 alu.last = 1;
4178 }
4179 r = r600_bytecode_add_alu(ctx->bc, &alu);
4180 if (r)
4181 return r;
4182 }
4183 return 0;
4184 }
4185
4186 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
4187 {
4188 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4189 struct r600_bytecode_alu alu;
4190 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4191 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4192 /* use temp register if trans_only and more than one dst component */
4193 int use_tmp = trans_only && (write_mask ^ (1 << lasti));
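/* Presumably this avoids a read-after-write hazard: trans-only ops are
 * issued one per instruction group (alu.last is set on every component
 * below), so writing the real dst early could clobber a register that a
 * later component still reads as a source. */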
4194 unsigned op = ctx->inst_info->op;
4195
4196 if (op == ALU_OP2_MUL_IEEE &&
4197 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
4198 op = ALU_OP2_MUL;
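/* the legacy (non-IEEE) MUL returns 0 for 0 * anything, which is exactly
 * the zero-wins behaviour this property requests */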
4199
4200 for (i = 0; i <= lasti; i++) {
4201 if (!(write_mask & (1 << i)))
4202 continue;
4203
4204 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4205 if (use_tmp) {
4206 alu.dst.sel = ctx->temp_reg;
4207 alu.dst.chan = i;
4208 alu.dst.write = 1;
4209 } else
4210 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4211
4212 alu.op = op;
4213 if (!swap) {
4214 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4215 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
4216 }
4217 } else {
4218 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4219 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4220 }
4221 if (i == lasti || trans_only) {
4222 alu.last = 1;
4223 }
4224 r = r600_bytecode_add_alu(ctx->bc, &alu);
4225 if (r)
4226 return r;
4227 }
4228
4229 if (use_tmp) {
4230 /* move result from temp to dst */
4231 for (i = 0; i <= lasti; i++) {
4232 if (!(write_mask & (1 << i)))
4233 continue;
4234
4235 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4236 alu.op = ALU_OP1_MOV;
4237 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4238 alu.src[0].sel = ctx->temp_reg;
4239 alu.src[0].chan = i;
4240 alu.last = (i == lasti);
4241
4242 r = r600_bytecode_add_alu(ctx->bc, &alu);
4243 if (r)
4244 return r;
4245 }
4246 }
4247 return 0;
4248 }
4249
4250 static int tgsi_op2(struct r600_shader_ctx *ctx)
4251 {
4252 return tgsi_op2_s(ctx, 0, 0);
4253 }
4254
4255 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
4256 {
4257 return tgsi_op2_s(ctx, 1, 0);
4258 }
4259
4260 static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
4261 {
4262 return tgsi_op2_s(ctx, 0, 1);
4263 }
4264
4265 static int tgsi_ineg(struct r600_shader_ctx *ctx)
4266 {
4267 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4268 struct r600_bytecode_alu alu;
4269 int i, r;
4270 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4271
4272 for (i = 0; i < lasti + 1; i++) {
4273
4274 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4275 continue;
4276 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4277 alu.op = ctx->inst_info->op;
4278
4279 alu.src[0].sel = V_SQ_ALU_SRC_0;
4280
4281 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4282
4283 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4284
4285 if (i == lasti) {
4286 alu.last = 1;
4287 }
4288 r = r600_bytecode_add_alu(ctx->bc, &alu);
4289 if (r)
4290 return r;
4291 }
4292 return 0;
4293
4294 }
4295
4296 static int tgsi_dneg(struct r600_shader_ctx *ctx)
4297 {
4298 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4299 struct r600_bytecode_alu alu;
4300 int i, r;
4301 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4302
4303 for (i = 0; i < lasti + 1; i++) {
4304
4305 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4306 continue;
4307 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4308 alu.op = ALU_OP1_MOV;
4309
4310 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4311
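/* a double's sign bit sits in the high dword (channels y/w), so only
 * those channels get the negate toggled */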
4312 if (i == 1 || i == 3)
4313 r600_bytecode_src_toggle_neg(&alu.src[0]);
4314 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4315
4316 if (i == lasti) {
4317 alu.last = 1;
4318 }
4319 r = r600_bytecode_add_alu(ctx->bc, &alu);
4320 if (r)
4321 return r;
4322 }
4323 return 0;
4324
4325 }
4326
4327 static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
4328 {
4329 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4330 struct r600_bytecode_alu alu;
4331 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4332 int i, j, r;
4333
4334 for (i = 0; i <= 3; i++) {
4335 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4336 alu.op = ctx->inst_info->op;
4337
4338 alu.dst.sel = ctx->temp_reg;
4339 alu.dst.chan = i;
4340 alu.dst.write = 1;
4341 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4342 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4343 }
4344
4345 if (i == 3)
4346 alu.last = 1;
4347
4348 r = r600_bytecode_add_alu(ctx->bc, &alu);
4349 if (r)
4350 return r;
4351 }
4352
4353 /* Replicate significand result across channels. */
4354 for (i = 0; i <= 3; i++) {
4355 if (!(write_mask & (1 << i)))
4356 continue;
4357
4358 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4359 alu.op = ALU_OP1_MOV;
4360 alu.src[0].chan = (i & 1) + 2;
4361 alu.src[0].sel = ctx->temp_reg;
4362
4363 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4364 alu.dst.write = 1;
4365 alu.last = 1;
4366 r = r600_bytecode_add_alu(ctx->bc, &alu);
4367 if (r)
4368 return r;
4369 }
4370
4371 for (i = 0; i <= 3; i++) {
4372 if (inst->Dst[1].Register.WriteMask & (1 << i)) {
4373 /* MOV the exponent result (temp channel 1) to dst1's first enabled channel */
4374 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4375 alu.op = ALU_OP1_MOV;
4376 alu.src[0].chan = 1;
4377 alu.src[0].sel = ctx->temp_reg;
4378
4379 tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
4380 alu.last = 1;
4381 r = r600_bytecode_add_alu(ctx->bc, &alu);
4382 if (r)
4383 return r;
4384 break;
4385 }
4386 }
4387 return 0;
4388 }
4389
4390
4391 static int egcm_int_to_double(struct r600_shader_ctx *ctx)
4392 {
4393 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4394 struct r600_bytecode_alu alu;
4395 int i, r;
4396 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4397
4398 assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
4399 inst->Instruction.Opcode == TGSI_OPCODE_U2D);
4400
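/* two steps: convert each integer to a 32-bit float in the temp register,
 * then widen it with FLT32_TO_FLT64 into the channel pair the double
 * occupies; odd channels feed the literal 0 as the second conversion input */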
4401 for (i = 0; i <= (lasti+1)/2; i++) {
4402 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4403 alu.op = ctx->inst_info->op;
4404
4405 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4406 alu.dst.sel = ctx->temp_reg;
4407 alu.dst.chan = i;
4408 alu.dst.write = 1;
4409 alu.last = 1;
4410
4411 r = r600_bytecode_add_alu(ctx->bc, &alu);
4412 if (r)
4413 return r;
4414 }
4415
4416 for (i = 0; i <= lasti; i++) {
4417 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4418 alu.op = ALU_OP1_FLT32_TO_FLT64;
4419
4420 alu.src[0].chan = i/2;
4421 if (i%2 == 0)
4422 alu.src[0].sel = ctx->temp_reg;
4423 else {
4424 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
4425 alu.src[0].value = 0x0;
4426 }
4427 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4428 alu.last = i == lasti;
4429
4430 r = r600_bytecode_add_alu(ctx->bc, &alu);
4431 if (r)
4432 return r;
4433 }
4434
4435 return 0;
4436 }
4437
4438 static int egcm_double_to_int(struct r600_shader_ctx *ctx)
4439 {
4440 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4441 struct r600_bytecode_alu alu;
4442 int i, r;
4443 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4444 int treg = r600_get_temp(ctx);
4445 assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
4446 inst->Instruction.Opcode == TGSI_OPCODE_D2U);
4447
4448 /* do a 64-bit to 32-bit float conversion into a temp register */
4449 r = tgsi_op2_64_params(ctx, true, false, treg, ALU_OP1_FLT64_TO_FLT32);
4450 if (r)
4451 return r;
4452
4453 for (i = 0; i <= lasti; i++) {
4454 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4455 continue;
4456 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4457 alu.op = ctx->inst_info->op;
4458
4459 alu.src[0].chan = i;
4460 alu.src[0].sel = treg;
4461 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4462 alu.last = (i == lasti);
4463
4464 r = r600_bytecode_add_alu(ctx->bc, &alu);
4465 if (r)
4466 return r;
4467 }
4468
4469 return 0;
4470 }
4471
4472 static int cayman_emit_unary_double_raw(struct r600_bytecode *bc,
4473 unsigned op,
4474 int dst_reg,
4475 struct r600_shader_src *src,
4476 bool abs)
4477 {
4478 struct r600_bytecode_alu alu;
4479 const int last_slot = 3;
4480 int r;
4481
4482 /* these have to write the result to X/Y by the looks of it */
4483 for (int i = 0 ; i < last_slot; i++) {
4484 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4485 alu.op = op;
4486
4487 r600_bytecode_src(&alu.src[0], src, 1);
4488 r600_bytecode_src(&alu.src[1], src, 0);
4489
4490 if (abs)
4491 r600_bytecode_src_set_abs(&alu.src[1]);
4492
4493 alu.dst.sel = dst_reg;
4494 alu.dst.chan = i;
4495 alu.dst.write = (i == 0 || i == 1);
4496
4497 if (bc->chip_class != CAYMAN || i == last_slot - 1)
4498 alu.last = 1;
4499 r = r600_bytecode_add_alu(bc, &alu);
4500 if (r)
4501 return r;
4502 }
4503
4504 return 0;
4505 }
4506
4507 static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
4508 {
4509 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4510 int i, r;
4511 struct r600_bytecode_alu alu;
4512 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4513 int t1 = ctx->temp_reg;
4514
4515 /* there should only be one src reg */
4516 assert(inst->Instruction.NumSrcRegs == 1);
4517
4518 /* only support one double at a time */
4519 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
4520 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
4521
4522 r = cayman_emit_unary_double_raw(
4523 ctx->bc, ctx->inst_info->op, t1,
4524 &ctx->src[0],
4525 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
4526 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT);
4527 if (r)
4528 return r;
4529
4530 for (i = 0 ; i <= lasti; i++) {
4531 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4532 continue;
4533 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4534 alu.op = ALU_OP1_MOV;
4535 alu.src[0].sel = t1;
4536 alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
4537 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4538 alu.dst.write = 1;
4539 if (i == lasti)
4540 alu.last = 1;
4541 r = r600_bytecode_add_alu(ctx->bc, &alu);
4542 if (r)
4543 return r;
4544 }
4545 return 0;
4546 }
4547
4548 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
4549 {
4550 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4551 int i, j, r;
4552 struct r600_bytecode_alu alu;
4553 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
4554
4555 for (i = 0 ; i < last_slot; i++) {
4556 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4557 alu.op = ctx->inst_info->op;
4558 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4559 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
4560
4561 /* RSQ should take the absolute value of src */
4562 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
4563 r600_bytecode_src_set_abs(&alu.src[j]);
4564 }
4565 }
4566 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4567 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
4568
4569 if (i == last_slot - 1)
4570 alu.last = 1;
4571 r = r600_bytecode_add_alu(ctx->bc, &alu);
4572 if (r)
4573 return r;
4574 }
4575 return 0;
4576 }
4577
4578 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
4579 {
4580 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4581 int i, j, k, r;
4582 struct r600_bytecode_alu alu;
4583 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4584 int t1 = ctx->temp_reg;
4585
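/* Cayman issues these t-slot multiplies in all four vector slots (see the
 * CAYMAN notes at the top of this file): the sources are broadcast to
 * every slot and only the slot matching the destination channel commits
 * its result. */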
4586 for (k = 0; k <= lasti; k++) {
4587 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
4588 continue;
4589
4590 for (i = 0 ; i < 4; i++) {
4591 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4592 alu.op = ctx->inst_info->op;
4593 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4594 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
4595 }
4596 alu.dst.sel = t1;
4597 alu.dst.chan = i;
4598 alu.dst.write = (i == k);
4599 if (i == 3)
4600 alu.last = 1;
4601 r = r600_bytecode_add_alu(ctx->bc, &alu);
4602 if (r)
4603 return r;
4604 }
4605 }
4606
4607 for (i = 0 ; i <= lasti; i++) {
4608 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4609 continue;
4610 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4611 alu.op = ALU_OP1_MOV;
4612 alu.src[0].sel = t1;
4613 alu.src[0].chan = i;
4614 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4615 alu.dst.write = 1;
4616 if (i == lasti)
4617 alu.last = 1;
4618 r = r600_bytecode_add_alu(ctx->bc, &alu);
4619 if (r)
4620 return r;
4621 }
4622
4623 return 0;
4624 }
4625
4626
4627 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
4628 {
4629 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4630 int i, j, k, r;
4631 struct r600_bytecode_alu alu;
4632 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4633 int t1 = ctx->temp_reg;
4634
4635 /* t1 would get overwritten below if we actually tried to
4636 * multiply two pairs of doubles at a time. */
4637 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
4638 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
4639
4640 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
4641
4642 for (i = 0; i < 4; i++) {
4643 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4644 alu.op = ctx->inst_info->op;
4645 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4646 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));
4647 }
4648 alu.dst.sel = t1;
4649 alu.dst.chan = i;
4650 alu.dst.write = 1;
4651 if (i == 3)
4652 alu.last = 1;
4653 r = r600_bytecode_add_alu(ctx->bc, &alu);
4654 if (r)
4655 return r;
4656 }
4657
4658 for (i = 0; i <= lasti; i++) {
4659 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4660 continue;
4661 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4662 alu.op = ALU_OP1_MOV;
4663 alu.src[0].sel = t1;
4664 alu.src[0].chan = i;
4665 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4666 alu.dst.write = 1;
4667 if (i == lasti)
4668 alu.last = 1;
4669 r = r600_bytecode_add_alu(ctx->bc, &alu);
4670 if (r)
4671 return r;
4672 }
4673
4674 return 0;
4675 }
4676
4677 /*
4678 * Emit RECIP_64 + MUL_64 to implement division.
4679 */
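/*
 * There is no hardware DDIV, so dst = src0 * RECIP_64(src1): the
 * reciprocal is computed into t1 first and the MUL_64 loop consumes it.
 */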
4680 static int cayman_ddiv_instr(struct r600_shader_ctx *ctx)
4681 {
4682 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4683 int r;
4684 struct r600_bytecode_alu alu;
4685 int t1 = ctx->temp_reg;
4686 int k;
4687
4688 /* Only support one double at a time. This is the same constraint as
4689 * in DMUL lowering. */
4690 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
4691 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
4692
4693 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
4694
4695 r = cayman_emit_unary_double_raw(ctx->bc, ALU_OP2_RECIP_64, t1, &ctx->src[1], false);
4696 if (r)
4697 return r;
4698
4699 for (int i = 0; i < 4; i++) {
4700 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4701 alu.op = ALU_OP2_MUL_64;
4702
4703 r600_bytecode_src(&alu.src[0], &ctx->src[0], k * 2 + ((i == 3) ? 0 : 1));
4704
4705 alu.src[1].sel = t1;
4706 alu.src[1].chan = (i == 3) ? 0 : 1;
4707
4708 alu.dst.sel = t1;
4709 alu.dst.chan = i;
4710 alu.dst.write = 1;
4711 if (i == 3)
4712 alu.last = 1;
4713 r = r600_bytecode_add_alu(ctx->bc, &alu);
4714 if (r)
4715 return r;
4716 }
4717
4718 for (int i = 0; i < 2; i++) {
4719 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4720 alu.op = ALU_OP1_MOV;
4721 alu.src[0].sel = t1;
4722 alu.src[0].chan = i;
4723 tgsi_dst(ctx, &inst->Dst[0], k * 2 + i, &alu.dst);
4724 alu.dst.write = 1;
4725 if (i == 1)
4726 alu.last = 1;
4727 r = r600_bytecode_add_alu(ctx->bc, &alu);
4728 if (r)
4729 return r;
4730 }
4731 return 0;
4732 }
4733
4734 /*
4735 * r600 - trunc to -PI..PI range
4736 * r700 - normalize by dividing by 2PI
4737 * see fdo bug 27901
4738 */
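/*
 * The sequence below computes tmp = fract(src * 1/(2*PI) + 0.5), i.e. the
 * angle wrapped into a single period with the origin shifted by half, and
 * then rescales it: on r600 as tmp * 2*PI - PI (range [-PI, PI]), on r700
 * and later as tmp * 1.0 - 0.5 (the normalized [-0.5, 0.5] input that
 * SIN/COS expect there).
 */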
4739 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
4740 {
4741 int r;
4742 struct r600_bytecode_alu alu;
4743
4744 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4745 alu.op = ALU_OP3_MULADD;
4746 alu.is_op3 = 1;
4747
4748 alu.dst.chan = 0;
4749 alu.dst.sel = ctx->temp_reg;
4750 alu.dst.write = 1;
4751
4752 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
4753
4754 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4755 alu.src[1].chan = 0;
4756 alu.src[1].value = u_bitcast_f2u(0.5f * M_1_PI);
4757 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
4758 alu.src[2].chan = 0;
4759 alu.last = 1;
4760 r = r600_bytecode_add_alu(ctx->bc, &alu);
4761 if (r)
4762 return r;
4763
4764 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4765 alu.op = ALU_OP1_FRACT;
4766
4767 alu.dst.chan = 0;
4768 alu.dst.sel = ctx->temp_reg;
4769 alu.dst.write = 1;
4770
4771 alu.src[0].sel = ctx->temp_reg;
4772 alu.src[0].chan = 0;
4773 alu.last = 1;
4774 r = r600_bytecode_add_alu(ctx->bc, &alu);
4775 if (r)
4776 return r;
4777
4778 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4779 alu.op = ALU_OP3_MULADD;
4780 alu.is_op3 = 1;
4781
4782 alu.dst.chan = 0;
4783 alu.dst.sel = ctx->temp_reg;
4784 alu.dst.write = 1;
4785
4786 alu.src[0].sel = ctx->temp_reg;
4787 alu.src[0].chan = 0;
4788
4789 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4790 alu.src[1].chan = 0;
4791 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
4792 alu.src[2].chan = 0;
4793
4794 if (ctx->bc->chip_class == R600) {
4795 alu.src[1].value = u_bitcast_f2u(2.0f * M_PI);
4796 alu.src[2].value = u_bitcast_f2u(-M_PI);
4797 } else {
4798 alu.src[1].sel = V_SQ_ALU_SRC_1;
4799 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
4800 alu.src[2].neg = 1;
4801 }
4802
4803 alu.last = 1;
4804 r = r600_bytecode_add_alu(ctx->bc, &alu);
4805 if (r)
4806 return r;
4807 return 0;
4808 }
4809
4810 static int cayman_trig(struct r600_shader_ctx *ctx)
4811 {
4812 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4813 struct r600_bytecode_alu alu;
4814 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
4815 int i, r;
4816
4817 r = tgsi_setup_trig(ctx);
4818 if (r)
4819 return r;
4820
4821
4822 for (i = 0; i < last_slot; i++) {
4823 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4824 alu.op = ctx->inst_info->op;
4825 alu.dst.chan = i;
4826
4827 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4828 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
4829
4830 alu.src[0].sel = ctx->temp_reg;
4831 alu.src[0].chan = 0;
4832 if (i == last_slot - 1)
4833 alu.last = 1;
4834 r = r600_bytecode_add_alu(ctx->bc, &alu);
4835 if (r)
4836 return r;
4837 }
4838 return 0;
4839 }
4840
4841 static int tgsi_trig(struct r600_shader_ctx *ctx)
4842 {
4843 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4844 struct r600_bytecode_alu alu;
4845 int i, r;
4846 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4847
4848 r = tgsi_setup_trig(ctx);
4849 if (r)
4850 return r;
4851
4852 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4853 alu.op = ctx->inst_info->op;
4854 alu.dst.chan = 0;
4855 alu.dst.sel = ctx->temp_reg;
4856 alu.dst.write = 1;
4857
4858 alu.src[0].sel = ctx->temp_reg;
4859 alu.src[0].chan = 0;
4860 alu.last = 1;
4861 r = r600_bytecode_add_alu(ctx->bc, &alu);
4862 if (r)
4863 return r;
4864
4865 /* replicate result */
4866 for (i = 0; i < lasti + 1; i++) {
4867 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4868 continue;
4869
4870 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4871 alu.op = ALU_OP1_MOV;
4872
4873 alu.src[0].sel = ctx->temp_reg;
4874 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4875 if (i == lasti)
4876 alu.last = 1;
4877 r = r600_bytecode_add_alu(ctx->bc, &alu);
4878 if (r)
4879 return r;
4880 }
4881 return 0;
4882 }
4883
4884 static int tgsi_kill(struct r600_shader_ctx *ctx)
4885 {
4886 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4887 struct r600_bytecode_alu alu;
4888 int i, r;
4889
4890 for (i = 0; i < 4; i++) {
4891 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4892 alu.op = ctx->inst_info->op;
4893
4894 alu.dst.chan = i;
4895
4896 alu.src[0].sel = V_SQ_ALU_SRC_0;
4897
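/* src0 is 0.0; for plain KILL the second operand is -1.0 so the
 * greater-than kill compare always fires, while KILL_IF compares against
 * the source and kills where a component is negative */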
4898 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
4899 alu.src[1].sel = V_SQ_ALU_SRC_1;
4900 alu.src[1].neg = 1;
4901 } else {
4902 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4903 }
4904 if (i == 3) {
4905 alu.last = 1;
4906 }
4907 r = r600_bytecode_add_alu(ctx->bc, &alu);
4908 if (r)
4909 return r;
4910 }
4911
4912 /* kill must be last in ALU */
4913 ctx->bc->force_add_cf = 1;
4914 ctx->shader->uses_kill = TRUE;
4915 return 0;
4916 }
4917
4918 static int tgsi_lit(struct r600_shader_ctx *ctx)
4919 {
4920 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4921 struct r600_bytecode_alu alu;
4922 int r;
4923
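/* LIT yields (1, max(src.x, 0), src.x > 0 ? max(src.y, 0)^src.w : 0, 1);
 * the pow for .z is built below as EXP(MUL_LIT(LOG_CLAMPED(max(src.y, 0)),
 * src.w, src.x)), with MUL_LIT folding in the src.x > 0 test */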
4924 /* tmp.x = max(src.y, 0.0) */
4925 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4926 alu.op = ALU_OP2_MAX;
4927 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
4928 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
4929 alu.src[1].chan = 1;
4930
4931 alu.dst.sel = ctx->temp_reg;
4932 alu.dst.chan = 0;
4933 alu.dst.write = 1;
4934
4935 alu.last = 1;
4936 r = r600_bytecode_add_alu(ctx->bc, &alu);
4937 if (r)
4938 return r;
4939
4940 if (inst->Dst[0].Register.WriteMask & (1 << 2))
4941 {
4942 int chan;
4943 int sel;
4944 unsigned i;
4945
4946 if (ctx->bc->chip_class == CAYMAN) {
4947 for (i = 0; i < 3; i++) {
4948 /* tmp.z = log(tmp.x) */
4949 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4950 alu.op = ALU_OP1_LOG_CLAMPED;
4951 alu.src[0].sel = ctx->temp_reg;
4952 alu.src[0].chan = 0;
4953 alu.dst.sel = ctx->temp_reg;
4954 alu.dst.chan = i;
4955 if (i == 2) {
4956 alu.dst.write = 1;
4957 alu.last = 1;
4958 } else
4959 alu.dst.write = 0;
4960
4961 r = r600_bytecode_add_alu(ctx->bc, &alu);
4962 if (r)
4963 return r;
4964 }
4965 } else {
4966 /* tmp.z = log(tmp.x) */
4967 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4968 alu.op = ALU_OP1_LOG_CLAMPED;
4969 alu.src[0].sel = ctx->temp_reg;
4970 alu.src[0].chan = 0;
4971 alu.dst.sel = ctx->temp_reg;
4972 alu.dst.chan = 2;
4973 alu.dst.write = 1;
4974 alu.last = 1;
4975 r = r600_bytecode_add_alu(ctx->bc, &alu);
4976 if (r)
4977 return r;
4978 }
4979
4980 chan = alu.dst.chan;
4981 sel = alu.dst.sel;
4982
4983 /* tmp.x = MUL_LIT(tmp.z, src.w, src.x) */
4984 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4985 alu.op = ALU_OP3_MUL_LIT;
4986 alu.src[0].sel = sel;
4987 alu.src[0].chan = chan;
4988 r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
4989 r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
4990 alu.dst.sel = ctx->temp_reg;
4991 alu.dst.chan = 0;
4992 alu.dst.write = 1;
4993 alu.is_op3 = 1;
4994 alu.last = 1;
4995 r = r600_bytecode_add_alu(ctx->bc, &alu);
4996 if (r)
4997 return r;
4998
4999 if (ctx->bc->chip_class == CAYMAN) {
5000 for (i = 0; i < 3; i++) {
5001 /* dst.z = exp(tmp.x) */
5002 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5003 alu.op = ALU_OP1_EXP_IEEE;
5004 alu.src[0].sel = ctx->temp_reg;
5005 alu.src[0].chan = 0;
5006 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5007 if (i == 2) {
5008 alu.dst.write = 1;
5009 alu.last = 1;
5010 } else
5011 alu.dst.write = 0;
5012 r = r600_bytecode_add_alu(ctx->bc, &alu);
5013 if (r)
5014 return r;
5015 }
5016 } else {
5017 /* dst.z = exp(tmp.x) */
5018 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5019 alu.op = ALU_OP1_EXP_IEEE;
5020 alu.src[0].sel = ctx->temp_reg;
5021 alu.src[0].chan = 0;
5022 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
5023 alu.last = 1;
5024 r = r600_bytecode_add_alu(ctx->bc, &alu);
5025 if (r)
5026 return r;
5027 }
5028 }
5029
5030 /* dst.x <- 1.0 */
5031 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5032 alu.op = ALU_OP1_MOV;
5033 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
5034 alu.src[0].chan = 0;
5035 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
5036 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
5037 r = r600_bytecode_add_alu(ctx->bc, &alu);
5038 if (r)
5039 return r;
5040
5041 /* dst.y = max(src.x, 0.0) */
5042 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5043 alu.op = ALU_OP2_MAX;
5044 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5045 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
5046 alu.src[1].chan = 0;
5047 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
5048 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
5049 r = r600_bytecode_add_alu(ctx->bc, &alu);
5050 if (r)
5051 return r;
5052
5053 /* dst.w <- 1.0 */
5054 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5055 alu.op = ALU_OP1_MOV;
5056 alu.src[0].sel = V_SQ_ALU_SRC_1;
5057 alu.src[0].chan = 0;
5058 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
5059 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
5060 alu.last = 1;
5061 r = r600_bytecode_add_alu(ctx->bc, &alu);
5062 if (r)
5063 return r;
5064
5065 return 0;
5066 }
5067
5068 static int tgsi_rsq(struct r600_shader_ctx *ctx)
5069 {
5070 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5071 struct r600_bytecode_alu alu;
5072 int i, r;
5073
5074 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5075
5076 alu.op = ALU_OP1_RECIPSQRT_IEEE;
5077
5078 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5079 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5080 r600_bytecode_src_set_abs(&alu.src[i]);
5081 }
5082 alu.dst.sel = ctx->temp_reg;
5083 alu.dst.write = 1;
5084 alu.last = 1;
5085 r = r600_bytecode_add_alu(ctx->bc, &alu);
5086 if (r)
5087 return r;
5088 /* replicate result */
5089 return tgsi_helper_tempx_replicate(ctx);
5090 }
5091
5092 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
5093 {
5094 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5095 struct r600_bytecode_alu alu;
5096 int i, r;
5097
5098 for (i = 0; i < 4; i++) {
5099 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5100 alu.src[0].sel = ctx->temp_reg;
5101 alu.op = ALU_OP1_MOV;
5102 alu.dst.chan = i;
5103 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5104 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5105 if (i == 3)
5106 alu.last = 1;
5107 r = r600_bytecode_add_alu(ctx->bc, &alu);
5108 if (r)
5109 return r;
5110 }
5111 return 0;
5112 }
5113
5114 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
5115 {
5116 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5117 struct r600_bytecode_alu alu;
5118 int i, r;
5119
5120 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5121 alu.op = ctx->inst_info->op;
5122 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5123 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5124 }
5125 alu.dst.sel = ctx->temp_reg;
5126 alu.dst.write = 1;
5127 alu.last = 1;
5128 r = r600_bytecode_add_alu(ctx->bc, &alu);
5129 if (r)
5130 return r;
5131 /* replicate result */
5132 return tgsi_helper_tempx_replicate(ctx);
5133 }
5134
5135 static int cayman_pow(struct r600_shader_ctx *ctx)
5136 {
5137 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5138 int i, r;
5139 struct r600_bytecode_alu alu;
5140 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5141
5142 for (i = 0; i < 3; i++) {
5143 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5144 alu.op = ALU_OP1_LOG_IEEE;
5145 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5146 alu.dst.sel = ctx->temp_reg;
5147 alu.dst.chan = i;
5148 alu.dst.write = 1;
5149 if (i == 2)
5150 alu.last = 1;
5151 r = r600_bytecode_add_alu(ctx->bc, &alu);
5152 if (r)
5153 return r;
5154 }
5155
5156 /* b * LOG2(a) */
5157 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5158 alu.op = ALU_OP2_MUL;
5159 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5160 alu.src[1].sel = ctx->temp_reg;
5161 alu.dst.sel = ctx->temp_reg;
5162 alu.dst.write = 1;
5163 alu.last = 1;
5164 r = r600_bytecode_add_alu(ctx->bc, &alu);
5165 if (r)
5166 return r;
5167
5168 for (i = 0; i < last_slot; i++) {
5169 /* POW(a,b) = EXP2(b * LOG2(a)) */
5170 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5171 alu.op = ALU_OP1_EXP_IEEE;
5172 alu.src[0].sel = ctx->temp_reg;
5173
5174 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5175 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5176 if (i == last_slot - 1)
5177 alu.last = 1;
5178 r = r600_bytecode_add_alu(ctx->bc, &alu);
5179 if (r)
5180 return r;
5181 }
5182 return 0;
5183 }
5184
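/* Both POW paths lower pow(a, b) to EXP2(b * LOG2(a)); as with GLSL pow(),
 * the result is undefined for a < 0 since it goes through LOG2. */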
5185 static int tgsi_pow(struct r600_shader_ctx *ctx)
5186 {
5187 struct r600_bytecode_alu alu;
5188 int r;
5189
5190 /* LOG2(a) */
5191 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5192 alu.op = ALU_OP1_LOG_IEEE;
5193 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5194 alu.dst.sel = ctx->temp_reg;
5195 alu.dst.write = 1;
5196 alu.last = 1;
5197 r = r600_bytecode_add_alu(ctx->bc, &alu);
5198 if (r)
5199 return r;
5200 /* b * LOG2(a) */
5201 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5202 alu.op = ALU_OP2_MUL;
5203 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5204 alu.src[1].sel = ctx->temp_reg;
5205 alu.dst.sel = ctx->temp_reg;
5206 alu.dst.write = 1;
5207 alu.last = 1;
5208 r = r600_bytecode_add_alu(ctx->bc, &alu);
5209 if (r)
5210 return r;
5211 /* POW(a,b) = EXP2(b * LOG2(a)) */
5212 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5213 alu.op = ALU_OP1_EXP_IEEE;
5214 alu.src[0].sel = ctx->temp_reg;
5215 alu.dst.sel = ctx->temp_reg;
5216 alu.dst.write = 1;
5217 alu.last = 1;
5218 r = r600_bytecode_add_alu(ctx->bc, &alu);
5219 if (r)
5220 return r;
5221 return tgsi_helper_tempx_replicate(ctx);
5222 }
5223
5224 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
5225 {
5226 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5227 struct r600_bytecode_alu alu;
5228 int i, r, j;
5229 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5230 int tmp0 = ctx->temp_reg;
5231 int tmp1 = r600_get_temp(ctx);
5232 int tmp2 = r600_get_temp(ctx);
5233 int tmp3 = r600_get_temp(ctx);
5234 /* Unsigned path:
5235 *
5236 * we need to represent src1 as src2*q + r, where q is the quotient and r is the remainder
5237 *
5238 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
5239 * 2. tmp0.z = lo (tmp0.x * src2)
5240 * 3. tmp0.w = -tmp0.z
5241 * 4. tmp0.y = hi (tmp0.x * src2)
5242 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
5243 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
5244 * 7. tmp1.x = tmp0.x - tmp0.w
5245 * 8. tmp1.y = tmp0.x + tmp0.w
5246 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
5247 * 10. tmp0.z = hi(tmp0.x * src1) = q
5248 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
5249 *
5250 * 12. tmp0.w = src1 - tmp0.y = r
5251 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
5252 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
5253 *
5254 * if DIV
5255 *
5256 * 15. tmp1.z = tmp0.z + 1 = q + 1
5257 * 16. tmp1.w = tmp0.z - 1 = q - 1
5258 *
5259 * else MOD
5260 *
5261 * 15. tmp1.z = tmp0.w - src2 = r - src2
5262 * 16. tmp1.w = tmp0.w + src2 = r + src2
5263 *
5264 * endif
5265 *
5266 * 17. tmp1.x = tmp1.x & tmp1.y
5267 *
5268 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
5269 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
5270 *
5271 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
5272 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
5273 *
5274 * Signed path:
5275 *
5276 * Same as unsigned, using abs values of the operands,
5277 * and fixing the sign of the result in the end.
5278 */
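/* An illustrative run of the unsigned path (ignoring refinement steps
 * 2-9, which only correct the rcp estimate): for src1 = 100, src2 = 7,
 * rcp ~= floor(2^32 / 7) = 613566756; step 10 gives q = hi(rcp * 100) = 14,
 * steps 11-12 give r = 100 - lo(7 * 14) = 2; since r < src2 and r >= 0,
 * steps 15-19 apply no +/-1 correction and step 20 yields q = 14 for DIV
 * (or r = 2 for MOD). */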
5279
5280 for (i = 0; i < 4; i++) {
5281 if (!(write_mask & (1<<i)))
5282 continue;
5283
5284 if (signed_op) {
5285
5286 /* tmp2.x = -src0 */
5287 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5288 alu.op = ALU_OP2_SUB_INT;
5289
5290 alu.dst.sel = tmp2;
5291 alu.dst.chan = 0;
5292 alu.dst.write = 1;
5293
5294 alu.src[0].sel = V_SQ_ALU_SRC_0;
5295
5296 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5297
5298 alu.last = 1;
5299 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5300 return r;
5301
5302 /* tmp2.y = -src1 */
5303 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5304 alu.op = ALU_OP2_SUB_INT;
5305
5306 alu.dst.sel = tmp2;
5307 alu.dst.chan = 1;
5308 alu.dst.write = 1;
5309
5310 alu.src[0].sel = V_SQ_ALU_SRC_0;
5311
5312 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5313
5314 alu.last = 1;
5315 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5316 return r;
5317
5318 /* tmp2.z sign bit is set if src0 and src1 signs differ */
5319 /* it will be the sign of the quotient */
5320 if (!mod) {
5321
5322 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5323 alu.op = ALU_OP2_XOR_INT;
5324
5325 alu.dst.sel = tmp2;
5326 alu.dst.chan = 2;
5327 alu.dst.write = 1;
5328
5329 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5330 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5331
5332 alu.last = 1;
5333 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5334 return r;
5335 }
5336
5337 /* tmp2.x = |src0| */
5338 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5339 alu.op = ALU_OP3_CNDGE_INT;
5340 alu.is_op3 = 1;
5341
5342 alu.dst.sel = tmp2;
5343 alu.dst.chan = 0;
5344 alu.dst.write = 1;
5345
5346 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5347 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5348 alu.src[2].sel = tmp2;
5349 alu.src[2].chan = 0;
5350
5351 alu.last = 1;
5352 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5353 return r;
5354
5355 /* tmp2.y = |src1| */
5356 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5357 alu.op = ALU_OP3_CNDGE_INT;
5358 alu.is_op3 = 1;
5359
5360 alu.dst.sel = tmp2;
5361 alu.dst.chan = 1;
5362 alu.dst.write = 1;
5363
5364 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5365 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5366 alu.src[2].sel = tmp2;
5367 alu.src[2].chan = 1;
5368
5369 alu.last = 1;
5370 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5371 return r;
5372
5373 }
5374
5375 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
5376 if (ctx->bc->chip_class == CAYMAN) {
5377 /* tmp3.x = u2f(src2) */
5378 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5379 alu.op = ALU_OP1_UINT_TO_FLT;
5380
5381 alu.dst.sel = tmp3;
5382 alu.dst.chan = 0;
5383 alu.dst.write = 1;
5384
5385 if (signed_op) {
5386 alu.src[0].sel = tmp2;
5387 alu.src[0].chan = 1;
5388 } else {
5389 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5390 }
5391
5392 alu.last = 1;
5393 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5394 return r;
5395
5396 /* tmp0.x = recip(tmp3.x) */
5397 for (j = 0 ; j < 3; j++) {
5398 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5399 alu.op = ALU_OP1_RECIP_IEEE;
5400
5401 alu.dst.sel = tmp0;
5402 alu.dst.chan = j;
5403 alu.dst.write = (j == 0);
5404
5405 alu.src[0].sel = tmp3;
5406 alu.src[0].chan = 0;
5407
5408 if (j == 2)
5409 alu.last = 1;
5410 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5411 return r;
5412 }
5413
5414 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5415 alu.op = ALU_OP2_MUL;
5416
5417 alu.src[0].sel = tmp0;
5418 alu.src[0].chan = 0;
5419
5420 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5421 alu.src[1].value = 0x4f800000;
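/* 0x4f800000 is 2^32 as a float, so tmp3 becomes 2^32 * (1 / src2),
 * matching what RECIP_UINT produces directly on non-Cayman parts */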
5422
5423 alu.dst.sel = tmp3;
5424 alu.dst.write = 1;
5425 alu.last = 1;
5426 r = r600_bytecode_add_alu(ctx->bc, &alu);
5427 if (r)
5428 return r;
5429
5430 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5431 alu.op = ALU_OP1_FLT_TO_UINT;
5432
5433 alu.dst.sel = tmp0;
5434 alu.dst.chan = 0;
5435 alu.dst.write = 1;
5436
5437 alu.src[0].sel = tmp3;
5438 alu.src[0].chan = 0;
5439
5440 alu.last = 1;
5441 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5442 return r;
5443
5444 } else {
5445 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5446 alu.op = ALU_OP1_RECIP_UINT;
5447
5448 alu.dst.sel = tmp0;
5449 alu.dst.chan = 0;
5450 alu.dst.write = 1;
5451
5452 if (signed_op) {
5453 alu.src[0].sel = tmp2;
5454 alu.src[0].chan = 1;
5455 } else {
5456 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5457 }
5458
5459 alu.last = 1;
5460 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5461 return r;
5462 }
5463
5464 /* 2. tmp0.z = lo (tmp0.x * src2) */
5465 if (ctx->bc->chip_class == CAYMAN) {
5466 for (j = 0 ; j < 4; j++) {
5467 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5468 alu.op = ALU_OP2_MULLO_UINT;
5469
5470 alu.dst.sel = tmp0;
5471 alu.dst.chan = j;
5472 alu.dst.write = (j == 2);
5473
5474 alu.src[0].sel = tmp0;
5475 alu.src[0].chan = 0;
5476 if (signed_op) {
5477 alu.src[1].sel = tmp2;
5478 alu.src[1].chan = 1;
5479 } else {
5480 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5481 }
5482
5483 alu.last = (j == 3);
5484 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5485 return r;
5486 }
5487 } else {
5488 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5489 alu.op = ALU_OP2_MULLO_UINT;
5490
5491 alu.dst.sel = tmp0;
5492 alu.dst.chan = 2;
5493 alu.dst.write = 1;
5494
5495 alu.src[0].sel = tmp0;
5496 alu.src[0].chan = 0;
5497 if (signed_op) {
5498 alu.src[1].sel = tmp2;
5499 alu.src[1].chan = 1;
5500 } else {
5501 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5502 }
5503
5504 alu.last = 1;
5505 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5506 return r;
5507 }
5508
5509 /* 3. tmp0.w = -tmp0.z */
5510 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5511 alu.op = ALU_OP2_SUB_INT;
5512
5513 alu.dst.sel = tmp0;
5514 alu.dst.chan = 3;
5515 alu.dst.write = 1;
5516
5517 alu.src[0].sel = V_SQ_ALU_SRC_0;
5518 alu.src[1].sel = tmp0;
5519 alu.src[1].chan = 2;
5520
5521 alu.last = 1;
5522 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5523 return r;
5524
5525 /* 4. tmp0.y = hi (tmp0.x * src2) */
5526 if (ctx->bc->chip_class == CAYMAN) {
5527 for (j = 0 ; j < 4; j++) {
5528 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5529 alu.op = ALU_OP2_MULHI_UINT;
5530
5531 alu.dst.sel = tmp0;
5532 alu.dst.chan = j;
5533 alu.dst.write = (j == 1);
5534
5535 alu.src[0].sel = tmp0;
5536 alu.src[0].chan = 0;
5537
5538 if (signed_op) {
5539 alu.src[1].sel = tmp2;
5540 alu.src[1].chan = 1;
5541 } else {
5542 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5543 }
5544 alu.last = (j == 3);
5545 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5546 return r;
5547 }
5548 } else {
5549 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5550 alu.op = ALU_OP2_MULHI_UINT;
5551
5552 alu.dst.sel = tmp0;
5553 alu.dst.chan = 1;
5554 alu.dst.write = 1;
5555
5556 alu.src[0].sel = tmp0;
5557 alu.src[0].chan = 0;
5558
5559 if (signed_op) {
5560 alu.src[1].sel = tmp2;
5561 alu.src[1].chan = 1;
5562 } else {
5563 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5564 }
5565
5566 alu.last = 1;
5567 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5568 return r;
5569 }
5570
5571 /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) */
5572 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5573 alu.op = ALU_OP3_CNDE_INT;
5574 alu.is_op3 = 1;
5575
5576 alu.dst.sel = tmp0;
5577 alu.dst.chan = 2;
5578 alu.dst.write = 1;
5579
5580 alu.src[0].sel = tmp0;
5581 alu.src[0].chan = 1;
5582 alu.src[1].sel = tmp0;
5583 alu.src[1].chan = 3;
5584 alu.src[2].sel = tmp0;
5585 alu.src[2].chan = 2;
5586
5587 alu.last = 1;
5588 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5589 return r;
5590
5591 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
5592 if (ctx->bc->chip_class == CAYMAN) {
5593 for (j = 0 ; j < 4; j++) {
5594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5595 alu.op = ALU_OP2_MULHI_UINT;
5596
5597 alu.dst.sel = tmp0;
5598 alu.dst.chan = j;
5599 alu.dst.write = (j == 3);
5600
5601 alu.src[0].sel = tmp0;
5602 alu.src[0].chan = 2;
5603
5604 alu.src[1].sel = tmp0;
5605 alu.src[1].chan = 0;
5606
5607 alu.last = (j == 3);
5608 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5609 return r;
5610 }
5611 } else {
5612 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5613 alu.op = ALU_OP2_MULHI_UINT;
5614
5615 alu.dst.sel = tmp0;
5616 alu.dst.chan = 3;
5617 alu.dst.write = 1;
5618
5619 alu.src[0].sel = tmp0;
5620 alu.src[0].chan = 2;
5621
5622 alu.src[1].sel = tmp0;
5623 alu.src[1].chan = 0;
5624
5625 alu.last = 1;
5626 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5627 return r;
5628 }
5629
5630 /* 7. tmp1.x = tmp0.x - tmp0.w */
5631 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5632 alu.op = ALU_OP2_SUB_INT;
5633
5634 alu.dst.sel = tmp1;
5635 alu.dst.chan = 0;
5636 alu.dst.write = 1;
5637
5638 alu.src[0].sel = tmp0;
5639 alu.src[0].chan = 0;
5640 alu.src[1].sel = tmp0;
5641 alu.src[1].chan = 3;
5642
5643 alu.last = 1;
5644 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5645 return r;
5646
5647 /* 8. tmp1.y = tmp0.x + tmp0.w */
5648 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5649 alu.op = ALU_OP2_ADD_INT;
5650
5651 alu.dst.sel = tmp1;
5652 alu.dst.chan = 1;
5653 alu.dst.write = 1;
5654
5655 alu.src[0].sel = tmp0;
5656 alu.src[0].chan = 0;
5657 alu.src[1].sel = tmp0;
5658 alu.src[1].chan = 3;
5659
5660 alu.last = 1;
5661 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5662 return r;
5663
5664 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
5665 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5666 alu.op = ALU_OP3_CNDE_INT;
5667 alu.is_op3 = 1;
5668
5669 alu.dst.sel = tmp0;
5670 alu.dst.chan = 0;
5671 alu.dst.write = 1;
5672
5673 alu.src[0].sel = tmp0;
5674 alu.src[0].chan = 1;
5675 alu.src[1].sel = tmp1;
5676 alu.src[1].chan = 1;
5677 alu.src[2].sel = tmp1;
5678 alu.src[2].chan = 0;
5679
5680 alu.last = 1;
5681 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5682 return r;
5683
5684 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
5685 if (ctx->bc->chip_class == CAYMAN) {
5686 for (j = 0 ; j < 4; j++) {
5687 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5688 alu.op = ALU_OP2_MULHI_UINT;
5689
5690 alu.dst.sel = tmp0;
5691 alu.dst.chan = j;
5692 alu.dst.write = (j == 2);
5693
5694 alu.src[0].sel = tmp0;
5695 alu.src[0].chan = 0;
5696
5697 if (signed_op) {
5698 alu.src[1].sel = tmp2;
5699 alu.src[1].chan = 0;
5700 } else {
5701 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5702 }
5703
5704 alu.last = (j == 3);
5705 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5706 return r;
5707 }
5708 } else {
5709 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5710 alu.op = ALU_OP2_MULHI_UINT;
5711
5712 alu.dst.sel = tmp0;
5713 alu.dst.chan = 2;
5714 alu.dst.write = 1;
5715
5716 alu.src[0].sel = tmp0;
5717 alu.src[0].chan = 0;
5718
5719 if (signed_op) {
5720 alu.src[1].sel = tmp2;
5721 alu.src[1].chan = 0;
5722 } else {
5723 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5724 }
5725
5726 alu.last = 1;
5727 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5728 return r;
5729 }
5730
5731 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
5732 if (ctx->bc->chip_class == CAYMAN) {
5733 for (j = 0 ; j < 4; j++) {
5734 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5735 alu.op = ALU_OP2_MULLO_UINT;
5736
5737 alu.dst.sel = tmp0;
5738 alu.dst.chan = j;
5739 alu.dst.write = (j == 1);
5740
5741 if (signed_op) {
5742 alu.src[0].sel = tmp2;
5743 alu.src[0].chan = 1;
5744 } else {
5745 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5746 }
5747
5748 alu.src[1].sel = tmp0;
5749 alu.src[1].chan = 2;
5750
5751 alu.last = (j == 3);
5752 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5753 return r;
5754 }
5755 } else {
5756 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5757 alu.op = ALU_OP2_MULLO_UINT;
5758
5759 alu.dst.sel = tmp0;
5760 alu.dst.chan = 1;
5761 alu.dst.write = 1;
5762
5763 if (signed_op) {
5764 alu.src[0].sel = tmp2;
5765 alu.src[0].chan = 1;
5766 } else {
5767 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5768 }
5769
5770 alu.src[1].sel = tmp0;
5771 alu.src[1].chan = 2;
5772
5773 alu.last = 1;
5774 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5775 return r;
5776 }
5777
5778 /* 12. tmp0.w = src1 - tmp0.y = r */
5779 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5780 alu.op = ALU_OP2_SUB_INT;
5781
5782 alu.dst.sel = tmp0;
5783 alu.dst.chan = 3;
5784 alu.dst.write = 1;
5785
5786 if (signed_op) {
5787 alu.src[0].sel = tmp2;
5788 alu.src[0].chan = 0;
5789 } else {
5790 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5791 }
5792
5793 alu.src[1].sel = tmp0;
5794 alu.src[1].chan = 1;
5795
5796 alu.last = 1;
5797 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5798 return r;
5799
5800 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
5801 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5802 alu.op = ALU_OP2_SETGE_UINT;
5803
5804 alu.dst.sel = tmp1;
5805 alu.dst.chan = 0;
5806 alu.dst.write = 1;
5807
5808 alu.src[0].sel = tmp0;
5809 alu.src[0].chan = 3;
5810 if (signed_op) {
5811 alu.src[1].sel = tmp2;
5812 alu.src[1].chan = 1;
5813 } else {
5814 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5815 }
5816
5817 alu.last = 1;
5818 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5819 return r;
5820
5821 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
5822 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5823 alu.op = ALU_OP2_SETGE_UINT;
5824
5825 alu.dst.sel = tmp1;
5826 alu.dst.chan = 1;
5827 alu.dst.write = 1;
5828
5829 if (signed_op) {
5830 alu.src[0].sel = tmp2;
5831 alu.src[0].chan = 0;
5832 } else {
5833 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5834 }
5835
5836 alu.src[1].sel = tmp0;
5837 alu.src[1].chan = 1;
5838
5839 alu.last = 1;
5840 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5841 return r;
5842
5843 if (mod) { /* UMOD */
5844
5845 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
5846 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5847 alu.op = ALU_OP2_SUB_INT;
5848
5849 alu.dst.sel = tmp1;
5850 alu.dst.chan = 2;
5851 alu.dst.write = 1;
5852
5853 alu.src[0].sel = tmp0;
5854 alu.src[0].chan = 3;
5855
5856 if (signed_op) {
5857 alu.src[1].sel = tmp2;
5858 alu.src[1].chan = 1;
5859 } else {
5860 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5861 }
5862
5863 alu.last = 1;
5864 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5865 return r;
5866
5867 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
5868 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5869 alu.op = ALU_OP2_ADD_INT;
5870
5871 alu.dst.sel = tmp1;
5872 alu.dst.chan = 3;
5873 alu.dst.write = 1;
5874
5875 alu.src[0].sel = tmp0;
5876 alu.src[0].chan = 3;
5877 if (signed_op) {
5878 alu.src[1].sel = tmp2;
5879 alu.src[1].chan = 1;
5880 } else {
5881 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5882 }
5883
5884 alu.last = 1;
5885 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5886 return r;
5887
5888 } else { /* UDIV */
5889
5890 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
5891 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5892 alu.op = ALU_OP2_ADD_INT;
5893
5894 alu.dst.sel = tmp1;
5895 alu.dst.chan = 2;
5896 alu.dst.write = 1;
5897
5898 alu.src[0].sel = tmp0;
5899 alu.src[0].chan = 2;
5900 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
5901
5902 alu.last = 1;
5903 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5904 return r;
5905
5906 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
5907 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5908 alu.op = ALU_OP2_ADD_INT;
5909
5910 alu.dst.sel = tmp1;
5911 alu.dst.chan = 3;
5912 alu.dst.write = 1;
5913
5914 alu.src[0].sel = tmp0;
5915 alu.src[0].chan = 2;
5916 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
5917
5918 alu.last = 1;
5919 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5920 return r;
5921
5922 }
5923
5924 /* 17. tmp1.x = tmp1.x & tmp1.y */
5925 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5926 alu.op = ALU_OP2_AND_INT;
5927
5928 alu.dst.sel = tmp1;
5929 alu.dst.chan = 0;
5930 alu.dst.write = 1;
5931
5932 alu.src[0].sel = tmp1;
5933 alu.src[0].chan = 0;
5934 alu.src[1].sel = tmp1;
5935 alu.src[1].chan = 1;
5936
5937 alu.last = 1;
5938 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5939 return r;
5940
5941 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
5942 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
5943 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5944 alu.op = ALU_OP3_CNDE_INT;
5945 alu.is_op3 = 1;
5946
5947 alu.dst.sel = tmp0;
5948 alu.dst.chan = 2;
5949 alu.dst.write = 1;
5950
5951 alu.src[0].sel = tmp1;
5952 alu.src[0].chan = 0;
5953 alu.src[1].sel = tmp0;
5954 alu.src[1].chan = mod ? 3 : 2;
5955 alu.src[2].sel = tmp1;
5956 alu.src[2].chan = 2;
5957
5958 alu.last = 1;
5959 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5960 return r;
5961
5962 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
5963 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5964 alu.op = ALU_OP3_CNDE_INT;
5965 alu.is_op3 = 1;
5966
5967 if (signed_op) {
5968 alu.dst.sel = tmp0;
5969 alu.dst.chan = 2;
5970 alu.dst.write = 1;
5971 } else {
5972 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5973 }
5974
5975 alu.src[0].sel = tmp1;
5976 alu.src[0].chan = 1;
5977 alu.src[1].sel = tmp1;
5978 alu.src[1].chan = 3;
5979 alu.src[2].sel = tmp0;
5980 alu.src[2].chan = 2;
5981
5982 alu.last = 1;
5983 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5984 return r;
5985
5986 if (signed_op) {
5987
5988 /* fix the sign of the result */
5989
5990 if (mod) {
5991
5992 /* tmp0.x = -tmp0.z */
5993 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5994 alu.op = ALU_OP2_SUB_INT;
5995
5996 alu.dst.sel = tmp0;
5997 alu.dst.chan = 0;
5998 alu.dst.write = 1;
5999
6000 alu.src[0].sel = V_SQ_ALU_SRC_0;
6001 alu.src[1].sel = tmp0;
6002 alu.src[1].chan = 2;
6003
6004 alu.last = 1;
6005 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6006 return r;
6007
6008 /* sign of the remainder is the same as the sign of src0 */
6009 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
6010 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6011 alu.op = ALU_OP3_CNDGE_INT;
6012 alu.is_op3 = 1;
6013
6014 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6015
6016 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6017 alu.src[1].sel = tmp0;
6018 alu.src[1].chan = 2;
6019 alu.src[2].sel = tmp0;
6020 alu.src[2].chan = 0;
6021
6022 alu.last = 1;
6023 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6024 return r;
6025
6026 } else {
6027
6028 /* tmp0.x = -tmp0.z */
6029 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6030 alu.op = ALU_OP2_SUB_INT;
6031
6032 alu.dst.sel = tmp0;
6033 alu.dst.chan = 0;
6034 alu.dst.write = 1;
6035
6036 alu.src[0].sel = V_SQ_ALU_SRC_0;
6037 alu.src[1].sel = tmp0;
6038 alu.src[1].chan = 2;
6039
6040 alu.last = 1;
6041 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6042 return r;
6043
6044 /* fix the quotient sign (same as the sign of src0*src1) */
6045 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
6046 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6047 alu.op = ALU_OP3_CNDGE_INT;
6048 alu.is_op3 = 1;
6049
6050 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6051
6052 alu.src[0].sel = tmp2;
6053 alu.src[0].chan = 2;
6054 alu.src[1].sel = tmp0;
6055 alu.src[1].chan = 2;
6056 alu.src[2].sel = tmp0;
6057 alu.src[2].chan = 0;
6058
6059 alu.last = 1;
6060 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6061 return r;
6062 }
6063 }
6064 }
6065 return 0;
6066 }
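
/* For reference, a plain C sketch of what the numbered sequence above
 * computes. Illustrative only, not driver code: umulhi() and
 * udivmod_sketch() are made-up names, the hardware RECIP_UINT result is
 * only an estimate (approximated here with an exact 64-bit division,
 * which is what makes the fixup steps necessary in the real sequence),
 * and src2 == 0 is not handled. For the signed variants the same
 * sequence runs on absolute values kept in tmp2 and the sign is patched
 * afterwards, as above. */
#if 0
static uint32_t umulhi(uint32_t a, uint32_t b)
{
return (uint32_t)(((uint64_t)a * b) >> 32);
}

/* mod = 0 for UDIV, 1 for UMOD */
static uint32_t udivmod_sketch(uint32_t src1, uint32_t src2, int mod)
{
/* 1. reciprocal estimate, rcp ~= 2^32 / src2 */
uint32_t rcp = (uint32_t)(0xffffffffull / src2);
/* 2.-5. e = |rcp * src2 - 2^32|, the scaled estimate error */
uint32_t lo = rcp * src2;
uint32_t e = umulhi(rcp, src2) ? lo : -lo;
/* 6.-9. refine the estimate by one correction step */
uint32_t corr = umulhi(e, rcp);
rcp = umulhi(rcp, src2) ? rcp - corr : rcp + corr;
/* 10.-12. quotient and remainder estimates */
uint32_t q = umulhi(rcp, src1);
uint32_t r = src1 - q * src2;
/* 13.-19. q and r may be off by one step of src2: fix up */
if (src1 < q * src2) /* r underflowed */
return mod ? r + src2 : q - 1;
if (r >= src2) /* one step short */
return mod ? r - src2 : q + 1;
return mod ? r : q;
}
#endif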
6067
6068 static int tgsi_udiv(struct r600_shader_ctx *ctx)
6069 {
6070 return tgsi_divmod(ctx, 0, 0);
6071 }
6072
6073 static int tgsi_umod(struct r600_shader_ctx *ctx)
6074 {
6075 return tgsi_divmod(ctx, 1, 0);
6076 }
6077
6078 static int tgsi_idiv(struct r600_shader_ctx *ctx)
6079 {
6080 return tgsi_divmod(ctx, 0, 1);
6081 }
6082
6083 static int tgsi_imod(struct r600_shader_ctx *ctx)
6084 {
6085 return tgsi_divmod(ctx, 1, 1);
6086 }
6087
6088
6089 static int tgsi_f2i(struct r600_shader_ctx *ctx)
6090 {
6091 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6092 struct r600_bytecode_alu alu;
6093 int i, r;
6094 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6095 int last_inst = tgsi_last_instruction(write_mask);
6096
6097 for (i = 0; i < 4; i++) {
6098 if (!(write_mask & (1<<i)))
6099 continue;
6100
6101 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6102 alu.op = ALU_OP1_TRUNC;
6103
6104 alu.dst.sel = ctx->temp_reg;
6105 alu.dst.chan = i;
6106 alu.dst.write = 1;
6107
6108 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6109 if (i == last_inst)
6110 alu.last = 1;
6111 r = r600_bytecode_add_alu(ctx->bc, &alu);
6112 if (r)
6113 return r;
6114 }
6115
6116 for (i = 0; i < 4; i++) {
6117 if (!(write_mask & (1<<i)))
6118 continue;
6119
6120 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6121 alu.op = ctx->inst_info->op;
6122
6123 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6124
6125 alu.src[0].sel = ctx->temp_reg;
6126 alu.src[0].chan = i;
6127
6128 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
6129 alu.last = 1;
6130 r = r600_bytecode_add_alu(ctx->bc, &alu);
6131 if (r)
6132 return r;
6133 }
6134
6135 return 0;
6136 }
6137
6138 static int tgsi_iabs(struct r600_shader_ctx *ctx)
6139 {
6140 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6141 struct r600_bytecode_alu alu;
6142 int i, r;
6143 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6144 int last_inst = tgsi_last_instruction(write_mask);
6145
6146 /* tmp = -src */
6147 for (i = 0; i < 4; i++) {
6148 if (!(write_mask & (1<<i)))
6149 continue;
6150
6151 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6152 alu.op = ALU_OP2_SUB_INT;
6153
6154 alu.dst.sel = ctx->temp_reg;
6155 alu.dst.chan = i;
6156 alu.dst.write = 1;
6157
6158 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6159 alu.src[0].sel = V_SQ_ALU_SRC_0;
6160
6161 if (i == last_inst)
6162 alu.last = 1;
6163 r = r600_bytecode_add_alu(ctx->bc, &alu);
6164 if (r)
6165 return r;
6166 }
6167
6168 /* dst = (src >= 0 ? src : tmp) */
6169 for (i = 0; i < 4; i++) {
6170 if (!(write_mask & (1<<i)))
6171 continue;
6172
6173 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6174 alu.op = ALU_OP3_CNDGE_INT;
6175 alu.is_op3 = 1;
6176 alu.dst.write = 1;
6177
6178 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6179
6180 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6181 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6182 alu.src[2].sel = ctx->temp_reg;
6183 alu.src[2].chan = i;
6184
6185 if (i == last_inst)
6186 alu.last = 1;
6187 r = r600_bytecode_add_alu(ctx->bc, &alu);
6188 if (r)
6189 return r;
6190 }
6191 return 0;
6192 }
6193
6194 static int tgsi_issg(struct r600_shader_ctx *ctx)
6195 {
6196 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6197 struct r600_bytecode_alu alu;
6198 int i, r;
6199 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6200 int last_inst = tgsi_last_instruction(write_mask);
6201
6202 /* tmp = (src >= 0 ? src : -1) */
6203 for (i = 0; i < 4; i++) {
6204 if (!(write_mask & (1<<i)))
6205 continue;
6206
6207 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6208 alu.op = ALU_OP3_CNDGE_INT;
6209 alu.is_op3 = 1;
6210
6211 alu.dst.sel = ctx->temp_reg;
6212 alu.dst.chan = i;
6213 alu.dst.write = 1;
6214
6215 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6216 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6217 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
6218
6219 if (i == last_inst)
6220 alu.last = 1;
6221 r = r600_bytecode_add_alu(ctx->bc, &alu);
6222 if (r)
6223 return r;
6224 }
6225
6226 /* dst = (tmp > 0 ? 1 : tmp) */
6227 for (i = 0; i < 4; i++) {
6228 if (!(write_mask & (1<<i)))
6229 continue;
6230
6231 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6232 alu.op = ALU_OP3_CNDGT_INT;
6233 alu.is_op3 = 1;
6234 alu.dst.write = 1;
6235
6236 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6237
6238 alu.src[0].sel = ctx->temp_reg;
6239 alu.src[0].chan = i;
6240
6241 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
6242
6243 alu.src[2].sel = ctx->temp_reg;
6244 alu.src[2].chan = i;
6245
6246 if (i == last_inst)
6247 alu.last = 1;
6248 r = r600_bytecode_add_alu(ctx->bc, &alu);
6249 if (r)
6250 return r;
6251 }
6252 return 0;
6253 }
6254
6255
6256
6257 static int tgsi_ssg(struct r600_shader_ctx *ctx)
6258 {
6259 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6260 struct r600_bytecode_alu alu;
6261 int i, r;
6262
6263 /* tmp = (src > 0 ? 1 : src) */
6264 for (i = 0; i < 4; i++) {
6265 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6266 alu.op = ALU_OP3_CNDGT;
6267 alu.is_op3 = 1;
6268
6269 alu.dst.sel = ctx->temp_reg;
6270 alu.dst.chan = i;
6271
6272 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6273 alu.src[1].sel = V_SQ_ALU_SRC_1;
6274 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6275
6276 if (i == 3)
6277 alu.last = 1;
6278 r = r600_bytecode_add_alu(ctx->bc, &alu);
6279 if (r)
6280 return r;
6281 }
6282
6283 /* dst = (-tmp > 0 ? -1 : tmp) */
6284 for (i = 0; i < 4; i++) {
6285 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6286 alu.op = ALU_OP3_CNDGT;
6287 alu.is_op3 = 1;
6288 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6289
6290 alu.src[0].sel = ctx->temp_reg;
6291 alu.src[0].chan = i;
6292 alu.src[0].neg = 1;
6293
6294 alu.src[1].sel = V_SQ_ALU_SRC_1;
6295 alu.src[1].neg = 1;
6296
6297 alu.src[2].sel = ctx->temp_reg;
6298 alu.src[2].chan = i;
6299
6300 if (i == 3)
6301 alu.last = 1;
6302 r = r600_bytecode_add_alu(ctx->bc, &alu);
6303 if (r)
6304 return r;
6305 }
6306 return 0;
6307 }
6308
6309 static int tgsi_bfi(struct r600_shader_ctx *ctx)
6310 {
6311 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6312 struct r600_bytecode_alu alu;
6313 int i, r, t1, t2;
6314
6315 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6316 int last_inst = tgsi_last_instruction(write_mask);
6317
6318 t1 = r600_get_temp(ctx);
6319
6320 for (i = 0; i < 4; i++) {
6321 if (!(write_mask & (1<<i)))
6322 continue;
6323
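/* tmp = (width >= 32): in that case the final CNDE_INT below returns the
 * insert value unchanged, since BFM cannot build a mask covering the
 * whole word (its width field is only 5 bits wide) */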
6324 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6325 alu.op = ALU_OP2_SETGE_INT;
6326 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6327 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6328 alu.src[1].value = 32;
6329 alu.dst.sel = ctx->temp_reg;
6330 alu.dst.chan = i;
6331 alu.dst.write = 1;
6332 alu.last = i == last_inst;
6333 r = r600_bytecode_add_alu(ctx->bc, &alu);
6334 if (r)
6335 return r;
6336 }
6337
6338 for (i = 0; i < 4; i++) {
6339 if (!(write_mask & (1<<i)))
6340 continue;
6341
6342 /* create mask tmp */
6343 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6344 alu.op = ALU_OP2_BFM_INT;
6345 alu.dst.sel = t1;
6346 alu.dst.chan = i;
6347 alu.dst.write = 1;
6348 alu.last = i == last_inst;
6349
6350 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6351 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6352
6353 r = r600_bytecode_add_alu(ctx->bc, &alu);
6354 if (r)
6355 return r;
6356 }
6357
6358 t2 = r600_get_temp(ctx);
6359
6360 for (i = 0; i < 4; i++) {
6361 if (!(write_mask & (1<<i)))
6362 continue;
6363
6364 /* shift insert left */
6365 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6366 alu.op = ALU_OP2_LSHL_INT;
6367 alu.dst.sel = t2;
6368 alu.dst.chan = i;
6369 alu.dst.write = 1;
6370 alu.last = i == last_inst;
6371
6372 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6373 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6374
6375 r = r600_bytecode_add_alu(ctx->bc, &alu);
6376 if (r)
6377 return r;
6378 }
6379
6380 for (i = 0; i < 4; i++) {
6381 if (!(write_mask & (1<<i)))
6382 continue;
6383
6384 /* actual bitfield insert */
6385 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6386 alu.op = ALU_OP3_BFI_INT;
6387 alu.is_op3 = 1;
6388 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6389 alu.dst.chan = i;
6390 alu.dst.write = 1;
6391 alu.last = i == last_inst;
6392
6393 alu.src[0].sel = t1;
6394 alu.src[0].chan = i;
6395 alu.src[1].sel = t2;
6396 alu.src[1].chan = i;
6397 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6398
6399 r = r600_bytecode_add_alu(ctx->bc, &alu);
6400 if (r)
6401 return r;
6402 }
6403
6404 for (i = 0; i < 4; i++) {
6405 if (!(write_mask & (1<<i)))
6406 continue;
6407 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6408 alu.op = ALU_OP3_CNDE_INT;
6409 alu.is_op3 = 1;
6410 alu.src[0].sel = ctx->temp_reg;
6411 alu.src[0].chan = i;
6412 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6413
6414 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6415
6416 alu.src[1].sel = alu.dst.sel;
6417 alu.src[1].chan = i;
6418
6419 alu.last = i == last_inst;
6420 r = r600_bytecode_add_alu(ctx->bc, &alu);
6421 if (r)
6422 return r;
6423 }
6424 return 0;
6425 }
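
/* A plain C sketch of the lowering above, for reference (illustrative
 * only; bfi_sketch() is a made-up name). TGSI operand order is
 * BFI(base, insert, offset, width). */
#if 0
static uint32_t bfi_sketch(uint32_t base, uint32_t insert,
unsigned offset, unsigned width)
{
uint32_t mask;
if (width >= 32) /* the SETGE_INT + final CNDE_INT pair */
return insert;
mask = ((1u << width) - 1) << offset; /* BFM_INT */
return (mask & (insert << offset)) | (~mask & base); /* LSHL + BFI_INT */
}
#endif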
6426
6427 static int tgsi_msb(struct r600_shader_ctx *ctx)
6428 {
6429 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6430 struct r600_bytecode_alu alu;
6431 int i, r, t1, t2;
6432
6433 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6434 int last_inst = tgsi_last_instruction(write_mask);
6435
6436 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
6437 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
6438
6439 t1 = ctx->temp_reg;
6440
6441 /* bit position is indexed from lsb by TGSI, and from msb by the hardware */
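/* e.g. FFBH(0x00800000) = 8 counting from the msb, while TGSI expects
 * bit 23 = 31 - 8; the "no bit found" result -1 must pass through
 * untouched, hence the CNDGE select below */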
6442 for (i = 0; i < 4; i++) {
6443 if (!(write_mask & (1<<i)))
6444 continue;
6445
6446 /* t1 = FFBH_INT / FFBH_UINT */
6447 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6448 alu.op = ctx->inst_info->op;
6449 alu.dst.sel = t1;
6450 alu.dst.chan = i;
6451 alu.dst.write = 1;
6452 alu.last = i == last_inst;
6453
6454 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6455
6456 r = r600_bytecode_add_alu(ctx->bc, &alu);
6457 if (r)
6458 return r;
6459 }
6460
6461 t2 = r600_get_temp(ctx);
6462
6463 for (i = 0; i < 4; i++) {
6464 if (!(write_mask & (1<<i)))
6465 continue;
6466
6467 /* t2 = 31 - t1 */
6468 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6469 alu.op = ALU_OP2_SUB_INT;
6470 alu.dst.sel = t2;
6471 alu.dst.chan = i;
6472 alu.dst.write = 1;
6473 alu.last = i == last_inst;
6474
6475 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
6476 alu.src[0].value = 31;
6477 alu.src[1].sel = t1;
6478 alu.src[1].chan = i;
6479
6480 r = r600_bytecode_add_alu(ctx->bc, &alu);
6481 if (r)
6482 return r;
6483 }
6484
6485 for (i = 0; i < 4; i++) {
6486 if (!(write_mask & (1<<i)))
6487 continue;
6488
6489 /* result = t1 >= 0 ? t2 : t1 */
6490 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6491 alu.op = ALU_OP3_CNDGE_INT;
6492 alu.is_op3 = 1;
6493 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6494 alu.dst.chan = i;
6495 alu.dst.write = 1;
6496 alu.last = i == last_inst;
6497
6498 alu.src[0].sel = t1;
6499 alu.src[0].chan = i;
6500 alu.src[1].sel = t2;
6501 alu.src[1].chan = i;
6502 alu.src[2].sel = t1;
6503 alu.src[2].chan = i;
6504
6505 r = r600_bytecode_add_alu(ctx->bc, &alu);
6506 if (r)
6507 return r;
6508 }
6509
6510 return 0;
6511 }
6512
6513 static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
6514 {
6515 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6516 struct r600_bytecode_alu alu;
6517 int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
6518 unsigned location;
6519 const int input = inst->Src[0].Register.Index + ctx->shader->nsys_inputs;
6520
6521 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
6522
6523 /* Interpolators have been marked for use already by allocate_system_value_inputs */
6524 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6525 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6526 location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
6527 }
6528 else {
6529 location = TGSI_INTERPOLATE_LOC_CENTROID;
6530 }
6531
6532 k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
6533 if (k < 0)
6534 k = 0;
6535 interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
6536 interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);
6537
6538 /* NOTE: currently offset is not perspective correct */
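/* Both paths evaluate the attribute at ij + dx*d(ij)/dx + dy*d(ij)/dy:
 * fetch the barycentric gradients with GET_GRADIENTS_H/V, then apply
 * them with two MULADD passes, dx/dy coming either from src1 (OFFSET)
 * or from the sample position (SAMPLE). */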
6539 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6540 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6541 int sample_gpr = -1;
6542 int gradientsH, gradientsV;
6543 struct r600_bytecode_tex tex;
6544
6545 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6546 sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
6547 }
6548
6549 gradientsH = r600_get_temp(ctx);
6550 gradientsV = r600_get_temp(ctx);
6551 for (i = 0; i < 2; i++) {
6552 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6553 tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
6554 tex.src_gpr = interp_gpr;
6555 tex.src_sel_x = interp_base_chan + 0;
6556 tex.src_sel_y = interp_base_chan + 1;
6557 tex.src_sel_z = 0;
6558 tex.src_sel_w = 0;
6559 tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
6560 tex.dst_sel_x = 0;
6561 tex.dst_sel_y = 1;
6562 tex.dst_sel_z = 7;
6563 tex.dst_sel_w = 7;
6564 tex.inst_mod = 1; // Use per pixel gradient calculation
6565 tex.sampler_id = 0;
6566 tex.resource_id = tex.sampler_id;
6567 r = r600_bytecode_add_tex(ctx->bc, &tex);
6568 if (r)
6569 return r;
6570 }
6571
6572 for (i = 0; i < 2; i++) {
6573 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6574 alu.op = ALU_OP3_MULADD;
6575 alu.is_op3 = 1;
6576 alu.src[0].sel = gradientsH;
6577 alu.src[0].chan = i;
6578 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6579 alu.src[1].sel = sample_gpr;
6580 alu.src[1].chan = 2;
6581 }
6582 else {
6583 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
6584 }
6585 alu.src[2].sel = interp_gpr;
6586 alu.src[2].chan = interp_base_chan + i;
6587 alu.dst.sel = ctx->temp_reg;
6588 alu.dst.chan = i;
6589 alu.last = i == 1;
6590
6591 r = r600_bytecode_add_alu(ctx->bc, &alu);
6592 if (r)
6593 return r;
6594 }
6595
6596 for (i = 0; i < 2; i++) {
6597 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6598 alu.op = ALU_OP3_MULADD;
6599 alu.is_op3 = 1;
6600 alu.src[0].sel = gradientsV;
6601 alu.src[0].chan = i;
6602 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6603 alu.src[1].sel = sample_gpr;
6604 alu.src[1].chan = 3;
6605 }
6606 else {
6607 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
6608 }
6609 alu.src[2].sel = ctx->temp_reg;
6610 alu.src[2].chan = i;
6611 alu.dst.sel = ctx->temp_reg;
6612 alu.dst.chan = i;
6613 alu.last = i == 1;
6614
6615 r = r600_bytecode_add_alu(ctx->bc, &alu);
6616 if (r)
6617 return r;
6618 }
6619 }
6620
6621 tmp = r600_get_temp(ctx);
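/* interpolate with the (possibly adjusted) ij: INTERP_ZW first, then
 * INTERP_XY; each op occupies four slots and only the slots producing
 * the wanted components (tmp.zw, then tmp.xy) have their write enabled */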
6622 for (i = 0; i < 8; i++) {
6623 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6624 alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;
6625
6626 alu.dst.sel = tmp;
6627 if ((i > 1 && i < 6)) {
6628 alu.dst.write = 1;
6629 }
6630 else {
6631 alu.dst.write = 0;
6632 }
6633 alu.dst.chan = i % 4;
6634
6635 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6636 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6637 alu.src[0].sel = ctx->temp_reg;
6638 alu.src[0].chan = 1 - (i % 2);
6639 } else {
6640 alu.src[0].sel = interp_gpr;
6641 alu.src[0].chan = interp_base_chan + 1 - (i % 2);
6642 }
6643 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
6644 alu.src[1].chan = 0;
6645
6646 alu.last = i % 4 == 3;
6647 alu.bank_swizzle_force = SQ_ALU_VEC_210;
6648
6649 r = r600_bytecode_add_alu(ctx->bc, &alu);
6650 if (r)
6651 return r;
6652 }
6653
6654 // INTERP can't swizzle dst
6655 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6656 for (i = 0; i <= lasti; i++) {
6657 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6658 continue;
6659
6660 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6661 alu.op = ALU_OP1_MOV;
6662 alu.src[0].sel = tmp;
6663 alu.src[0].chan = ctx->src[0].swizzle[i];
6664 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6665 alu.dst.write = 1;
6666 alu.last = i == lasti;
6667 r = r600_bytecode_add_alu(ctx->bc, &alu);
6668 if (r)
6669 return r;
6670 }
6671
6672 return 0;
6673 }
6674
6675
6676 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
6677 {
6678 struct r600_bytecode_alu alu;
6679 int i, r;
6680
6681 for (i = 0; i < 4; i++) {
6682 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6683 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
6684 alu.op = ALU_OP0_NOP;
6685 alu.dst.chan = i;
6686 } else {
6687 alu.op = ALU_OP1_MOV;
6688 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6689 alu.src[0].sel = ctx->temp_reg;
6690 alu.src[0].chan = i;
6691 }
6692 if (i == 3) {
6693 alu.last = 1;
6694 }
6695 r = r600_bytecode_add_alu(ctx->bc, &alu);
6696 if (r)
6697 return r;
6698 }
6699 return 0;
6700 }
6701
6702 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
6703 unsigned temp, int chan,
6704 struct r600_bytecode_alu_src *bc_src,
6705 const struct r600_shader_src *shader_src)
6706 {
6707 struct r600_bytecode_alu alu;
6708 int r;
6709
6710 r600_bytecode_src(bc_src, shader_src, chan);
6711
6712 /* op3 operands don't support abs modifier */
6713 if (bc_src->abs) {
6714 assert(temp!=0); /* we actually need the extra register, make sure it is allocated. */
6715 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6716 alu.op = ALU_OP1_MOV;
6717 alu.dst.sel = temp;
6718 alu.dst.chan = chan;
6719 alu.dst.write = 1;
6720
6721 alu.src[0] = *bc_src;
6722 alu.last = true; /* this single MOV ends its own ALU group */
6723 r = r600_bytecode_add_alu(ctx->bc, &alu);
6724 if (r)
6725 return r;
6726
6727 memset(bc_src, 0, sizeof(*bc_src));
6728 bc_src->sel = temp;
6729 bc_src->chan = chan;
6730 }
6731 return 0;
6732 }
6733
6734 static int tgsi_op3_dst(struct r600_shader_ctx *ctx, int dst)
6735 {
6736 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6737 struct r600_bytecode_alu alu;
6738 int i, j, r;
6739 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6740 int temp_regs[4];
6741 unsigned op = ctx->inst_info->op;
6742
6743 if (op == ALU_OP3_MULADD_IEEE &&
6744 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
6745 op = ALU_OP3_MULADD;
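/* the non-IEEE multiply treats 0 * anything as 0, which is exactly the
 * behaviour TGSI_PROPERTY_MUL_ZERO_WINS asks for */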
6746
6747 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6748 temp_regs[j] = 0;
6749 if (ctx->src[j].abs)
6750 temp_regs[j] = r600_get_temp(ctx);
6751 }
6752 for (i = 0; i < lasti + 1; i++) {
6753 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6754 continue;
6755
6756 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6757 alu.op = op;
6758 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6759 r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]);
6760 if (r)
6761 return r;
6762 }
6763
6764 if (dst == -1) {
6765 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6766 } else {
6767 alu.dst.sel = dst;
6768 }
6769 alu.dst.chan = i;
6770 alu.dst.write = 1;
6771 alu.is_op3 = 1;
6772 if (i == lasti) {
6773 alu.last = 1;
6774 }
6775 r = r600_bytecode_add_alu(ctx->bc, &alu);
6776 if (r)
6777 return r;
6778 }
6779 return 0;
6780 }
6781
6782 static int tgsi_op3(struct r600_shader_ctx *ctx)
6783 {
6784 return tgsi_op3_dst(ctx, -1);
6785 }
6786
6787 static int tgsi_dp(struct r600_shader_ctx *ctx)
6788 {
6789 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6790 struct r600_bytecode_alu alu;
6791 int i, j, r;
6792 unsigned op = ctx->inst_info->op;
6793 if (op == ALU_OP2_DOT4_IEEE &&
6794 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
6795 op = ALU_OP2_DOT4;
6796
6797 for (i = 0; i < 4; i++) {
6798 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6799 alu.op = op;
6800 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6801 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
6802 }
6803
6804 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6805 alu.dst.chan = i;
6806 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
6807 /* DP2/DP3 reuse DOT4: zero out the unused source channels */
6808 switch (inst->Instruction.Opcode) {
6809 case TGSI_OPCODE_DP2:
6810 if (i > 1) {
6811 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
6812 alu.src[0].chan = alu.src[1].chan = 0;
6813 }
6814 break;
6815 case TGSI_OPCODE_DP3:
6816 if (i > 2) {
6817 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
6818 alu.src[0].chan = alu.src[1].chan = 0;
6819 }
6820 break;
6821 default:
6822 break;
6823 }
6824 if (i == 3) {
6825 alu.last = 1;
6826 }
6827 r = r600_bytecode_add_alu(ctx->bc, &alu);
6828 if (r)
6829 return r;
6830 }
6831 return 0;
6832 }
6833
6834 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
6835 unsigned index)
6836 {
6837 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6838 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
6839 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
6840 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
6841 ctx->src[index].neg || ctx->src[index].abs ||
6842 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == PIPE_SHADER_GEOMETRY);
6843 }
6844
6845 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
6846 unsigned index)
6847 {
6848 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6849 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
6850 }
6851
6852 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
6853 {
6854 struct r600_bytecode_vtx vtx;
6855 struct r600_bytecode_alu alu;
6856 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6857 int src_gpr, r, i;
6858 int id = tgsi_tex_get_src_gpr(ctx, 1);
6859 int sampler_index_mode = inst->Src[1].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
6860
6861 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
6862 if (src_requires_loading) {
6863 for (i = 0; i < 4; i++) {
6864 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6865 alu.op = ALU_OP1_MOV;
6866 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6867 alu.dst.sel = ctx->temp_reg;
6868 alu.dst.chan = i;
6869 if (i == 3)
6870 alu.last = 1;
6871 alu.dst.write = 1;
6872 r = r600_bytecode_add_alu(ctx->bc, &alu);
6873 if (r)
6874 return r;
6875 }
6876 src_gpr = ctx->temp_reg;
6877 }
6878
6879 memset(&vtx, 0, sizeof(vtx));
6880 vtx.op = FETCH_OP_VFETCH;
6881 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
6882 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
6883 vtx.src_gpr = src_gpr;
6884 vtx.mega_fetch_count = 16;
6885 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6886 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
6887 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
6888 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
6889 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
6890 vtx.use_const_fields = 1;
6891 vtx.buffer_index_mode = sampler_index_mode;
6892
6893 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
6894 return r;
6895
6896 if (ctx->bc->chip_class >= EVERGREEN)
6897 return 0;
6898
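/* pre-evergreen fixup: AND each fetched component with the buffer info
 * constants (masking out components the buffer format lacks) and OR the
 * default alpha bits into .w */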
6899 for (i = 0; i < 4; i++) {
6900 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6901 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6902 continue;
6903
6904 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6905 alu.op = ALU_OP2_AND_INT;
6906
6907 alu.dst.chan = i;
6908 alu.dst.sel = vtx.dst_gpr;
6909 alu.dst.write = 1;
6910
6911 alu.src[0].sel = vtx.dst_gpr;
6912 alu.src[0].chan = i;
6913
6914 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
6915 alu.src[1].sel += (id * 2);
6916 alu.src[1].chan = i % 4;
6917 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6918
6919 if (i == lasti)
6920 alu.last = 1;
6921 r = r600_bytecode_add_alu(ctx->bc, &alu);
6922 if (r)
6923 return r;
6924 }
6925
6926 if (inst->Dst[0].Register.WriteMask & 3) {
6927 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6928 alu.op = ALU_OP2_OR_INT;
6929
6930 alu.dst.chan = 3;
6931 alu.dst.sel = vtx.dst_gpr;
6932 alu.dst.write = 1;
6933
6934 alu.src[0].sel = vtx.dst_gpr;
6935 alu.src[0].chan = 3;
6936
6937 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
6938 alu.src[1].chan = 0;
6939 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6940
6941 alu.last = 1;
6942 r = r600_bytecode_add_alu(ctx->bc, &alu);
6943 if (r)
6944 return r;
6945 }
6946 return 0;
6947 }
6948
6949 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx, int reg_idx, int offset)
6950 {
6951 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6952 struct r600_bytecode_alu alu;
6953 int r;
6954 int id = tgsi_tex_get_src_gpr(ctx, reg_idx) + offset;
6955
6956 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6957 alu.op = ALU_OP1_MOV;
6958 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
6959 if (ctx->bc->chip_class >= EVERGREEN) {
6960 /* on evergreen each dword is either a buffer size or a number of cubes */
6961 alu.src[0].sel += id / 4;
6962 alu.src[0].chan = id % 4;
6963 } else {
6964 /* on r600 it is at channel 1 of the second dword */
6965 alu.src[0].sel += (id * 2) + 1;
6966 alu.src[0].chan = 1;
6967 }
6968 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6969 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
6970 alu.last = 1;
6971 r = r600_bytecode_add_alu(ctx->bc, &alu);
6972 if (r)
6973 return r;
6974 return 0;
6975 }
6976
6977 static int tgsi_tex(struct r600_shader_ctx *ctx)
6978 {
6979 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6980 struct r600_bytecode_tex tex;
6981 struct r600_bytecode_alu alu;
6982 unsigned src_gpr;
6983 int r, i, j;
6984 int opcode;
6985 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
6986 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
6987 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
6988 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
6989
6990 bool txf_add_offsets = inst->Texture.NumOffsets &&
6991 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
6992 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
6993
6994 /* Texture fetch instructions can only use GPRs as source,
6995 * and they cannot negate the source or take the absolute value. */
6996 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
6997 tgsi_tex_src_requires_loading(ctx, 0)) ||
6998 read_compressed_msaa || txf_add_offsets;
6999
7000 boolean src_loaded = FALSE;
7001 unsigned sampler_src_reg = 1;
7002 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
7003 boolean has_txq_cube_array_z = false;
7004 unsigned sampler_index_mode;
7005
7006 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
7007 (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7008 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
7009 (inst->Dst[0].Register.WriteMask & 4)) {
7010 ctx->shader->has_txq_cube_array_z_comp = true;
7011 has_txq_cube_array_z = true;
7012 }
7013
7014 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
7015 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7016 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
7017 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
7018 sampler_src_reg = 2;
7019
7020 /* TGSI moves the sampler to src reg 3 for TXD */
7021 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
7022 sampler_src_reg = 3;
7023
7024 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7025
7026 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
7027
7028 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
7029 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
7030 ctx->shader->uses_tex_buffers = true;
7031 return r600_do_buffer_txq(ctx, 1, 0);
7032 }
7033 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
7034 if (ctx->bc->chip_class < EVERGREEN)
7035 ctx->shader->uses_tex_buffers = true;
7036 return do_vtx_fetch_inst(ctx, src_requires_loading);
7037 }
7038 }
7039
7040 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
7041 int out_chan;
7042 /* Add perspective divide */
7043 if (ctx->bc->chip_class == CAYMAN) {
7044 out_chan = 2;
7045 for (i = 0; i < 3; i++) {
7046 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7047 alu.op = ALU_OP1_RECIP_IEEE;
7048 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7049
7050 alu.dst.sel = ctx->temp_reg;
7051 alu.dst.chan = i;
7052 if (i == 2)
7053 alu.last = 1;
7054 if (out_chan == i)
7055 alu.dst.write = 1;
7056 r = r600_bytecode_add_alu(ctx->bc, &alu);
7057 if (r)
7058 return r;
7059 }
7060
7061 } else {
7062 out_chan = 3;
7063 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7064 alu.op = ALU_OP1_RECIP_IEEE;
7065 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7066
7067 alu.dst.sel = ctx->temp_reg;
7068 alu.dst.chan = out_chan;
7069 alu.last = 1;
7070 alu.dst.write = 1;
7071 r = r600_bytecode_add_alu(ctx->bc, &alu);
7072 if (r)
7073 return r;
7074 }
7075
7076 for (i = 0; i < 3; i++) {
7077 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7078 alu.op = ALU_OP2_MUL;
7079 alu.src[0].sel = ctx->temp_reg;
7080 alu.src[0].chan = out_chan;
7081 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
7082 alu.dst.sel = ctx->temp_reg;
7083 alu.dst.chan = i;
7084 alu.dst.write = 1;
7085 r = r600_bytecode_add_alu(ctx->bc, &alu);
7086 if (r)
7087 return r;
7088 }
7089 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7090 alu.op = ALU_OP1_MOV;
7091 alu.src[0].sel = V_SQ_ALU_SRC_1;
7092 alu.src[0].chan = 0;
7093 alu.dst.sel = ctx->temp_reg;
7094 alu.dst.chan = 3;
7095 alu.last = 1;
7096 alu.dst.write = 1;
7097 r = r600_bytecode_add_alu(ctx->bc, &alu);
7098 if (r)
7099 return r;
7100 src_loaded = TRUE;
7101 src_gpr = ctx->temp_reg;
7102 }
7103
7104
7105 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
7106 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7107 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7108 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
7109 inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
7110
7111 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
7112 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
7113
7114 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
7115 for (i = 0; i < 4; i++) {
7116 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7117 alu.op = ALU_OP2_CUBE;
7118 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
7119 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
7120 alu.dst.sel = ctx->temp_reg;
7121 alu.dst.chan = i;
7122 if (i == 3)
7123 alu.last = 1;
7124 alu.dst.write = 1;
7125 r = r600_bytecode_add_alu(ctx->bc, &alu);
7126 if (r)
7127 return r;
7128 }
7129
7130 /* tmp1.z = RCP_e(|tmp1.z|) */
7131 if (ctx->bc->chip_class == CAYMAN) {
7132 for (i = 0; i < 3; i++) {
7133 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7134 alu.op = ALU_OP1_RECIP_IEEE;
7135 alu.src[0].sel = ctx->temp_reg;
7136 alu.src[0].chan = 2;
7137 alu.src[0].abs = 1;
7138 alu.dst.sel = ctx->temp_reg;
7139 alu.dst.chan = i;
7140 if (i == 2)
7141 alu.dst.write = 1;
7142 if (i == 2)
7143 alu.last = 1;
7144 r = r600_bytecode_add_alu(ctx->bc, &alu);
7145 if (r)
7146 return r;
7147 }
7148 } else {
7149 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7150 alu.op = ALU_OP1_RECIP_IEEE;
7151 alu.src[0].sel = ctx->temp_reg;
7152 alu.src[0].chan = 2;
7153 alu.src[0].abs = 1;
7154 alu.dst.sel = ctx->temp_reg;
7155 alu.dst.chan = 2;
7156 alu.dst.write = 1;
7157 alu.last = 1;
7158 r = r600_bytecode_add_alu(ctx->bc, &alu);
7159 if (r)
7160 return r;
7161 }
7162
7163 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
7164 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
7165 * muladd has no writemask, have to use another temp
7166 */
7167 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7168 alu.op = ALU_OP3_MULADD;
7169 alu.is_op3 = 1;
7170
7171 alu.src[0].sel = ctx->temp_reg;
7172 alu.src[0].chan = 0;
7173 alu.src[1].sel = ctx->temp_reg;
7174 alu.src[1].chan = 2;
7175
7176 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7177 alu.src[2].chan = 0;
7178 alu.src[2].value = u_bitcast_f2u(1.5f);
7179
7180 alu.dst.sel = ctx->temp_reg;
7181 alu.dst.chan = 0;
7182 alu.dst.write = 1;
7183
7184 r = r600_bytecode_add_alu(ctx->bc, &alu);
7185 if (r)
7186 return r;
7187
7188 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7189 alu.op = ALU_OP3_MULADD;
7190 alu.is_op3 = 1;
7191
7192 alu.src[0].sel = ctx->temp_reg;
7193 alu.src[0].chan = 1;
7194 alu.src[1].sel = ctx->temp_reg;
7195 alu.src[1].chan = 2;
7196
7197 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7198 alu.src[2].chan = 0;
7199 alu.src[2].value = u_bitcast_f2u(1.5f);
7200
7201 alu.dst.sel = ctx->temp_reg;
7202 alu.dst.chan = 1;
7203 alu.dst.write = 1;
7204
7205 alu.last = 1;
7206 r = r600_bytecode_add_alu(ctx->bc, &alu);
7207 if (r)
7208 return r;
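/* at this point temp.xy hold the 1.5-biased face coordinates and
 * temp.w the cube face id produced by CUBE */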
7209 /* write the initial compare value into the Z component:
7210 * src0.w for shadow cube,
7211 * src1.x for shadow cube array */
7212 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7213 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7214 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7215 alu.op = ALU_OP1_MOV;
7216 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
7217 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7218 else
7219 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7220 alu.dst.sel = ctx->temp_reg;
7221 alu.dst.chan = 2;
7222 alu.dst.write = 1;
7223 alu.last = 1;
7224 r = r600_bytecode_add_alu(ctx->bc, &alu);
7225 if (r)
7226 return r;
7227 }
7228
7229 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7230 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7231 if (ctx->bc->chip_class >= EVERGREEN) {
7232 int mytmp = r600_get_temp(ctx);
7233 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7234 alu.op = ALU_OP1_MOV;
7235 alu.src[0].sel = ctx->temp_reg;
7236 alu.src[0].chan = 3;
7237 alu.dst.sel = mytmp;
7238 alu.dst.chan = 0;
7239 alu.dst.write = 1;
7240 alu.last = 1;
7241 r = r600_bytecode_add_alu(ctx->bc, &alu);
7242 if (r)
7243 return r;
7244
7245 /* multiply the original layer by 8 and add the face id (saved above) back into temp.w */
7246 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7247 alu.op = ALU_OP3_MULADD;
7248 alu.is_op3 = 1;
7249 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7250 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7251 alu.src[1].chan = 0;
7252 alu.src[1].value = u_bitcast_f2u(8.0f);
7253 alu.src[2].sel = mytmp;
7254 alu.src[2].chan = 0;
7255 alu.dst.sel = ctx->temp_reg;
7256 alu.dst.chan = 3;
7257 alu.dst.write = 1;
7258 alu.last = 1;
7259 r = r600_bytecode_add_alu(ctx->bc, &alu);
7260 if (r)
7261 return r;
7262 } else if (ctx->bc->chip_class < EVERGREEN) {
7263 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7264 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
7265 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7266 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7267 tex.src_gpr = r600_get_temp(ctx);
7268 tex.src_sel_x = 0;
7269 tex.src_sel_y = 0;
7270 tex.src_sel_z = 0;
7271 tex.src_sel_w = 0;
7272 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
7273 tex.coord_type_x = 1;
7274 tex.coord_type_y = 1;
7275 tex.coord_type_z = 1;
7276 tex.coord_type_w = 1;
7277 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7278 alu.op = ALU_OP1_MOV;
7279 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7280 alu.dst.sel = tex.src_gpr;
7281 alu.dst.chan = 0;
7282 alu.last = 1;
7283 alu.dst.write = 1;
7284 r = r600_bytecode_add_alu(ctx->bc, &alu);
7285 if (r)
7286 return r;
7287
7288 r = r600_bytecode_add_tex(ctx->bc, &tex);
7289 if (r)
7290 return r;
7291 }
7292
7293 }
7294
7295 /* for the cube forms of lod and bias, route the lod/bias value into temp.z */
7296 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
7297 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
7298 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7299 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
7300 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7301 alu.op = ALU_OP1_MOV;
7302 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7303 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
7304 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7305 else
7306 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7307 alu.dst.sel = ctx->temp_reg;
7308 alu.dst.chan = 2;
7309 alu.last = 1;
7310 alu.dst.write = 1;
7311 r = r600_bytecode_add_alu(ctx->bc, &alu);
7312 if (r)
7313 return r;
7314 }
7315
7316 src_loaded = TRUE;
7317 src_gpr = ctx->temp_reg;
7318 }
7319
7320 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
7321 int temp_h = 0, temp_v = 0;
7322 int start_val = 0;
7323
7324 /* if we've already loaded the src (i.e. for CUBE), don't reload it */
7325 if (src_loaded == TRUE)
7326 start_val = 1;
7327 else
7328 src_loaded = TRUE;
7329 for (i = start_val; i < 3; i++) {
7330 int treg = r600_get_temp(ctx);
7331
7332 if (i == 0)
7333 src_gpr = treg;
7334 else if (i == 1)
7335 temp_h = treg;
7336 else
7337 temp_v = treg;
7338
7339 for (j = 0; j < 4; j++) {
7340 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7341 alu.op = ALU_OP1_MOV;
7342 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
7343 alu.dst.sel = treg;
7344 alu.dst.chan = j;
7345 if (j == 3)
7346 alu.last = 1;
7347 alu.dst.write = 1;
7348 r = r600_bytecode_add_alu(ctx->bc, &alu);
7349 if (r)
7350 return r;
7351 }
7352 }
7353 for (i = 1; i < 3; i++) {
7354 /* set gradients h/v */
7355 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7356 tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
7357 FETCH_OP_SET_GRADIENTS_V;
7358 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7359 tex.sampler_index_mode = sampler_index_mode;
7360 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7361 tex.resource_index_mode = sampler_index_mode;
7362
7363 tex.src_gpr = (i == 1) ? temp_h : temp_v;
7364 tex.src_sel_x = 0;
7365 tex.src_sel_y = 1;
7366 tex.src_sel_z = 2;
7367 tex.src_sel_w = 3;
7368
7369 tex.dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
7370 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
7371 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
7372 tex.coord_type_x = 1;
7373 tex.coord_type_y = 1;
7374 tex.coord_type_z = 1;
7375 tex.coord_type_w = 1;
7376 }
7377 r = r600_bytecode_add_tex(ctx->bc, &tex);
7378 if (r)
7379 return r;
7380 }
7381 }
7382
7383 if (src_requires_loading && !src_loaded) {
7384 for (i = 0; i < 4; i++) {
7385 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7386 alu.op = ALU_OP1_MOV;
7387 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7388 alu.dst.sel = ctx->temp_reg;
7389 alu.dst.chan = i;
7390 if (i == 3)
7391 alu.last = 1;
7392 alu.dst.write = 1;
7393 r = r600_bytecode_add_alu(ctx->bc, &alu);
7394 if (r)
7395 return r;
7396 }
7397 src_loaded = TRUE;
7398 src_gpr = ctx->temp_reg;
7399 }
7400
7401 /* get offset values */
7402 if (inst->Texture.NumOffsets) {
7403 assert(inst->Texture.NumOffsets == 1);
7404
7405 /* The texture offset feature doesn't work with the TXF instruction
7406 * and must be emulated by adding the offset to the texture coordinates. */
7407 if (txf_add_offsets) {
7408 const struct tgsi_texture_offset *off = inst->TexOffsets;
7409
7410 switch (inst->Texture.Texture) {
7411 case TGSI_TEXTURE_3D:
7412 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7413 alu.op = ALU_OP2_ADD_INT;
7414 alu.src[0].sel = src_gpr;
7415 alu.src[0].chan = 2;
7416 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7417 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
7418 alu.dst.sel = src_gpr;
7419 alu.dst.chan = 2;
7420 alu.dst.write = 1;
7421 alu.last = 1;
7422 r = r600_bytecode_add_alu(ctx->bc, &alu);
7423 if (r)
7424 return r;
7425 /* fall through */
7426
7427 case TGSI_TEXTURE_2D:
7428 case TGSI_TEXTURE_SHADOW2D:
7429 case TGSI_TEXTURE_RECT:
7430 case TGSI_TEXTURE_SHADOWRECT:
7431 case TGSI_TEXTURE_2D_ARRAY:
7432 case TGSI_TEXTURE_SHADOW2D_ARRAY:
7433 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7434 alu.op = ALU_OP2_ADD_INT;
7435 alu.src[0].sel = src_gpr;
7436 alu.src[0].chan = 1;
7437 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7438 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
7439 alu.dst.sel = src_gpr;
7440 alu.dst.chan = 1;
7441 alu.dst.write = 1;
7442 alu.last = 1;
7443 r = r600_bytecode_add_alu(ctx->bc, &alu);
7444 if (r)
7445 return r;
7446 /* fall through */
7447
7448 case TGSI_TEXTURE_1D:
7449 case TGSI_TEXTURE_SHADOW1D:
7450 case TGSI_TEXTURE_1D_ARRAY:
7451 case TGSI_TEXTURE_SHADOW1D_ARRAY:
7452 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7453 alu.op = ALU_OP2_ADD_INT;
7454 alu.src[0].sel = src_gpr;
7455 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7456 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
7457 alu.dst.sel = src_gpr;
7458 alu.dst.write = 1;
7459 alu.last = 1;
7460 r = r600_bytecode_add_alu(ctx->bc, &alu);
7461 if (r)
7462 return r;
7463 break;
7464 /* texture offsets do not apply to other texture targets */
7465 }
7466 } else {
7467 switch (inst->Texture.Texture) {
7468 case TGSI_TEXTURE_3D:
7469 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
7470 /* fallthrough */
7471 case TGSI_TEXTURE_2D:
7472 case TGSI_TEXTURE_SHADOW2D:
7473 case TGSI_TEXTURE_RECT:
7474 case TGSI_TEXTURE_SHADOWRECT:
7475 case TGSI_TEXTURE_2D_ARRAY:
7476 case TGSI_TEXTURE_SHADOW2D_ARRAY:
7477 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
7478 /* fallthrough */
7479 case TGSI_TEXTURE_1D:
7480 case TGSI_TEXTURE_SHADOW1D:
7481 case TGSI_TEXTURE_1D_ARRAY:
7482 case TGSI_TEXTURE_SHADOW1D_ARRAY:
7483 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
7484 }
7485 }
7486 }
7487
7488 /* Obtain the sample index for reading a compressed MSAA color texture.
7489 * To read the FMASK, we use the ldfptr instruction, which tells us
7490 * where the samples are stored.
7491 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
7492 * which is the identity mapping. Each nibble says which physical sample
7493 * should be fetched to get that sample.
7494 *
7495 * Assume src.z contains the sample index. It should be modified like this:
7496 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
7497 * Then fetch the texel with src.
7498 */
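/* e.g. with the identity FMASK 0x76543210 and src.z = 2:
 * (0x76543210 >> 8) & 0xF = 2, so physical sample 2 is fetched. */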
7499 if (read_compressed_msaa) {
7500 unsigned sample_chan = 3;
7501 unsigned temp = r600_get_temp(ctx);
7502 assert(src_loaded);
7503
7504 /* temp.w = ldfptr() */
7505 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7506 tex.op = FETCH_OP_LD;
7507 tex.inst_mod = 1; /* to indicate this is ldfptr */
7508 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7509 tex.sampler_index_mode = sampler_index_mode;
7510 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7511 tex.resource_index_mode = sampler_index_mode;
7512 tex.src_gpr = src_gpr;
7513 tex.dst_gpr = temp;
7514 tex.dst_sel_x = 7; /* mask out these components */
7515 tex.dst_sel_y = 7;
7516 tex.dst_sel_z = 7;
7517 tex.dst_sel_w = 0; /* store X */
7518 tex.src_sel_x = 0;
7519 tex.src_sel_y = 1;
7520 tex.src_sel_z = 2;
7521 tex.src_sel_w = 3;
7522 tex.offset_x = offset_x;
7523 tex.offset_y = offset_y;
7524 tex.offset_z = offset_z;
7525 r = r600_bytecode_add_tex(ctx->bc, &tex);
7526 if (r)
7527 return r;
7528
7529 /* temp.x = sample_index*4 */
7530 if (ctx->bc->chip_class == CAYMAN) {
7531 for (i = 0 ; i < 4; i++) {
7532 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7533 alu.op = ALU_OP2_MULLO_INT;
7534 alu.src[0].sel = src_gpr;
7535 alu.src[0].chan = sample_chan;
7536 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7537 alu.src[1].value = 4;
7538 alu.dst.sel = temp;
7539 alu.dst.chan = i;
7540 alu.dst.write = i == 0;
7541 if (i == 3)
7542 alu.last = 1;
7543 r = r600_bytecode_add_alu(ctx->bc, &alu);
7544 if (r)
7545 return r;
7546 }
7547 } else {
7548 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7549 alu.op = ALU_OP2_MULLO_INT;
7550 alu.src[0].sel = src_gpr;
7551 alu.src[0].chan = sample_chan;
7552 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7553 alu.src[1].value = 4;
7554 alu.dst.sel = temp;
7555 alu.dst.chan = 0;
7556 alu.dst.write = 1;
7557 alu.last = 1;
7558 r = r600_bytecode_add_alu(ctx->bc, &alu);
7559 if (r)
7560 return r;
7561 }
7562
7563 /* sample_index = temp.w >> temp.x */
7564 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7565 alu.op = ALU_OP2_LSHR_INT;
7566 alu.src[0].sel = temp;
7567 alu.src[0].chan = 3;
7568 alu.src[1].sel = temp;
7569 alu.src[1].chan = 0;
7570 alu.dst.sel = src_gpr;
7571 alu.dst.chan = sample_chan;
7572 alu.dst.write = 1;
7573 alu.last = 1;
7574 r = r600_bytecode_add_alu(ctx->bc, &alu);
7575 if (r)
7576 return r;
7577
7578 /* sample_index & 0xF */
7579 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7580 alu.op = ALU_OP2_AND_INT;
7581 alu.src[0].sel = src_gpr;
7582 alu.src[0].chan = sample_chan;
7583 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7584 alu.src[1].value = 0xF;
7585 alu.dst.sel = src_gpr;
7586 alu.dst.chan = sample_chan;
7587 alu.dst.write = 1;
7588 alu.last = 1;
7589 r = r600_bytecode_add_alu(ctx->bc, &alu);
7590 if (r)
7591 return r;
7592 #if 0
7593 /* visualize the FMASK */
7594 for (i = 0; i < 4; i++) {
7595 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7596 alu.op = ALU_OP1_INT_TO_FLT;
7597 alu.src[0].sel = src_gpr;
7598 alu.src[0].chan = sample_chan;
7599 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7600 alu.dst.chan = i;
7601 alu.dst.write = 1;
7602 alu.last = 1;
7603 r = r600_bytecode_add_alu(ctx->bc, &alu);
7604 if (r)
7605 return r;
7606 }
7607 return 0;
7608 #endif
7609 }
7610
7611 /* does this shader want the number of layers from TXQ for a cube array? */
7612 if (has_txq_cube_array_z) {
7613 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7614
7615 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7616 alu.op = ALU_OP1_MOV;
7617
7618 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
7619 if (ctx->bc->chip_class >= EVERGREEN) {
7620 /* on evergreen each dword holds either a buffer size or a cube count */
7621 alu.src[0].sel += id / 4;
7622 alu.src[0].chan = id % 4;
7623 } else {
7624 /* on r600 the value lives in channel 2 of the second dword */
7625 alu.src[0].sel += (id * 2) + 1;
7626 alu.src[0].chan = 2;
7627 }
7628 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7629 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
7630 alu.last = 1;
7631 r = r600_bytecode_add_alu(ctx->bc, &alu);
7632 if (r)
7633 return r;
7634 /* disable writemask from texture instruction */
7635 inst->Dst[0].Register.WriteMask &= ~4;
7636 }
7637
7638 opcode = ctx->inst_info->op;
7639 if (opcode == FETCH_OP_GATHER4 &&
7640 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
7641 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
7642 opcode = FETCH_OP_GATHER4_O;
7643
7644 /* GATHER4_O/GATHER4_C_O use offset values loaded by
7645 SET_TEXTURE_OFFSETS instruction. The immediate offset values
7646 encoded in the instruction are ignored. */
7647 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7648 tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
7649 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7650 tex.sampler_index_mode = sampler_index_mode;
7651 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7652 tex.resource_index_mode = sampler_index_mode;
7653
7654 tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
7655 tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
7656 tex.src_sel_y = inst->TexOffsets[0].SwizzleY;
7657 tex.src_sel_z = inst->TexOffsets[0].SwizzleZ;
7658 tex.src_sel_w = 4;
7659
7660 tex.dst_sel_x = 7;
7661 tex.dst_sel_y = 7;
7662 tex.dst_sel_z = 7;
7663 tex.dst_sel_w = 7;
7664
7665 r = r600_bytecode_add_tex(ctx->bc, &tex);
7666 if (r)
7667 return r;
7668 }
7669
7670 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
7671 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
7672 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
7673 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7674 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
7675 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
7676 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7677 switch (opcode) {
7678 case FETCH_OP_SAMPLE:
7679 opcode = FETCH_OP_SAMPLE_C;
7680 break;
7681 case FETCH_OP_SAMPLE_L:
7682 opcode = FETCH_OP_SAMPLE_C_L;
7683 break;
7684 case FETCH_OP_SAMPLE_LB:
7685 opcode = FETCH_OP_SAMPLE_C_LB;
7686 break;
7687 case FETCH_OP_SAMPLE_G:
7688 opcode = FETCH_OP_SAMPLE_C_G;
7689 break;
7690 /* Texture gather variants */
7691 case FETCH_OP_GATHER4:
7692 opcode = FETCH_OP_GATHER4_C;
7693 break;
7694 case FETCH_OP_GATHER4_O:
7695 opcode = FETCH_OP_GATHER4_C_O;
7696 break;
7697 }
7698 }
7699
7700 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7701 tex.op = opcode;
7702
7703 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7704 tex.sampler_index_mode = sampler_index_mode;
7705 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7706 tex.resource_index_mode = sampler_index_mode;
7707 tex.src_gpr = src_gpr;
7708 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7709
7710 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
7711 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
7712 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
7713 }
7714
7715 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
7716 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
7717 tex.inst_mod = texture_component_select;
7718
7719 if (ctx->bc->chip_class == CAYMAN) {
7720 /* GATHER4 result order is different from TGSI TG4 */
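/* dst_sel_N picks which fetch-result component feeds destination channel N
 * (7 = masked), so these selects rotate the hardware gather result into the
 * component order TGSI TG4 expects; Cayman returns a different order */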
7721 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
7722 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
7723 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
7724 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
7725 } else {
7726 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
7727 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
7728 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
7729 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
7730 }
7731 }
7732 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
7733 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
7734 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
7735 tex.dst_sel_z = 7;
7736 tex.dst_sel_w = 7;
7737 }
7738 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
7739 tex.dst_sel_x = 3;
7740 tex.dst_sel_y = 7;
7741 tex.dst_sel_z = 7;
7742 tex.dst_sel_w = 7;
7743 }
7744 else {
7745 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
7746 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
7747 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
7748 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
7749 }
7750
7751
7752 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
7753 tex.src_sel_x = 4;
7754 tex.src_sel_y = 4;
7755 tex.src_sel_z = 4;
7756 tex.src_sel_w = 4;
7757 } else if (src_loaded) {
7758 tex.src_sel_x = 0;
7759 tex.src_sel_y = 1;
7760 tex.src_sel_z = 2;
7761 tex.src_sel_w = 3;
7762 } else {
7763 tex.src_sel_x = ctx->src[0].swizzle[0];
7764 tex.src_sel_y = ctx->src[0].swizzle[1];
7765 tex.src_sel_z = ctx->src[0].swizzle[2];
7766 tex.src_sel_w = ctx->src[0].swizzle[3];
7767 tex.src_rel = ctx->src[0].rel;
7768 }
7769
7770 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
7771 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7772 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7773 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7774 tex.src_sel_x = 1;
7775 tex.src_sel_y = 0;
7776 tex.src_sel_z = 3;
7777 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
7778 }
7779
7780 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
7781 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
7782 tex.coord_type_x = 1;
7783 tex.coord_type_y = 1;
7784 }
7785 tex.coord_type_z = 1;
7786 tex.coord_type_w = 1;
7787
7788 tex.offset_x = offset_x;
7789 tex.offset_y = offset_y;
7790 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
7791 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
7792 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
7793 tex.offset_z = 0;
7794 }
7795 else {
7796 tex.offset_z = offset_z;
7797 }
7798
7799 /* Put the depth for comparison in W.
7800 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
7801 * Some instructions expect the depth in Z. */
7802 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
7803 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
7804 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
7805 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
7806 opcode != FETCH_OP_SAMPLE_C_L &&
7807 opcode != FETCH_OP_SAMPLE_C_LB) {
7808 tex.src_sel_w = tex.src_sel_z;
7809 }
7810
7811 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
7812 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
7813 if (opcode == FETCH_OP_SAMPLE_C_L ||
7814 opcode == FETCH_OP_SAMPLE_C_LB) {
7815 /* the array index is read from Y */
7816 tex.coord_type_y = 0;
7817 } else {
7818 /* the array index is read from Z */
7819 tex.coord_type_z = 0;
7820 tex.src_sel_z = tex.src_sel_y;
7821 }
7822 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
7823 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
7824 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7825 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
7826 (ctx->bc->chip_class >= EVERGREEN)))
7827 /* the array index is read from Z */
7828 tex.coord_type_z = 0;
7829
7830 /* mask unused source components */
7831 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
7832 switch (inst->Texture.Texture) {
7833 case TGSI_TEXTURE_2D:
7834 case TGSI_TEXTURE_RECT:
7835 tex.src_sel_z = 7;
7836 tex.src_sel_w = 7;
7837 break;
7838 case TGSI_TEXTURE_1D_ARRAY:
7839 tex.src_sel_y = 7;
7840 tex.src_sel_w = 7;
7841 break;
7842 case TGSI_TEXTURE_1D:
7843 tex.src_sel_y = 7;
7844 tex.src_sel_z = 7;
7845 tex.src_sel_w = 7;
7846 break;
7847 }
7848 }
7849
7850 r = r600_bytecode_add_tex(ctx->bc, &tex);
7851 if (r)
7852 return r;
7853
7854 /* add shadow ambient support - gallium doesn't do it yet */
7855 return 0;
7856 }
7857
7858 static int find_hw_atomic_counter(struct r600_shader_ctx *ctx,
7859 struct tgsi_full_src_register *src)
7860 {
7861 unsigned i;
7862
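/* e.g. (hypothetical numbers) a range declared with start = 4, end = 7 and
 * hw_idx = 2 maps a plain index of 6 to hw counter 2 + (6 - 4) = 4 */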
7863 if (src->Register.Indirect) {
7864 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
7865 if (src->Indirect.ArrayID == ctx->shader->atomics[i].array_id)
7866 return ctx->shader->atomics[i].hw_idx;
7867 }
7868 } else {
7869 uint32_t index = src->Register.Index;
7870 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
7871 if (ctx->shader->atomics[i].buffer_id != (unsigned)src->Dimension.Index)
7872 continue;
7873 if (index > ctx->shader->atomics[i].end)
7874 continue;
7875 if (index < ctx->shader->atomics[i].start)
7876 continue;
7877 uint32_t offset = (index - ctx->shader->atomics[i].start);
7878 return ctx->shader->atomics[i].hw_idx + offset;
7879 }
7880 }
7881 assert(0);
7882 return -1;
7883 }
7884
7885 static int tgsi_set_gds_temp(struct r600_shader_ctx *ctx,
7886 int *uav_id_p, int *uav_index_mode_p)
7887 {
7888 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7889 int uav_id, uav_index_mode = 0;
7890 int r;
7891 bool is_cm = (ctx->bc->chip_class == CAYMAN);
7892
7893 uav_id = find_hw_atomic_counter(ctx, &inst->Src[0]);
7894
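/* counters are 4 bytes each; on Cayman the GDS address is taken from a
 * register, so build (index * 4 + uav_id * 4) in temp_reg.x here instead of
 * relying on the uav_id field and index mode used on other chips */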
7895 if (inst->Src[0].Register.Indirect) {
7896 if (is_cm) {
7897 struct r600_bytecode_alu alu;
7898 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7899 alu.op = ALU_OP2_LSHL_INT;
7900 alu.src[0].sel = get_address_file_reg(ctx, inst->Src[0].Indirect.Index);
7901 alu.src[0].chan = 0;
7902 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7903 alu.src[1].value = 2;
7904 alu.dst.sel = ctx->temp_reg;
7905 alu.dst.chan = 0;
7906 alu.dst.write = 1;
7907 alu.last = 1;
7908 r = r600_bytecode_add_alu(ctx->bc, &alu);
7909 if (r)
7910 return r;
7911
7912 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
7913 ctx->temp_reg, 0,
7914 ctx->temp_reg, 0,
7915 V_SQ_ALU_SRC_LITERAL, uav_id * 4);
7916 if (r)
7917 return r;
7918 } else
7919 uav_index_mode = 2;
7920 } else if (is_cm) {
7921 r = single_alu_op2(ctx, ALU_OP1_MOV,
7922 ctx->temp_reg, 0,
7923 V_SQ_ALU_SRC_LITERAL, uav_id * 4,
7924 0, 0);
7925 if (r)
7926 return r;
7927 }
7928 *uav_id_p = uav_id;
7929 *uav_index_mode_p = uav_index_mode;
7930 return 0;
7931 }
7932
7933 static int tgsi_load_gds(struct r600_shader_ctx *ctx)
7934 {
7935 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7936 int r;
7937 struct r600_bytecode_gds gds;
7938 int uav_id = 0;
7939 int uav_index_mode = 0;
7940 bool is_cm = (ctx->bc->chip_class == CAYMAN);
7941
7942 r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode);
7943 if (r)
7944 return r;
7945
7946 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
7947 gds.op = FETCH_OP_GDS_READ_RET;
7948 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7949 gds.uav_id = is_cm ? 0 : uav_id;
7950 gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
7951 gds.src_gpr = ctx->temp_reg;
7952 gds.src_sel_x = (is_cm) ? 0 : 4;
7953 gds.src_sel_y = 4;
7954 gds.src_sel_z = 4;
7955 gds.dst_sel_x = 0;
7956 gds.dst_sel_y = 7;
7957 gds.dst_sel_z = 7;
7958 gds.dst_sel_w = 7;
7959 gds.src_gpr2 = 0;
7960 gds.alloc_consume = !is_cm;
7961 r = r600_bytecode_add_gds(ctx->bc, &gds);
7962 if (r)
7963 return r;
7964
7965 ctx->bc->cf_last->vpm = 1;
7966 return 0;
7967 }
7968
7969 /* load the coordinate into a fresh GPR with unused components zeroed; for 1D arrays the layer index is moved from Y into Z where the hardware expects it */
7970 static int load_index_src(struct r600_shader_ctx *ctx, int src_index, int *idx_gpr)
7971 {
7972 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7973 int r, i;
7974 struct r600_bytecode_alu alu;
7975 int temp_reg = r600_get_temp(ctx);
7976
7977 for (i = 0; i < 4; i++) {
7978 bool def_val = true, write_zero = false;
7979 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7980 alu.op = ALU_OP1_MOV;
7981 alu.dst.sel = temp_reg;
7982 alu.dst.chan = i;
7983
7984 switch (inst->Memory.Texture) {
7985 case TGSI_TEXTURE_BUFFER:
7986 case TGSI_TEXTURE_1D:
7987 if (i == 1 || i == 2 || i == 3) {
7988 write_zero = true;
7989 }
7990 break;
7991 case TGSI_TEXTURE_1D_ARRAY:
7992 if (i == 1 || i == 3)
7993 write_zero = true;
7994 else if (i == 2) {
7995 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], 1);
7996 def_val = false;
7997 }
7998 break;
7999 case TGSI_TEXTURE_2D:
8000 if (i == 2 || i == 3)
8001 write_zero = true;
8002 break;
8003 default:
8004 if (i == 3)
8005 write_zero = true;
8006 break;
8007 }
8008
8009 if (write_zero) {
8010 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
8011 alu.src[0].value = 0;
8012 } else if (def_val) {
8013 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], i);
8014 }
8015
8016 if (i == 3)
8017 alu.last = 1;
8018 alu.dst.write = 1;
8019 r = r600_bytecode_add_alu(ctx->bc, &alu);
8020 if (r)
8021 return r;
8022 }
8023 *idx_gpr = temp_reg;
8024 return 0;
8025 }
8026
8027 static int load_buffer_coord(struct r600_shader_ctx *ctx, int src_idx,
8028 int temp_reg)
8029 {
8030 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8031 int r;
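/* buffer offsets arrive in bytes but the fetch indexes 4-byte elements,
 * so divide by 4 (folded into the literal when the offset is immediate) */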
8032 if (inst->Src[src_idx].Register.File == TGSI_FILE_IMMEDIATE) {
8033 int value = (ctx->literals[4 * inst->Src[src_idx].Register.Index + inst->Src[src_idx].Register.SwizzleX]);
8034 r = single_alu_op2(ctx, ALU_OP1_MOV,
8035 temp_reg, 0,
8036 V_SQ_ALU_SRC_LITERAL, value >> 2,
8037 0, 0);
8038 if (r)
8039 return r;
8040 } else {
8041 struct r600_bytecode_alu alu;
8042 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8043 alu.op = ALU_OP2_LSHR_INT;
8044 r600_bytecode_src(&alu.src[0], &ctx->src[src_idx], 0);
8045 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8046 alu.src[1].value = 2;
8047 alu.dst.sel = temp_reg;
8048 alu.dst.write = 1;
8049 alu.last = 1;
8050 r = r600_bytecode_add_alu(ctx->bc, &alu);
8051 if (r)
8052 return r;
8053 }
8054 return 0;
8055 }
8056
8057 static int tgsi_load_buffer(struct r600_shader_ctx *ctx)
8058 {
8059 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8060 /* have to work out the offset into the RAT immediate return buffer */
8061 struct r600_bytecode_vtx vtx;
8062 struct r600_bytecode_cf *cf;
8063 int r;
8064 int temp_reg = r600_get_temp(ctx);
8065 unsigned rat_index_mode;
8066 unsigned base;
8067
8068 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8069 base = R600_IMAGE_REAL_RESOURCE_OFFSET + ctx->info.file_count[TGSI_FILE_IMAGE];
8070
8071 r = load_buffer_coord(ctx, 1, temp_reg);
8072 if (r)
8073 return r;
8074 ctx->bc->cf_last->barrier = 1;
8075 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8076 vtx.op = FETCH_OP_VFETCH;
8077 vtx.buffer_id = inst->Src[0].Register.Index + base;
8078 vtx.buffer_index_mode = rat_index_mode;
8079 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8080 vtx.src_gpr = temp_reg;
8081 vtx.src_sel_x = 0;
8082 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8083 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
8084 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
8085 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
8086 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
8087 vtx.num_format_all = 1;
8088 vtx.format_comp_all = 1;
8089 vtx.srf_mode_all = 0;
8090
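/* pick the narrowest FMT_32* format that still covers the highest written component */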
8091 if (inst->Dst[0].Register.WriteMask & 8) {
8092 vtx.data_format = FMT_32_32_32_32;
8093 vtx.use_const_fields = 0;
8094 } else if (inst->Dst[0].Register.WriteMask & 4) {
8095 vtx.data_format = FMT_32_32_32;
8096 vtx.use_const_fields = 0;
8097 } else if (inst->Dst[0].Register.WriteMask & 2) {
8098 vtx.data_format = FMT_32_32;
8099 vtx.use_const_fields = 0;
8100 } else {
8101 vtx.data_format = FMT_32;
8102 vtx.use_const_fields = 0;
8103 }
8104
8105 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8106 if (r)
8107 return r;
8108 cf = ctx->bc->cf_last;
8109 cf->barrier = 1;
8110 return 0;
8111 }
8112
8113 static int tgsi_load_rat(struct r600_shader_ctx *ctx)
8114 {
8115 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8116 /* have to work out the offset into the RAT immediate return buffer */
8117 struct r600_bytecode_vtx vtx;
8118 struct r600_bytecode_cf *cf;
8119 int r;
8120 int idx_gpr;
8121 unsigned format, num_format, format_comp, endian;
8122 const struct util_format_description *desc;
8123 unsigned rat_index_mode;
8124 unsigned immed_base;
8125
8126 r = load_thread_id_gpr(ctx);
8127 if (r)
8128 return r;
8129
8130 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8131
8132 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
8133 r = load_index_src(ctx, 1, &idx_gpr);
8134 if (r)
8135 return r;
8136
8137 if (rat_index_mode)
8138 egcm_load_index_reg(ctx->bc, 1, false);
8139
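/* image loads are a three-step sequence: a RAT NOP-with-return (the result
 * lands in the immediate return buffer), a WAIT_ACK, then a VFETCH of that
 * buffer indexed by the thread id */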
8140 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8141 cf = ctx->bc->cf_last;
8142
8143 cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index;
8144 cf->rat.inst = V_RAT_INST_NOP_RTN;
8145 cf->rat.index_mode = rat_index_mode;
8146 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
8147 cf->output.gpr = ctx->thread_id_gpr;
8148 cf->output.index_gpr = idx_gpr;
8149 cf->output.comp_mask = 0xf;
8150 cf->output.burst_count = 1;
8151 cf->vpm = 1;
8152 cf->barrier = 1;
8153 cf->mark = 1;
8154 cf->output.elem_size = 0;
8155
8156 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
8157 cf = ctx->bc->cf_last;
8158 cf->barrier = 1;
8159
8160 desc = util_format_description(inst->Memory.Format);
8161 r600_vertex_data_type(inst->Memory.Format,
8162 &format, &num_format, &format_comp, &endian);
8163 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8164 vtx.op = FETCH_OP_VFETCH;
8165 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
8166 vtx.buffer_index_mode = rat_index_mode;
8167 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8168 vtx.src_gpr = ctx->thread_id_gpr;
8169 vtx.src_sel_x = 1;
8170 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8171 vtx.dst_sel_x = desc->swizzle[0];
8172 vtx.dst_sel_y = desc->swizzle[1];
8173 vtx.dst_sel_z = desc->swizzle[2];
8174 vtx.dst_sel_w = desc->swizzle[3];
8175 vtx.srf_mode_all = 1;
8176 vtx.data_format = format;
8177 vtx.num_format_all = num_format;
8178 vtx.format_comp_all = format_comp;
8179 vtx.endian = endian;
8180 vtx.offset = 0;
8181 vtx.mega_fetch_count = 3;
8182 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8183 if (r)
8184 return r;
8185 cf = ctx->bc->cf_last;
8186 cf->barrier = 1;
8187 return 0;
8188 }
8189
8190 static int tgsi_load_lds(struct r600_shader_ctx *ctx)
8191 {
8192 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8193 struct r600_bytecode_alu alu;
8194 int r;
8195 int temp_reg = r600_get_temp(ctx);
8196
8197 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8198 alu.op = ALU_OP1_MOV;
8199 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
8200 alu.dst.sel = temp_reg;
8201 alu.dst.write = 1;
8202 alu.last = 1;
8203 r = r600_bytecode_add_alu(ctx->bc, &alu);
8204 if (r)
8205 return r;
8206
8207 r = do_lds_fetch_values(ctx, temp_reg,
8208 ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index, inst->Dst[0].Register.WriteMask);
8209 if (r)
8210 return r;
8211 return 0;
8212 }
8213
8214 static int tgsi_load(struct r600_shader_ctx *ctx)
8215 {
8216 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8217 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
8218 return tgsi_load_rat(ctx);
8219 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
8220 return tgsi_load_gds(ctx);
8221 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
8222 return tgsi_load_buffer(ctx);
8223 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
8224 return tgsi_load_lds(ctx);
8225 return 0;
8226 }
8227
8228 static int tgsi_store_buffer_rat(struct r600_shader_ctx *ctx)
8229 {
8230 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8231 struct r600_bytecode_cf *cf;
8232 int r, i;
8233 unsigned rat_index_mode;
8234 int lasti;
8235 int temp_reg = r600_get_temp(ctx), treg2 = r600_get_temp(ctx);
8236
8237 r = load_buffer_coord(ctx, 0, treg2);
8238 if (r)
8239 return r;
8240
8241 rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8242 if (rat_index_mode)
8243 egcm_load_index_reg(ctx->bc, 1, false);
8244
8245 for (i = 0; i <= 3; i++) {
8246 struct r600_bytecode_alu alu;
8247 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8248 alu.op = ALU_OP1_MOV;
8249 alu.dst.sel = temp_reg;
8250 alu.dst.chan = i;
8251 alu.src[0].sel = V_SQ_ALU_SRC_0;
8252 alu.last = (i == 3);
8253 alu.dst.write = 1;
8254 r = r600_bytecode_add_alu(ctx->bc, &alu);
8255 if (r)
8256 return r;
8257 }
8258
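/* emit one RAT STORE_TYPED per enabled component: the element index is the
 * base dword coordinate plus i, the data sits in ctx->temp_reg.x and
 * comp_mask is 1; the index GPR's other components were cleared above */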
8259 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8260 for (i = 0; i <= lasti; i++) {
8261 struct r600_bytecode_alu alu;
8262 if (!((1 << i) & inst->Dst[0].Register.WriteMask))
8263 continue;
8264
8265 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
8266 temp_reg, 0,
8267 treg2, 0,
8268 V_SQ_ALU_SRC_LITERAL, i);
8269 if (r)
8270 return r;
8271
8272 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8273 alu.op = ALU_OP1_MOV;
8274 alu.dst.sel = ctx->temp_reg;
8275 alu.dst.chan = 0;
8276
8277 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
8278 alu.last = 1;
8279 alu.dst.write = 1;
8280 r = r600_bytecode_add_alu(ctx->bc, &alu);
8281 if (r)
8282 return r;
8283
8284 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8285 cf = ctx->bc->cf_last;
8286
8287 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index + ctx->info.file_count[TGSI_FILE_IMAGE];
8288 cf->rat.inst = V_RAT_INST_STORE_TYPED;
8289 cf->rat.index_mode = rat_index_mode;
8290 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
8291 cf->output.gpr = ctx->temp_reg;
8292 cf->output.index_gpr = temp_reg;
8293 cf->output.comp_mask = 1;
8294 cf->output.burst_count = 1;
8295 cf->vpm = 1;
8296 cf->barrier = 1;
8297 cf->output.elem_size = 0;
8298 }
8299 return 0;
8300 }
8301
8302 static int tgsi_store_rat(struct r600_shader_ctx *ctx)
8303 {
8304 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8305 struct r600_bytecode_cf *cf;
8306 bool src_requires_loading = false;
8307 int val_gpr, idx_gpr;
8308 int r, i;
8309 unsigned rat_index_mode;
8310
8311 rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8312
8313 r = load_index_src(ctx, 0, &idx_gpr);
8314 if (r)
8315 return r;
8316
8317 if (inst->Src[1].Register.File != TGSI_FILE_TEMPORARY)
8318 src_requires_loading = true;
8319
8320 if (src_requires_loading) {
8321 struct r600_bytecode_alu alu;
8322 for (i = 0; i < 4; i++) {
8323 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8324 alu.op = ALU_OP1_MOV;
8325 alu.dst.sel = ctx->temp_reg;
8326 alu.dst.chan = i;
8327
8328 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
8329 if (i == 3)
8330 alu.last = 1;
8331 alu.dst.write = 1;
8332 r = r600_bytecode_add_alu(ctx->bc, &alu);
8333 if (r)
8334 return r;
8335 }
8336 val_gpr = ctx->temp_reg;
8337 } else
8338 val_gpr = tgsi_tex_get_src_gpr(ctx, 1);
8339 if (rat_index_mode)
8340 egcm_load_index_reg(ctx->bc, 1, false);
8341
8342 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8343 cf = ctx->bc->cf_last;
8344
8345 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index;
8346 cf->rat.inst = V_RAT_INST_STORE_TYPED;
8347 cf->rat.index_mode = rat_index_mode;
8348 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
8349 cf->output.gpr = val_gpr;
8350 cf->output.index_gpr = idx_gpr;
8351 cf->output.comp_mask = 0xf;
8352 cf->output.burst_count = 1;
8353 cf->vpm = 1;
8354 cf->barrier = 1;
8355 cf->output.elem_size = 0;
8356 return 0;
8357 }
8358
8359 static int tgsi_store_lds(struct r600_shader_ctx *ctx)
8360 {
8361 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8362 struct r600_bytecode_alu alu;
8363 int r, i, lasti;
8364 int write_mask = inst->Dst[0].Register.WriteMask;
8365 int temp_reg = r600_get_temp(ctx);
8366
8367 /* LDS write */
8368 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8369 alu.op = ALU_OP1_MOV;
8370 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8371 alu.dst.sel = temp_reg;
8372 alu.dst.write = 1;
8373 alu.last = 1;
8374 r = r600_bytecode_add_alu(ctx->bc, &alu);
8375 if (r)
8376 return r;
8377
8378 lasti = tgsi_last_instruction(write_mask);
8379 for (i = 1; i <= lasti; i++) {
8380 if (!(write_mask & (1 << i)))
8381 continue;
8382 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
8383 temp_reg, i,
8384 temp_reg, 0,
8385 V_SQ_ALU_SRC_LITERAL, 4 * i);
8386 if (r)
8387 return r;
8388 }
8389 for (i = 0; i <= lasti; i++) {
8390 if (!(write_mask & (1 << i)))
8391 continue;
8392
8393 if ((i == 0 && ((write_mask & 3) == 3)) ||
8394 (i == 2 && ((write_mask & 0xc) == 0xc))) {
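/* two adjacent channels are both enabled: use a single WRITE_REL, storing
 * src[1] at the address in temp_reg.chan and src[2] at the adjacent dword
 * (lds_idx = 1 supplies the relative offset) */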
8395 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8396 alu.op = LDS_OP3_LDS_WRITE_REL;
8397
8398 alu.src[0].sel = temp_reg;
8399 alu.src[0].chan = i;
8400 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
8401 r600_bytecode_src(&alu.src[2], &ctx->src[1], i + 1);
8402 alu.last = 1;
8403 alu.is_lds_idx_op = true;
8404 alu.lds_idx = 1;
8405 r = r600_bytecode_add_alu(ctx->bc, &alu);
8406 if (r)
8407 return r;
8408 i += 1;
8409 continue;
8410 }
8411 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8412 alu.op = LDS_OP2_LDS_WRITE;
8413
8414 alu.src[0].sel = temp_reg;
8415 alu.src[0].chan = i;
8416 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
8417
8418 alu.last = 1;
8419 alu.is_lds_idx_op = true;
8420
8421 r = r600_bytecode_add_alu(ctx->bc, &alu);
8422 if (r)
8423 return r;
8424 }
8425 return 0;
8426 }
8427
8428 static int tgsi_store(struct r600_shader_ctx *ctx)
8429 {
8430 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8431 if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER)
8432 return tgsi_store_buffer_rat(ctx);
8433 else if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY)
8434 return tgsi_store_lds(ctx);
8435 else
8436 return tgsi_store_rat(ctx);
8437 }
8438
8439 static int tgsi_atomic_op_rat(struct r600_shader_ctx *ctx)
8440 {
8441 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8442 /* have to work out the offset into the RAT immediate return buffer */
8443 struct r600_bytecode_alu alu;
8444 struct r600_bytecode_vtx vtx;
8445 struct r600_bytecode_cf *cf;
8446 int r;
8447 int idx_gpr;
8448 unsigned format, num_format, format_comp, endian;
8449 const struct util_format_description *desc;
8450 unsigned rat_index_mode;
8451 unsigned immed_base;
8452 unsigned rat_base;
8453
8454 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
8455 rat_base = ctx->shader->rat_base;
8456
8457 r = load_thread_id_gpr(ctx);
8458 if (r)
8459 return r;
8460
8461 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
8462 immed_base += ctx->info.file_count[TGSI_FILE_IMAGE];
8463 rat_base += ctx->info.file_count[TGSI_FILE_IMAGE];
8464
8465 r = load_buffer_coord(ctx, 1, ctx->temp_reg);
8466 if (r)
8467 return r;
8468 idx_gpr = ctx->temp_reg;
8469 } else {
8470 r = load_index_src(ctx, 1, &idx_gpr);
8471 if (r)
8472 return r;
8473 }
8474
8475 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8476
8477 if (ctx->inst_info->op == V_RAT_INST_CMPXCHG_INT_RTN) {
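/* CMPXCHG takes the new value in .x and the compare value in .w (.z on Cayman) */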
8478 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8479 alu.op = ALU_OP1_MOV;
8480 alu.dst.sel = ctx->thread_id_gpr;
8481 alu.dst.chan = 0;
8482 alu.dst.write = 1;
8483 r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
8484 alu.last = 1;
8485 r = r600_bytecode_add_alu(ctx->bc, &alu);
8486 if (r)
8487 return r;
8488
8489 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8490 alu.op = ALU_OP1_MOV;
8491 alu.dst.sel = ctx->thread_id_gpr;
8492 if (ctx->bc->chip_class == CAYMAN)
8493 alu.dst.chan = 2;
8494 else
8495 alu.dst.chan = 3;
8496 alu.dst.write = 1;
8497 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
8498 alu.last = 1;
8499 r = r600_bytecode_add_alu(ctx->bc, &alu);
8500 if (r)
8501 return r;
8502 } else {
8503 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8504 alu.op = ALU_OP1_MOV;
8505 alu.dst.sel = ctx->thread_id_gpr;
8506 alu.dst.chan = 0;
8507 alu.dst.write = 1;
8508 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
8509 alu.last = 1;
8510 r = r600_bytecode_add_alu(ctx->bc, &alu);
8511 if (r)
8512 return r;
8513 }
8514
8515 if (rat_index_mode)
8516 egcm_load_index_reg(ctx->bc, 1, false);
8517 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8518 cf = ctx->bc->cf_last;
8519
8520 cf->rat.id = rat_base + inst->Src[0].Register.Index;
8521 cf->rat.inst = ctx->inst_info->op;
8522 cf->rat.index_mode = rat_index_mode;
8523 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
8524 cf->output.gpr = ctx->thread_id_gpr;
8525 cf->output.index_gpr = idx_gpr;
8526 cf->output.comp_mask = 0xf;
8527 cf->output.burst_count = 1;
8528 cf->vpm = 1;
8529 cf->barrier = 1;
8530 cf->mark = 1;
8531 cf->output.elem_size = 0;
8532 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
8533 cf = ctx->bc->cf_last;
8534 cf->barrier = 1;
8535 cf->cf_addr = 1;
8536
8537 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8538 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
8539 desc = util_format_description(inst->Memory.Format);
8540 r600_vertex_data_type(inst->Memory.Format,
8541 &format, &num_format, &format_comp, &endian);
8542 vtx.dst_sel_x = desc->swizzle[0];
8543 } else {
8544 format = FMT_32;
8545 num_format = 1;
8546 format_comp = 0;
8547 endian = 0;
8548 vtx.dst_sel_x = 0;
8549 }
8550 vtx.op = FETCH_OP_VFETCH;
8551 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
8552 vtx.buffer_index_mode = rat_index_mode;
8553 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8554 vtx.src_gpr = ctx->thread_id_gpr;
8555 vtx.src_sel_x = 1;
8556 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8557 vtx.dst_sel_y = 7;
8558 vtx.dst_sel_z = 7;
8559 vtx.dst_sel_w = 7;
8560 vtx.use_const_fields = 0;
8561 vtx.srf_mode_all = 1;
8562 vtx.data_format = format;
8563 vtx.num_format_all = num_format;
8564 vtx.format_comp_all = format_comp;
8565 vtx.endian = endian;
8566 vtx.offset = 0;
8567 vtx.mega_fetch_count = 0xf;
8568 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8569 if (r)
8570 return r;
8571 cf = ctx->bc->cf_last;
8572 cf->vpm = 1;
8573 cf->barrier = 1;
8574 return 0;
8575 }
8576
8577 static int get_gds_op(int opcode)
8578 {
8579 switch (opcode) {
8580 case TGSI_OPCODE_ATOMUADD:
8581 return FETCH_OP_GDS_ADD_RET;
8582 case TGSI_OPCODE_ATOMAND:
8583 return FETCH_OP_GDS_AND_RET;
8584 case TGSI_OPCODE_ATOMOR:
8585 return FETCH_OP_GDS_OR_RET;
8586 case TGSI_OPCODE_ATOMXOR:
8587 return FETCH_OP_GDS_XOR_RET;
8588 case TGSI_OPCODE_ATOMUMIN:
8589 return FETCH_OP_GDS_MIN_UINT_RET;
8590 case TGSI_OPCODE_ATOMUMAX:
8591 return FETCH_OP_GDS_MAX_UINT_RET;
8592 case TGSI_OPCODE_ATOMXCHG:
8593 return FETCH_OP_GDS_XCHG_RET;
8594 case TGSI_OPCODE_ATOMCAS:
8595 return FETCH_OP_GDS_CMP_XCHG_RET;
8596 default:
8597 return -1;
8598 }
8599 }
8600
8601 static int tgsi_atomic_op_gds(struct r600_shader_ctx *ctx)
8602 {
8603 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8604 struct r600_bytecode_gds gds;
8605 struct r600_bytecode_alu alu;
8606 int gds_op = get_gds_op(inst->Instruction.Opcode);
8607 int r;
8608 int uav_id = 0;
8609 int uav_index_mode = 0;
8610 bool is_cm = (ctx->bc->chip_class == CAYMAN);
8611
8612 if (gds_op == -1) {
8613 fprintf(stderr, "unknown GDS op for opcode %d\n", inst->Instruction.Opcode);
8614 return -1;
8615 }
8616
8617 r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode);
8618 if (r)
8619 return r;
8620
8621 if (inst->Src[2].Register.File == TGSI_FILE_IMMEDIATE) {
8622 int value = (ctx->literals[4 * inst->Src[2].Register.Index + inst->Src[2].Register.SwizzleX]);
8623 int abs_value = abs(value);
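/* GDS_ADD takes an unsigned source, so a negative immediate increment is
 * emitted as a GDS_SUB of its absolute value instead */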
8624 if (abs_value != value && gds_op == FETCH_OP_GDS_ADD_RET)
8625 gds_op = FETCH_OP_GDS_SUB_RET;
8626 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8627 alu.op = ALU_OP1_MOV;
8628 alu.dst.sel = ctx->temp_reg;
8629 alu.dst.chan = is_cm ? 1 : 0;
8630 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
8631 alu.src[0].value = abs_value;
8632 alu.last = 1;
8633 alu.dst.write = 1;
8634 r = r600_bytecode_add_alu(ctx->bc, &alu);
8635 if (r)
8636 return r;
8637 } else {
8638 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8639 alu.op = ALU_OP1_MOV;
8640 alu.dst.sel = ctx->temp_reg;
8641 alu.dst.chan = is_cm ? 1 : 0;
8642 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
8643 alu.last = 1;
8644 alu.dst.write = 1;
8645 r = r600_bytecode_add_alu(ctx->bc, &alu);
8646 if (r)
8647 return r;
8648 }
8649
8650
8651 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
8652 gds.op = gds_op;
8653 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8654 gds.uav_id = is_cm ? 0 : uav_id;
8655 gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
8656 gds.src_gpr = ctx->temp_reg;
8657 gds.src_gpr2 = 0;
8658 gds.src_sel_x = is_cm ? 0 : 4;
8659 gds.src_sel_y = is_cm ? 1 : 0;
8660 gds.src_sel_z = 7;
8661 gds.dst_sel_x = 0;
8662 gds.dst_sel_y = 7;
8663 gds.dst_sel_z = 7;
8664 gds.dst_sel_w = 7;
8665 gds.alloc_consume = !is_cm;
8666
8667 r = r600_bytecode_add_gds(ctx->bc, &gds);
8668 if (r)
8669 return r;
8670 ctx->bc->cf_last->vpm = 1;
8671 return 0;
8672 }
8673
8674 static int get_lds_op(int opcode)
8675 {
8676 switch (opcode) {
8677 case TGSI_OPCODE_ATOMUADD:
8678 return LDS_OP2_LDS_ADD_RET;
8679 case TGSI_OPCODE_ATOMAND:
8680 return LDS_OP2_LDS_AND_RET;
8681 case TGSI_OPCODE_ATOMOR:
8682 return LDS_OP2_LDS_OR_RET;
8683 case TGSI_OPCODE_ATOMXOR:
8684 return LDS_OP2_LDS_XOR_RET;
8685 case TGSI_OPCODE_ATOMUMIN:
8686 return LDS_OP2_LDS_MIN_UINT_RET;
8687 case TGSI_OPCODE_ATOMUMAX:
8688 return LDS_OP2_LDS_MAX_UINT_RET;
8689 case TGSI_OPCODE_ATOMIMIN:
8690 return LDS_OP2_LDS_MIN_INT_RET;
8691 case TGSI_OPCODE_ATOMIMAX:
8692 return LDS_OP2_LDS_MAX_INT_RET;
8693 case TGSI_OPCODE_ATOMXCHG:
8694 return LDS_OP2_LDS_XCHG_RET;
8695 case TGSI_OPCODE_ATOMCAS:
8696 return LDS_OP3_LDS_CMP_XCHG_RET;
8697 default:
8698 return -1;
8699 }
8700 }
8701
8702 static int tgsi_atomic_op_lds(struct r600_shader_ctx *ctx)
8703 {
8704 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8705 int lds_op = get_lds_op(inst->Instruction.Opcode);
8706 int r;
8707
8708 struct r600_bytecode_alu alu;
8709 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8710 alu.op = lds_op;
8711 alu.is_lds_idx_op = true;
8712 alu.last = 1;
8713 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
8714 r600_bytecode_src(&alu.src[1], &ctx->src[2], 0);
8715 if (lds_op == LDS_OP3_LDS_CMP_XCHG_RET)
8716 r600_bytecode_src(&alu.src[2], &ctx->src[3], 0);
8717 else
8718 alu.src[2].sel = V_SQ_ALU_SRC_0;
8719 r = r600_bytecode_add_alu(ctx->bc, &alu);
8720 if (r)
8721 return r;
8722
8723 /* then read from LDS_OQ_A_POP */
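/* the *_RET LDS ops push their result onto LDS output queue A; this MOV pops it into the destination */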
8724 memset(&alu, 0, sizeof(alu));
8725
8726 alu.op = ALU_OP1_MOV;
8727 alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
8728 alu.src[0].chan = 0;
8729 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
8730 alu.dst.write = 1;
8731 alu.last = 1;
8732 r = r600_bytecode_add_alu(ctx->bc, &alu);
8733 if (r)
8734 return r;
8735
8736 return 0;
8737 }
8738
8739 static int tgsi_atomic_op(struct r600_shader_ctx *ctx)
8740 {
8741 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8742 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
8743 return tgsi_atomic_op_rat(ctx);
8744 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
8745 return tgsi_atomic_op_gds(ctx);
8746 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
8747 return tgsi_atomic_op_rat(ctx);
8748 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
8749 return tgsi_atomic_op_lds(ctx);
8750 return 0;
8751 }
8752
8753 static int tgsi_resq(struct r600_shader_ctx *ctx)
8754 {
8755 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8756 unsigned sampler_index_mode;
8757 struct r600_bytecode_tex tex;
8758 int r;
8759 boolean has_txq_cube_array_z = false;
8760
8761 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
8762 (inst->Src[0].Register.File == TGSI_FILE_IMAGE && inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
8763 ctx->shader->uses_tex_buffers = true;
8764 return r600_do_buffer_txq(ctx, 0, ctx->shader->image_size_const_offset);
8765 }
8766
8767 if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY &&
8768 inst->Dst[0].Register.WriteMask & 4) {
8769 ctx->shader->has_txq_cube_array_z_comp = true;
8770 has_txq_cube_array_z = true;
8771 }
8772
8773 sampler_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8774 if (sampler_index_mode)
8775 egcm_load_index_reg(ctx->bc, 1, false);
8776
8777
8778 /* does this shader need the number of layers from TXQ for a cube array? */
8779 if (has_txq_cube_array_z) {
8780 int id = tgsi_tex_get_src_gpr(ctx, 0) + ctx->shader->image_size_const_offset;
8781 struct r600_bytecode_alu alu;
8782
8783 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8784 alu.op = ALU_OP1_MOV;
8785
8786 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
8787 /* with eg each dword holds either a buffer size or a cube count */
8788 alu.src[0].sel += id / 4;
8789 alu.src[0].chan = id % 4;
8790 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
8791 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
8792 alu.last = 1;
8793 r = r600_bytecode_add_alu(ctx->bc, &alu);
8794 if (r)
8795 return r;
8796 /* disable writemask from texture instruction */
8797 inst->Dst[0].Register.WriteMask &= ~4;
8798 }
8799 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8800 tex.op = ctx->inst_info->op;
8801 tex.sampler_id = R600_IMAGE_REAL_RESOURCE_OFFSET + inst->Src[0].Register.Index;
8802 tex.sampler_index_mode = sampler_index_mode;
8803 tex.resource_id = tex.sampler_id;
8804 tex.resource_index_mode = sampler_index_mode;
8805 tex.src_sel_x = 4;
8806 tex.src_sel_y = 4;
8807 tex.src_sel_z = 4;
8808 tex.src_sel_w = 4;
8809 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8810 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8811 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
8812 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8813 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8814 r = r600_bytecode_add_tex(ctx->bc, &tex);
8815 if (r)
8816 return r;
8817
8818 return 0;
8819 }
8820
8821 static int tgsi_lrp(struct r600_shader_ctx *ctx)
8822 {
8823 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8824 struct r600_bytecode_alu alu;
8825 unsigned lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8826 unsigned i, temp_regs[2];
8827 int r;
8828
8829 /* optimize lrp(0.5, a, b) = 0.5*a + 0.5*b into a single ADD using the divide-by-two output modifier (omod = 3) */
8830 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
8831 for (i = 0; i < lasti + 1; i++) {
8832 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8833 continue;
8834
8835 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8836 alu.op = ALU_OP2_ADD;
8837 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
8838 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
8839 alu.omod = 3;
8840 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8841 alu.dst.chan = i;
8842 if (i == lasti) {
8843 alu.last = 1;
8844 }
8845 r = r600_bytecode_add_alu(ctx->bc, &alu);
8846 if (r)
8847 return r;
8848 }
8849 return 0;
8850 }
8851
8852 /* 1 - src0 */
8853 for (i = 0; i < lasti + 1; i++) {
8854 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8855 continue;
8856
8857 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8858 alu.op = ALU_OP2_ADD;
8859 alu.src[0].sel = V_SQ_ALU_SRC_1;
8860 alu.src[0].chan = 0;
8861 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
8862 r600_bytecode_src_toggle_neg(&alu.src[1]);
8863 alu.dst.sel = ctx->temp_reg;
8864 alu.dst.chan = i;
8865 if (i == lasti) {
8866 alu.last = 1;
8867 }
8868 alu.dst.write = 1;
8869 r = r600_bytecode_add_alu(ctx->bc, &alu);
8870 if (r)
8871 return r;
8872 }
8873
8874 /* (1 - src0) * src2 */
8875 for (i = 0; i < lasti + 1; i++) {
8876 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8877 continue;
8878
8879 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8880 alu.op = ALU_OP2_MUL;
8881 alu.src[0].sel = ctx->temp_reg;
8882 alu.src[0].chan = i;
8883 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
8884 alu.dst.sel = ctx->temp_reg;
8885 alu.dst.chan = i;
8886 if (i == lasti) {
8887 alu.last = 1;
8888 }
8889 alu.dst.write = 1;
8890 r = r600_bytecode_add_alu(ctx->bc, &alu);
8891 if (r)
8892 return r;
8893 }
8894
8895 /* src0 * src1 + (1 - src0) * src2 */
8896 if (ctx->src[0].abs)
8897 temp_regs[0] = r600_get_temp(ctx);
8898 else
8899 temp_regs[0] = 0;
8900 if (ctx->src[1].abs)
8901 temp_regs[1] = r600_get_temp(ctx);
8902 else
8903 temp_regs[1] = 0;
8904
8905 for (i = 0; i < lasti + 1; i++) {
8906 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8907 continue;
8908
8909 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8910 alu.op = ALU_OP3_MULADD;
8911 alu.is_op3 = 1;
8912 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
8913 if (r)
8914 return r;
8915 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]);
8916 if (r)
8917 return r;
8918 alu.src[2].sel = ctx->temp_reg;
8919 alu.src[2].chan = i;
8920
8921 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8922 alu.dst.chan = i;
8923 if (i == lasti) {
8924 alu.last = 1;
8925 }
8926 r = r600_bytecode_add_alu(ctx->bc, &alu);
8927 if (r)
8928 return r;
8929 }
8930 return 0;
8931 }
8932
8933 static int tgsi_cmp(struct r600_shader_ctx *ctx)
8934 {
8935 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8936 struct r600_bytecode_alu alu;
8937 int i, r, j;
8938 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8939 int temp_regs[3];
8940 unsigned op;
8941
8942 if (ctx->src[0].abs && ctx->src[0].neg) {
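/* -|x| >= 0 only when x == 0, so CNDGE on a negated absolute source
 * collapses to CNDE on the plain source */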
8943 op = ALU_OP3_CNDE;
8944 ctx->src[0].abs = 0;
8945 ctx->src[0].neg = 0;
8946 } else {
8947 op = ALU_OP3_CNDGE;
8948 }
8949
8950 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
8951 temp_regs[j] = 0;
8952 if (ctx->src[j].abs)
8953 temp_regs[j] = r600_get_temp(ctx);
8954 }
8955
8956 for (i = 0; i < lasti + 1; i++) {
8957 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8958 continue;
8959
8960 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8961 alu.op = op;
8962 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
8963 if (r)
8964 return r;
8965 r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]);
8966 if (r)
8967 return r;
8968 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]);
8969 if (r)
8970 return r;
8971 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8972 alu.dst.chan = i;
8973 alu.dst.write = 1;
8974 alu.is_op3 = 1;
8975 if (i == lasti)
8976 alu.last = 1;
8977 r = r600_bytecode_add_alu(ctx->bc, &alu);
8978 if (r)
8979 return r;
8980 }
8981 return 0;
8982 }
8983
8984 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
8985 {
8986 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8987 struct r600_bytecode_alu alu;
8988 int i, r;
8989 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8990
8991 for (i = 0; i < lasti + 1; i++) {
8992 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8993 continue;
8994
8995 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8996 alu.op = ALU_OP3_CNDE_INT;
8997 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8998 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
8999 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
9000 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9001 alu.dst.chan = i;
9002 alu.dst.write = 1;
9003 alu.is_op3 = 1;
9004 if (i == lasti)
9005 alu.last = 1;
9006 r = r600_bytecode_add_alu(ctx->bc, &alu);
9007 if (r)
9008 return r;
9009 }
9010 return 0;
9011 }
9012
9013 static int tgsi_exp(struct r600_shader_ctx *ctx)
9014 {
9015 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9016 struct r600_bytecode_alu alu;
9017 int r;
9018 unsigned i;
9019
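/* TGSI EXP: dst.x = 2^floor(src.x), dst.y = fract(src.x),
 * dst.z = 2^src.x, dst.w = 1.0 */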
9020 /* result.x = 2^floor(src); */
9021 if (inst->Dst[0].Register.WriteMask & 1) {
9022 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9023
9024 alu.op = ALU_OP1_FLOOR;
9025 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9026
9027 alu.dst.sel = ctx->temp_reg;
9028 alu.dst.chan = 0;
9029 alu.dst.write = 1;
9030 alu.last = 1;
9031 r = r600_bytecode_add_alu(ctx->bc, &alu);
9032 if (r)
9033 return r;
9034
9035 if (ctx->bc->chip_class == CAYMAN) {
9036 for (i = 0; i < 3; i++) {
9037 alu.op = ALU_OP1_EXP_IEEE;
9038 alu.src[0].sel = ctx->temp_reg;
9039 alu.src[0].chan = 0;
9040
9041 alu.dst.sel = ctx->temp_reg;
9042 alu.dst.chan = i;
9043 alu.dst.write = i == 0;
9044 alu.last = i == 2;
9045 r = r600_bytecode_add_alu(ctx->bc, &alu);
9046 if (r)
9047 return r;
9048 }
9049 } else {
9050 alu.op = ALU_OP1_EXP_IEEE;
9051 alu.src[0].sel = ctx->temp_reg;
9052 alu.src[0].chan = 0;
9053
9054 alu.dst.sel = ctx->temp_reg;
9055 alu.dst.chan = 0;
9056 alu.dst.write = 1;
9057 alu.last = 1;
9058 r = r600_bytecode_add_alu(ctx->bc, &alu);
9059 if (r)
9060 return r;
9061 }
9062 }
9063
9064 /* result.y = src.x - floor(src.x) = fract(src.x); */
9065 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
9066 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9067
9068 alu.op = ALU_OP1_FRACT;
9069 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9070
9071 alu.dst.sel = ctx->temp_reg;
9072 #if 0
9073 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9074 if (r)
9075 return r;
9076 #endif
9077 alu.dst.write = 1;
9078 alu.dst.chan = 1;
9079
9080 alu.last = 1;
9081
9082 r = r600_bytecode_add_alu(ctx->bc, &alu);
9083 if (r)
9084 return r;
9085 }
9086
9087 /* result.z = RoughApprox2ToX(src.x); */
9088 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
9089 if (ctx->bc->chip_class == CAYMAN) {
9090 for (i = 0; i < 3; i++) {
9091 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9092 alu.op = ALU_OP1_EXP_IEEE;
9093 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9094
9095 alu.dst.sel = ctx->temp_reg;
9096 alu.dst.chan = i;
9097 if (i == 2) {
9098 alu.dst.write = 1;
9099 alu.last = 1;
9100 }
9101
9102 r = r600_bytecode_add_alu(ctx->bc, &alu);
9103 if (r)
9104 return r;
9105 }
9106 } else {
9107 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9108 alu.op = ALU_OP1_EXP_IEEE;
9109 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9110
9111 alu.dst.sel = ctx->temp_reg;
9112 alu.dst.write = 1;
9113 alu.dst.chan = 2;
9114
9115 alu.last = 1;
9116
9117 r = r600_bytecode_add_alu(ctx->bc, &alu);
9118 if (r)
9119 return r;
9120 }
9121 }
9122
9123 /* result.w = 1.0; */
9124 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
9125 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9126
9127 alu.op = ALU_OP1_MOV;
9128 alu.src[0].sel = V_SQ_ALU_SRC_1;
9129 alu.src[0].chan = 0;
9130
9131 alu.dst.sel = ctx->temp_reg;
9132 alu.dst.chan = 3;
9133 alu.dst.write = 1;
9134 alu.last = 1;
9135 r = r600_bytecode_add_alu(ctx->bc, &alu);
9136 if (r)
9137 return r;
9138 }
9139 return tgsi_helper_copy(ctx, inst);
9140 }
9141
9142 static int tgsi_log(struct r600_shader_ctx *ctx)
9143 {
9144 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9145 struct r600_bytecode_alu alu;
9146 int r;
9147 unsigned i;
9148
9149 /* result.x = floor(log2(|src|)); */
9150 if (inst->Dst[0].Register.WriteMask & 1) {
9151 if (ctx->bc->chip_class == CAYMAN) {
9152 for (i = 0; i < 3; i++) {
9153 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9154
9155 alu.op = ALU_OP1_LOG_IEEE;
9156 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9157 r600_bytecode_src_set_abs(&alu.src[0]);
9158
9159 alu.dst.sel = ctx->temp_reg;
9160 alu.dst.chan = i;
9161 if (i == 0)
9162 alu.dst.write = 1;
9163 if (i == 2)
9164 alu.last = 1;
9165 r = r600_bytecode_add_alu(ctx->bc, &alu);
9166 if (r)
9167 return r;
9168 }
9169
9170 } else {
9171 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9172
9173 alu.op = ALU_OP1_LOG_IEEE;
9174 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9175 r600_bytecode_src_set_abs(&alu.src[0]);
9176
9177 alu.dst.sel = ctx->temp_reg;
9178 alu.dst.chan = 0;
9179 alu.dst.write = 1;
9180 alu.last = 1;
9181 r = r600_bytecode_add_alu(ctx->bc, &alu);
9182 if (r)
9183 return r;
9184 }
9185
9186 alu.op = ALU_OP1_FLOOR;
9187 alu.src[0].sel = ctx->temp_reg;
9188 alu.src[0].chan = 0;
9189
9190 alu.dst.sel = ctx->temp_reg;
9191 alu.dst.chan = 0;
9192 alu.dst.write = 1;
9193 alu.last = 1;
9194
9195 r = r600_bytecode_add_alu(ctx->bc, &alu);
9196 if (r)
9197 return r;
9198 }
9199
9200 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
9201 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
9202
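/* no divide is available, so compute it as
 * |src.x| * RECIP(2^floor(log2(|src.x|))) */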
9203 if (ctx->bc->chip_class == CAYMAN) {
9204 for (i = 0; i < 3; i++) {
9205 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9206
9207 alu.op = ALU_OP1_LOG_IEEE;
9208 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9209 r600_bytecode_src_set_abs(&alu.src[0]);
9210
9211 alu.dst.sel = ctx->temp_reg;
9212 alu.dst.chan = i;
9213 if (i == 1)
9214 alu.dst.write = 1;
9215 if (i == 2)
9216 alu.last = 1;
9217
9218 r = r600_bytecode_add_alu(ctx->bc, &alu);
9219 if (r)
9220 return r;
9221 }
9222 } else {
9223 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9224
9225 alu.op = ALU_OP1_LOG_IEEE;
9226 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9227 r600_bytecode_src_set_abs(&alu.src[0]);
9228
9229 alu.dst.sel = ctx->temp_reg;
9230 alu.dst.chan = 1;
9231 alu.dst.write = 1;
9232 alu.last = 1;
9233
9234 r = r600_bytecode_add_alu(ctx->bc, &alu);
9235 if (r)
9236 return r;
9237 }
9238
9239 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9240
9241 alu.op = ALU_OP1_FLOOR;
9242 alu.src[0].sel = ctx->temp_reg;
9243 alu.src[0].chan = 1;
9244
9245 alu.dst.sel = ctx->temp_reg;
9246 alu.dst.chan = 1;
9247 alu.dst.write = 1;
9248 alu.last = 1;
9249
9250 r = r600_bytecode_add_alu(ctx->bc, &alu);
9251 if (r)
9252 return r;
9253
9254 if (ctx->bc->chip_class == CAYMAN) {
9255 for (i = 0; i < 3; i++) {
9256 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9257 alu.op = ALU_OP1_EXP_IEEE;
9258 alu.src[0].sel = ctx->temp_reg;
9259 alu.src[0].chan = 1;
9260
9261 alu.dst.sel = ctx->temp_reg;
9262 alu.dst.chan = i;
9263 if (i == 1)
9264 alu.dst.write = 1;
9265 if (i == 2)
9266 alu.last = 1;
9267
9268 r = r600_bytecode_add_alu(ctx->bc, &alu);
9269 if (r)
9270 return r;
9271 }
9272 } else {
9273 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9274 alu.op = ALU_OP1_EXP_IEEE;
9275 alu.src[0].sel = ctx->temp_reg;
9276 alu.src[0].chan = 1;
9277
9278 alu.dst.sel = ctx->temp_reg;
9279 alu.dst.chan = 1;
9280 alu.dst.write = 1;
9281 alu.last = 1;
9282
9283 r = r600_bytecode_add_alu(ctx->bc, &alu);
9284 if (r)
9285 return r;
9286 }
9287
9288 if (ctx->bc->chip_class == CAYMAN) {
9289 for (i = 0; i < 3; i++) {
9290 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9291 alu.op = ALU_OP1_RECIP_IEEE;
9292 alu.src[0].sel = ctx->temp_reg;
9293 alu.src[0].chan = 1;
9294
9295 alu.dst.sel = ctx->temp_reg;
9296 alu.dst.chan = i;
9297 if (i == 1)
9298 alu.dst.write = 1;
9299 if (i == 2)
9300 alu.last = 1;
9301
9302 r = r600_bytecode_add_alu(ctx->bc, &alu);
9303 if (r)
9304 return r;
9305 }
9306 } else {
9307 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9308 alu.op = ALU_OP1_RECIP_IEEE;
9309 alu.src[0].sel = ctx->temp_reg;
9310 alu.src[0].chan = 1;
9311
9312 alu.dst.sel = ctx->temp_reg;
9313 alu.dst.chan = 1;
9314 alu.dst.write = 1;
9315 alu.last = 1;
9316
9317 r = r600_bytecode_add_alu(ctx->bc, &alu);
9318 if (r)
9319 return r;
9320 }
9321
9322 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9323
9324 alu.op = ALU_OP2_MUL;
9325
9326 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9327 r600_bytecode_src_set_abs(&alu.src[0]);
9328
9329 alu.src[1].sel = ctx->temp_reg;
9330 alu.src[1].chan = 1;
9331
9332 alu.dst.sel = ctx->temp_reg;
9333 alu.dst.chan = 1;
9334 alu.dst.write = 1;
9335 alu.last = 1;
9336
9337 r = r600_bytecode_add_alu(ctx->bc, &alu);
9338 if (r)
9339 return r;
9340 }
9341
9342 /* result.z = log2(|src|); */
9343 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
9344 if (ctx->bc->chip_class == CAYMAN) {
9345 for (i = 0; i < 3; i++) {
9346 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9347
9348 alu.op = ALU_OP1_LOG_IEEE;
9349 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9350 r600_bytecode_src_set_abs(&alu.src[0]);
9351
9352 alu.dst.sel = ctx->temp_reg;
9353 if (i == 2)
9354 alu.dst.write = 1;
9355 alu.dst.chan = i;
9356 if (i == 2)
9357 alu.last = 1;
9358
9359 r = r600_bytecode_add_alu(ctx->bc, &alu);
9360 if (r)
9361 return r;
9362 }
9363 } else {
9364 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9365
9366 alu.op = ALU_OP1_LOG_IEEE;
9367 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9368 r600_bytecode_src_set_abs(&alu.src[0]);
9369
9370 alu.dst.sel = ctx->temp_reg;
9371 alu.dst.write = 1;
9372 alu.dst.chan = 2;
9373 alu.last = 1;
9374
9375 r = r600_bytecode_add_alu(ctx->bc, &alu);
9376 if (r)
9377 return r;
9378 }
9379 }
9380
9381 /* result.w = 1.0; */
9382 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
9383 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9384
9385 alu.op = ALU_OP1_MOV;
9386 alu.src[0].sel = V_SQ_ALU_SRC_1;
9387 alu.src[0].chan = 0;
9388
9389 alu.dst.sel = ctx->temp_reg;
9390 alu.dst.chan = 3;
9391 alu.dst.write = 1;
9392 alu.last = 1;
9393
9394 r = r600_bytecode_add_alu(ctx->bc, &alu);
9395 if (r)
9396 return r;
9397 }
9398
9399 return tgsi_helper_copy(ctx, inst);
9400 }
9401
9402 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
9403 {
9404 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9405 struct r600_bytecode_alu alu;
9406 int r;
9407 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9408 unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
9409
9410 assert(inst->Dst[0].Register.Index < 3);
9411 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9412
9413 switch (inst->Instruction.Opcode) {
9414 case TGSI_OPCODE_ARL:
9415 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
9416 break;
9417 case TGSI_OPCODE_ARR:
9418 alu.op = ALU_OP1_FLT_TO_INT;
9419 break;
9420 case TGSI_OPCODE_UARL:
9421 alu.op = ALU_OP1_MOV;
9422 break;
9423 default:
9424 assert(0);
9425 return -1;
9426 }
9427
9428 for (i = 0; i <= lasti; ++i) {
9429 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9430 continue;
9431 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9432 alu.last = i == lasti;
9433 alu.dst.sel = reg;
9434 alu.dst.chan = i;
9435 alu.dst.write = 1;
9436 r = r600_bytecode_add_alu(ctx->bc, &alu);
9437 if (r)
9438 return r;
9439 }
9440
9441 if (inst->Dst[0].Register.Index > 0)
9442 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
9443 else
9444 ctx->bc->ar_loaded = 0;
9445
9446 return 0;
9447 }
9448 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
9449 {
9450 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9451 struct r600_bytecode_alu alu;
9452 int r;
9453 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9454
9455 switch (inst->Instruction.Opcode) {
9456 case TGSI_OPCODE_ARL:
9457 memset(&alu, 0, sizeof(alu));
9458 alu.op = ALU_OP1_FLOOR;
9459 alu.dst.sel = ctx->bc->ar_reg;
9460 alu.dst.write = 1;
9461 for (i = 0; i <= lasti; ++i) {
9462 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
9463 alu.dst.chan = i;
9464 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9465 alu.last = i == lasti;
9466 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
9467 return r;
9468 }
9469 }
9470
9471 memset(&alu, 0, sizeof(alu));
9472 alu.op = ALU_OP1_FLT_TO_INT;
9473 alu.src[0].sel = ctx->bc->ar_reg;
9474 alu.dst.sel = ctx->bc->ar_reg;
9475 alu.dst.write = 1;
9476 /* FLT_TO_INT is trans-only on r600/r700 */
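/* marking every channel last ends the ALU group at each conversion so the scheduler can place it in the t slot */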
9477 alu.last = TRUE;
9478 for (i = 0; i <= lasti; ++i) {
9479 alu.dst.chan = i;
9480 alu.src[0].chan = i;
9481 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
9482 return r;
9483 }
9484 break;
9485 case TGSI_OPCODE_ARR:
9486 memset(&alu, 0, sizeof(alu));
9487 alu.op = ALU_OP1_FLT_TO_INT;
9488 alu.dst.sel = ctx->bc->ar_reg;
9489 alu.dst.write = 1;
9490 /* FLT_TO_INT is trans-only on r600/r700 */
9491 alu.last = TRUE;
9492 for (i = 0; i <= lasti; ++i) {
9493 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
9494 alu.dst.chan = i;
9495 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9496 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
9497 return r;
9498 }
9499 }
9500 break;
9501 case TGSI_OPCODE_UARL:
9502 memset(&alu, 0, sizeof(alu));
9503 alu.op = ALU_OP1_MOV;
9504 alu.dst.sel = ctx->bc->ar_reg;
9505 alu.dst.write = 1;
9506 for (i = 0; i <= lasti; ++i) {
9507 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
9508 alu.dst.chan = i;
9509 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9510 alu.last = i == lasti;
9511 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
9512 return r;
9513 }
9514 }
9515 break;
9516 default:
9517 assert(0);
9518 return -1;
9519 }
9520
9521 ctx->bc->ar_loaded = 0;
9522 return 0;
9523 }
9524
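/* DST (distance vector): dst = (1, src0.y*src1.y, src0.z, src1.w).
 * The loop below implements this with one MUL per channel by
 * substituting the inline constant 1.0 for the operand that does not
 * contribute: chan 0 = 1*1, chan 1 = src0.y*src1.y, chan 2 = src0.z*1,
 * chan 3 = 1*src1.w. */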
9525 static int tgsi_opdst(struct r600_shader_ctx *ctx)
9526 {
9527 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9528 struct r600_bytecode_alu alu;
9529 int i, r = 0;
9530
9531 for (i = 0; i < 4; i++) {
9532 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9533
9534 alu.op = ALU_OP2_MUL;
9535 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9536
9537 if (i == 0 || i == 3) {
9538 alu.src[0].sel = V_SQ_ALU_SRC_1;
9539 } else {
9540 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9541 }
9542
9543 if (i == 0 || i == 2) {
9544 alu.src[1].sel = V_SQ_ALU_SRC_1;
9545 } else {
9546 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
9547 }
9548 if (i == 3)
9549 alu.last = 1;
9550 r = r600_bytecode_add_alu(ctx->bc, &alu);
9551 if (r)
9552 return r;
9553 }
9554 return 0;
9555 }
9556
9557 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type)
9558 {
9559 struct r600_bytecode_alu alu;
9560 int r;
9561
9562 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9563 alu.op = opcode;
9564 alu.execute_mask = 1;
9565 alu.update_pred = 1;
9566
9567 alu.dst.sel = ctx->temp_reg;
9568 alu.dst.write = 1;
9569 alu.dst.chan = 0;
9570
9571 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9572 alu.src[1].sel = V_SQ_ALU_SRC_0;
9573 alu.src[1].chan = 0;
9574
9575 alu.last = 1;
9576
9577 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
9578 if (r)
9579 return r;
9580 return 0;
9581 }
9582
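/* Fold pops into the preceding ALU clause where possible: CF_OP_ALU can
 * absorb one pop (becoming ALU_POP_AFTER) and ALU_POP_AFTER one more
 * (becoming ALU_POP2_AFTER); anything deeper falls back to an explicit
 * CF_OP_POP. Worked example (assumed state): last CF op is CF_OP_ALU
 * and pops == 1 -> alu_pop = 0 + 1 = 1, so the clause is rewritten to
 * ALU_POP_AFTER and no separate POP is emitted. */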
9583 static int pops(struct r600_shader_ctx *ctx, int pops)
9584 {
9585 unsigned force_pop = ctx->bc->force_add_cf;
9586
9587 if (!force_pop) {
9588 int alu_pop = 3;
9589 if (ctx->bc->cf_last) {
9590 if (ctx->bc->cf_last->op == CF_OP_ALU)
9591 alu_pop = 0;
9592 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
9593 alu_pop = 1;
9594 }
9595 alu_pop += pops;
9596 if (alu_pop == 1) {
9597 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
9598 ctx->bc->force_add_cf = 1;
9599 } else if (alu_pop == 2) {
9600 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
9601 ctx->bc->force_add_cf = 1;
9602 } else {
9603 force_pop = 1;
9604 }
9605 }
9606
9607 if (force_pop) {
9608 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
9609 ctx->bc->cf_last->pop_count = pops;
9610 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
9611 }
9612
9613 return 0;
9614 }
9615
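/* Worked example of the sizing below (assumed numbers): stack.loop = 2,
 * stack.push_wqm = 0, stack.push = 1 and a chip entry_size of 4 give
 * elements = 2*4 + 1 = 9; an Evergreen FC_PUSH_VPM adds one more, and
 * entries = ceil(10/4) = 3, so max_entries is raised to at least 3. */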
9616 static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx,
9617 unsigned reason)
9618 {
9619 struct r600_stack_info *stack = &ctx->bc->stack;
9620 unsigned elements;
9621 int entries;
9622
9623 unsigned entry_size = stack->entry_size;
9624
9625 elements = (stack->loop + stack->push_wqm) * entry_size;
9626 elements += stack->push;
9627
9628 switch (ctx->bc->chip_class) {
9629 case R600:
9630 case R700:
9631 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
9632 * the stack must be reserved to hold the current active/continue
9633 * masks */
9634 if (reason == FC_PUSH_VPM) {
9635 elements += 2;
9636 }
9637 break;
9638
9639 case CAYMAN:
9640 /* r9xx: any stack operation on an empty stack consumes 2 additional
9641 * elements */
9642 elements += 2;
9643
9644 /* fallthrough */
9645 /* FIXME: do the two elements added above cover the cases for the
9646 * r8xx+ below? */
9647
9648 case EVERGREEN:
9649 /* r8xx+: 2 extra elements are not always required, but one extra
9650 * element must be added for each of the following cases:
9651 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
9652 * stack usage.
9653 * (Currently we don't use ALU_ELSE_AFTER.)
9654 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
9655 * PUSH instruction is executed.
9656 *
9657 * NOTE: it seems we also need to reserve an additional element in some
9658 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
9659 * then STACK_SIZE should be 2 instead of 1 */
9660 if (reason == FC_PUSH_VPM) {
9661 elements += 1;
9662 }
9663 break;
9664
9665 default:
9666 assert(0);
9667 break;
9668 }
9669
9670 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
9671 * for all chips, so we use 4 in the final formula, not the real entry_size
9672 * for the chip */
9673 entry_size = 4;
9674
9675 entries = (elements + (entry_size - 1)) / entry_size;
9676
9677 if (entries > stack->max_entries)
9678 stack->max_entries = entries;
9679 }
9680
9681 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
9682 {
9683 switch(reason) {
9684 case FC_PUSH_VPM:
9685 --ctx->bc->stack.push;
9686 assert(ctx->bc->stack.push >= 0);
9687 break;
9688 case FC_PUSH_WQM:
9689 --ctx->bc->stack.push_wqm;
9690 assert(ctx->bc->stack.push_wqm >= 0);
9691 break;
9692 case FC_LOOP:
9693 --ctx->bc->stack.loop;
9694 assert(ctx->bc->stack.loop >= 0);
9695 break;
9696 default:
9697 assert(0);
9698 break;
9699 }
9700 }
9701
9702 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
9703 {
9704 switch (reason) {
9705 case FC_PUSH_VPM:
9706 ++ctx->bc->stack.push;
9707 break;
9708 case FC_PUSH_WQM:
9709 ++ctx->bc->stack.push_wqm;
break;
9710 case FC_LOOP:
9711 ++ctx->bc->stack.loop;
9712 break;
9713 default:
9714 assert(0);
9715 }
9716
9717 callstack_update_max_depth(ctx, reason);
9718 }
9719
9720 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
9721 {
9722 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
9723
9724 sp->mid = realloc((void *)sp->mid,
9725 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
9726 sp->mid[sp->num_mid] = ctx->bc->cf_last;
9727 sp->num_mid++;
9728 }
9729
9730 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
9731 {
9732 assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack));
9733 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
9734 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
9735 ctx->bc->fc_sp++;
9736 }
9737
9738 static void fc_poplevel(struct r600_shader_ctx *ctx)
9739 {
9740 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1];
9741 free(sp->mid);
9742 sp->mid = NULL;
9743 sp->num_mid = 0;
9744 sp->start = NULL;
9745 sp->type = 0;
9746 ctx->bc->fc_sp--;
9747 }
9748
9749 #if 0
9750 static int emit_return(struct r600_shader_ctx *ctx)
9751 {
9752 r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
9753 return 0;
9754 }
9755
9756 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
9757 {
9758
9759 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
9760 ctx->bc->cf_last->pop_count = pops;
9761 /* XXX work out offset */
9762 return 0;
9763 }
9764
9765 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
9766 {
9767 return 0;
9768 }
9769
9770 static void emit_testflag(struct r600_shader_ctx *ctx)
9771 {
9772
9773 }
9774
9775 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
9776 {
9777 emit_testflag(ctx);
9778 emit_jump_to_offset(ctx, 1, 4);
9779 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
9780 pops(ctx, ifidx + 1);
9781 emit_return(ctx);
9782 }
9783
9784 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
9785 {
9786 emit_testflag(ctx);
9787
9788 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
9789 ctx->bc->cf_last->pop_count = 1;
9790
9791 fc_set_mid(ctx, fc_sp);
9792
9793 pops(ctx, 1);
9794 }
9795 #endif
9796
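/* CF sequence emitted for an IF, roughly:
 *   ALU_PUSH_BEFORE (PRED_SET*)  - push active mask, set predicate
 *   JUMP                         - target patched in tgsi_else/tgsi_endif
 * On Cayman inside nested loops this is split into an explicit PUSH plus
 * a plain ALU clause to dodge the hardware bug described below. */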
9797 static int emit_if(struct r600_shader_ctx *ctx, int opcode)
9798 {
9799 int alu_type = CF_OP_ALU_PUSH_BEFORE;
9800
9801 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
9802 * LOOP_STARTxxx for nested loops may put the branch stack into a state
9803 * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
9804 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
9805 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
9806 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
9807 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
9808 alu_type = CF_OP_ALU;
9809 }
9810
9811 emit_logic_pred(ctx, opcode, alu_type);
9812
9813 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
9814
9815 fc_pushlevel(ctx, FC_IF);
9816
9817 callstack_push(ctx, FC_PUSH_VPM);
9818 return 0;
9819 }
9820
9821 static int tgsi_if(struct r600_shader_ctx *ctx)
9822 {
9823 return emit_if(ctx, ALU_OP2_PRED_SETNE);
9824 }
9825
9826 static int tgsi_uif(struct r600_shader_ctx *ctx)
9827 {
9828 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
9829 }
9830
9831 static int tgsi_else(struct r600_shader_ctx *ctx)
9832 {
9833 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
9834 ctx->bc->cf_last->pop_count = 1;
9835
9836 fc_set_mid(ctx, ctx->bc->fc_sp - 1);
9837 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id;
9838 return 0;
9839 }
9840
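/* ENDIF back-patching: the JUMP recorded at fc_stack[].start (or the
 * ELSE recorded in mid[0], if present) is pointed just past the last CF
 * entry, so a false predicate skips the body; "id + 2" here apparently
 * selects the CF entry following cf_last. */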
9841 static int tgsi_endif(struct r600_shader_ctx *ctx)
9842 {
9843 pops(ctx, 1);
9844 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) {
9845 R600_ERR("if/endif unbalanced in shader\n");
9846 return -1;
9847 }
9848
9849 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) {
9850 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
9851 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1;
9852 } else {
9853 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
9854 }
9855 fc_poplevel(ctx);
9856
9857 callstack_pop(ctx, FC_PUSH_VPM);
9858 return 0;
9859 }
9860
9861 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
9862 {
9863 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
9864 * limited to 4096 iterations, like the other LOOP_* instructions. */
9865 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
9866
9867 fc_pushlevel(ctx, FC_LOOP);
9868
9869 /* check stack depth */
9870 callstack_push(ctx, FC_LOOP);
9871 return 0;
9872 }
9873
9874 static int tgsi_endloop(struct r600_shader_ctx *ctx)
9875 {
9876 int i;
9877
9878 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
9879
9880 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) {
9881 R600_ERR("loop/endloop in shader code are not paired.\n");
9882 return -EINVAL;
9883 }
9884
9885 /* fixup loop pointers - from r600isa
9886 LOOP END points to CF after LOOP START,
9887 LOOP START points to CF after LOOP END
9888 BRK/CONT point to LOOP END CF
9889 */
9890 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2;
9891
9892 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
9893
9894 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) {
9895 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id;
9896 }
9897 /* XXX add LOOPRET support */
9898 fc_poplevel(ctx);
9899 callstack_pop(ctx, FC_LOOP);
9900 return 0;
9901 }
9902
9903 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
9904 {
9905 unsigned int fscp;
9906
9907 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
9908 {
9909 if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
9910 break;
9911 }
9912
9913 if (fscp == 0) {
9914 R600_ERR("Break not inside loop/endloop pair\n");
9915 return -EINVAL;
9916 }
9917
9918 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
9919
9920 fc_set_mid(ctx, fscp - 1);
9921
9922 return 0;
9923 }
9924
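/* EMIT/ENDPRIM: for EMIT_VERTEX the buffered output ring writes are
 * flushed first and the per-stream ring offset is bumped afterwards;
 * the CF count field selects the stream for both CUT and EMIT. */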
9925 static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
9926 {
9927 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9928 int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
9929 int r;
9930
9931 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
9932 emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
9933
9934 r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
9935 if (!r) {
9936 ctx->bc->cf_last->count = stream; /* count field for CUT/EMIT_VERTEX selects the stream */
9937 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
9938 return emit_inc_ring_offset(ctx, stream, TRUE);
9939 }
9940 return r;
9941 }
9942
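/* UMAD has no single ALU op here, so it is split per channel into
 * MULLO_UINT(src0, src1) into a temp followed by ADD_INT with src2.
 * Illustrative example (assumed values): umad(3, 4, 5) -> temp = 12,
 * dst = 17. On Cayman the MULLO_UINT is replicated across all four
 * slots with only slot i writing its result. */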
9943 static int tgsi_umad(struct r600_shader_ctx *ctx)
9944 {
9945 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9946 struct r600_bytecode_alu alu;
9947 int i, j, k, r;
9948 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9949
9950 /* src0 * src1 */
9951 for (i = 0; i < lasti + 1; i++) {
9952 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9953 continue;
9954
9955 if (ctx->bc->chip_class == CAYMAN) {
9956 for (j = 0; j < 4; j++) {
9957 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9958
9959 alu.op = ALU_OP2_MULLO_UINT;
9960 for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
9961 r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
9962 }
9963 alu.dst.chan = j;
9964 alu.dst.sel = ctx->temp_reg;
9965 alu.dst.write = (j == i);
9966 if (j == 3)
9967 alu.last = 1;
9968 r = r600_bytecode_add_alu(ctx->bc, &alu);
9969 if (r)
9970 return r;
9971 }
9972 } else {
9973 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9974
9975 alu.dst.chan = i;
9976 alu.dst.sel = ctx->temp_reg;
9977 alu.dst.write = 1;
9978
9979 alu.op = ALU_OP2_MULLO_UINT;
9980 for (j = 0; j < 2; j++) {
9981 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
9982 }
9983
9984 alu.last = 1;
9985 r = r600_bytecode_add_alu(ctx->bc, &alu);
9986 if (r)
9987 return r;
9988 }
9989 }
9990
9991
9992 for (i = 0; i < lasti + 1; i++) {
9993 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9994 continue;
9995
9996 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9997 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9998
9999 alu.op = ALU_OP2_ADD_INT;
10000
10001 alu.src[0].sel = ctx->temp_reg;
10002 alu.src[0].chan = i;
10003
10004 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
10005 if (i == lasti) {
10006 alu.last = 1;
10007 }
10008 r = r600_bytecode_add_alu(ctx->bc, &alu);
10009 if (r)
10010 return r;
10011 }
10012 return 0;
10013 }
10014
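/* PK2H packs two float channels into one dword of f16 halves. The
 * MULADD_UINT24 below computes temp.y * 0x10000 + temp.x, i.e. a
 * shift-and-or, which is exact since each half fits in 16 bits.
 * Illustrative example (assumed input): src = (1.0, 2.0) -> halves
 * 0x3c00 and 0x4000 -> dst.x = 0x4000 * 0x10000 + 0x3c00 = 0x40003c00. */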
10015 static int tgsi_pk2h(struct r600_shader_ctx *ctx)
10016 {
10017 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10018 struct r600_bytecode_alu alu;
10019 int r, i;
10020 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10021
10022 /* temp.xy = f32_to_f16(src) */
10023 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10024 alu.op = ALU_OP1_FLT32_TO_FLT16;
10025 alu.dst.chan = 0;
10026 alu.dst.sel = ctx->temp_reg;
10027 alu.dst.write = 1;
10028 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10029 r = r600_bytecode_add_alu(ctx->bc, &alu);
10030 if (r)
10031 return r;
10032 alu.dst.chan = 1;
10033 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
10034 alu.last = 1;
10035 r = r600_bytecode_add_alu(ctx->bc, &alu);
10036 if (r)
10037 return r;
10038
10039 /* dst.x = temp.y * 0x10000 + temp.x */
10040 for (i = 0; i < lasti + 1; i++) {
10041 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10042 continue;
10043
10044 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10045 alu.op = ALU_OP3_MULADD_UINT24;
10046 alu.is_op3 = 1;
10047 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10048 alu.last = i == lasti;
10049 alu.src[0].sel = ctx->temp_reg;
10050 alu.src[0].chan = 1;
10051 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10052 alu.src[1].value = 0x10000;
10053 alu.src[2].sel = ctx->temp_reg;
10054 alu.src[2].chan = 0;
10055 r = r600_bytecode_add_alu(ctx->bc, &alu);
10056 if (r)
10057 return r;
10058 }
10059
10060 return 0;
10061 }
10062
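/* UP2H is the inverse of PK2H: temp.x keeps the low half (the
 * conversion ignores the high bits), temp.y is the dword shifted right
 * by 16, and each written channel converts temp.(i % 2), so dst expands
 * to (lo, hi, lo, hi). Example (assumed input): 0x40003c00 ->
 * (1.0, 2.0, 1.0, 2.0). */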
10063 static int tgsi_up2h(struct r600_shader_ctx *ctx)
10064 {
10065 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10066 struct r600_bytecode_alu alu;
10067 int r, i;
10068 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10069
10070 /* temp.x = src.x */
10071 /* note: no need to mask out the high bits */
10072 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10073 alu.op = ALU_OP1_MOV;
10074 alu.dst.chan = 0;
10075 alu.dst.sel = ctx->temp_reg;
10076 alu.dst.write = 1;
10077 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10078 r = r600_bytecode_add_alu(ctx->bc, &alu);
10079 if (r)
10080 return r;
10081
10082 /* temp.y = src.x >> 16 */
10083 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10084 alu.op = ALU_OP2_LSHR_INT;
10085 alu.dst.chan = 1;
10086 alu.dst.sel = ctx->temp_reg;
10087 alu.dst.write = 1;
10088 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10089 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10090 alu.src[1].value = 16;
10091 alu.last = 1;
10092 r = r600_bytecode_add_alu(ctx->bc, &alu);
10093 if (r)
10094 return r;
10095
10096 /* dst.wz = dst.xy = f16_to_f32(temp.xy) */
10097 for (i = 0; i < lasti + 1; i++) {
10098 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10099 continue;
10100 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10101 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10102 alu.op = ALU_OP1_FLT16_TO_FLT32;
10103 alu.src[0].sel = ctx->temp_reg;
10104 alu.src[0].chan = i % 2;
10105 alu.last = i == lasti;
10106 r = r600_bytecode_add_alu(ctx->bc, &alu);
10107 if (r)
10108 return r;
10109 }
10110
10111 return 0;
10112 }
10113
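/* IBFE/UBFE: the BFE_* ALU ops apparently treat a field width of 32 as
 * zero, so after the plain tgsi_op3_dst expansion the code below tests
 * width >= 32 per channel (SETGE_INT) and uses CNDE_INT to substitute
 * the unmodified src0 for the BFE result in that case. A temporary dst
 * is used when dst aliases src0/src2 so the fixup still reads the
 * original values. */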
10114 static int tgsi_bfe(struct r600_shader_ctx *ctx)
10115 {
10116 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10117 struct r600_bytecode_alu alu;
10118 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10119 int r, i;
10120 int dst = -1;
10121
10122 if ((inst->Src[0].Register.File == inst->Dst[0].Register.File &&
10123 inst->Src[0].Register.Index == inst->Dst[0].Register.Index) ||
10124 (inst->Src[2].Register.File == inst->Dst[0].Register.File &&
10125 inst->Src[2].Register.Index == inst->Dst[0].Register.Index))
10126 dst = r600_get_temp(ctx);
10127
10128 r = tgsi_op3_dst(ctx, dst);
10129 if (r)
10130 return r;
10131
10132 for (i = 0; i < lasti + 1; i++) {
10133 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10134 alu.op = ALU_OP2_SETGE_INT;
10135 r600_bytecode_src(&alu.src[0], &ctx->src[2], i);
10136 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10137 alu.src[1].value = 32;
10138 alu.dst.sel = ctx->temp_reg;
10139 alu.dst.chan = i;
10140 alu.dst.write = 1;
10141 if (i == lasti)
10142 alu.last = 1;
10143 r = r600_bytecode_add_alu(ctx->bc, &alu);
10144 if (r)
10145 return r;
10146 }
10147
10148 for (i = 0; i < lasti + 1; i++) {
10149 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10150 alu.op = ALU_OP3_CNDE_INT;
10151 alu.is_op3 = 1;
10152 alu.src[0].sel = ctx->temp_reg;
10153 alu.src[0].chan = i;
10154
10155 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10156 if (dst != -1)
10157 alu.src[1].sel = dst;
10158 else
10159 alu.src[1].sel = alu.dst.sel;
10160 alu.src[1].chan = i;
10161 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
10162 alu.dst.write = 1;
10163 if (i == lasti)
10164 alu.last = 1;
10165 r = r600_bytecode_add_alu(ctx->bc, &alu);
10166 if (r)
10167 return r;
10168 }
10169
10170 return 0;
10171 }
10172
10173 static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
10174 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_r600_arl},
10175 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
10176 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
10177
10178 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
10179
10180 [TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
10181 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
10182 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
10183 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
10184 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
10185 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10186 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10187 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
10188 /* MIN_DX10 returns a non-NaN result if one src is NaN, MIN returns NaN */
10189 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
10190 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
10191 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
10192 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
10193 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
10194 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
10195 [TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
10196 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
10197 [21] = { ALU_OP0_NOP, tgsi_unsupported},
10198 [22] = { ALU_OP0_NOP, tgsi_unsupported},
10199 [23] = { ALU_OP0_NOP, tgsi_unsupported},
10200 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
10201 [25] = { ALU_OP0_NOP, tgsi_unsupported},
10202 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
10203 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
10204 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
10205 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
10206 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
10207 [31] = { ALU_OP0_NOP, tgsi_unsupported},
10208 [32] = { ALU_OP0_NOP, tgsi_unsupported},
10209 [33] = { ALU_OP0_NOP, tgsi_unsupported},
10210 [34] = { ALU_OP0_NOP, tgsi_unsupported},
10211 [35] = { ALU_OP0_NOP, tgsi_unsupported},
10212 [TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
10213 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
10214 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
10215 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
10216 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
10217 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
10218 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
10219 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
10220 [44] = { ALU_OP0_NOP, tgsi_unsupported},
10221 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
10222 [46] = { ALU_OP0_NOP, tgsi_unsupported},
10223 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
10224 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
10225 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
10226 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
10227 [51] = { ALU_OP0_NOP, tgsi_unsupported},
10228 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
10229 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
10230 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
10231 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
10232 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
10233 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
10234 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
10235 [59] = { ALU_OP0_NOP, tgsi_unsupported},
10236 [60] = { ALU_OP0_NOP, tgsi_unsupported},
10237 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_r600_arl},
10238 [62] = { ALU_OP0_NOP, tgsi_unsupported},
10239 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
10240 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
10241 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
10242 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
10243 [67] = { ALU_OP0_NOP, tgsi_unsupported},
10244 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
10245 [69] = { ALU_OP0_NOP, tgsi_unsupported},
10246 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
10247 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10248 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
10249 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
10250 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
10251 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
10252 [76] = { ALU_OP0_NOP, tgsi_unsupported},
10253 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
10254 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
10255 [TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
10256 [TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
10257 [81] = { ALU_OP0_NOP, tgsi_unsupported},
10258 [82] = { ALU_OP0_NOP, tgsi_unsupported},
10259 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
10260 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
10261 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
10262 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
10263 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2_trans},
10264 [88] = { ALU_OP0_NOP, tgsi_unsupported},
10265 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
10266 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
10267 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
10268 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
10269 [93] = { ALU_OP0_NOP, tgsi_unsupported},
10270 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
10271 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
10272 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
10273 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
10274 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
10275 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
10276 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
10277 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
10278 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
10279 [103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
10280 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
10281 [TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
10282 [106] = { ALU_OP0_NOP, tgsi_unsupported},
10283 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
10284 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
10285 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
10286 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
10287 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
10288 [TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
10289 [113] = { ALU_OP0_NOP, tgsi_unsupported},
10290 [114] = { ALU_OP0_NOP, tgsi_unsupported},
10291 [115] = { ALU_OP0_NOP, tgsi_unsupported},
10292 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
10293 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
10294 [TGSI_OPCODE_DFMA] = { ALU_OP0_NOP, tgsi_unsupported},
10295 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
10296 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
10297 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
10298 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
10299 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
10300 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
10301 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2_trans},
10302 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
10303 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
10304 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
10305 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
10306 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
10307 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
10308 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
10309 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
10310 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
10311 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
10312 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
10313 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
10314 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2_trans},
10315 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
10316 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2_swap},
10317 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
10318 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
10319 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
10320 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
10321 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
10322 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
10323 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
10324 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
10325 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
10326 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
10327 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
10328 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
10329 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
10330 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
10331 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
10332 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
10333 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_r600_arl},
10334 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
10335 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
10336 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
10337 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
10338 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
10339 [163] = { ALU_OP0_NOP, tgsi_unsupported},
10340 [164] = { ALU_OP0_NOP, tgsi_unsupported},
10341 [165] = { ALU_OP0_NOP, tgsi_unsupported},
10342 [TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
10343 [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
10344 [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
10345 [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
10346 [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
10347 [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
10348 [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
10349 [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
10350 [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
10351 [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
10352 [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
10353 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
10354 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
10355 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
10356 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
10357 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
10358 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_unsupported},
10359 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_unsupported},
10360 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_unsupported},
10361 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_unsupported},
10362 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_unsupported},
10363 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_unsupported},
10364 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_unsupported},
10365 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_unsupported},
10366 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_unsupported},
10367 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_unsupported},
10368 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_unsupported},
10369 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_unsupported},
10370 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_unsupported},
10371 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
10372 };
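
/* The per-chip tables above and below are indexed by TGSI opcode; the
 * parse loop looks up the entry for the current instruction and invokes
 * its handler, along the lines of (sketch, names as assumed here):
 *
 *   ctx->inst_info = &r600_shader_tgsi_instruction[opcode];
 *   r = ctx->inst_info->process(ctx);
 *
 * tgsi_unsupported entries reject opcodes a chip family cannot handle;
 * bare numeric indices such as [21] are holes left by removed TGSI
 * opcodes. */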
10373
10374 static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
10375 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
10376 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
10377 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
10378 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
10379 [TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
10380 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
10381 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
10382 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
10383 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
10384 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10385 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10386 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
10387 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
10388 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
10389 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
10390 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
10391 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
10392 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
10393 [TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
10394 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
10395 [21] = { ALU_OP0_NOP, tgsi_unsupported},
10396 [22] = { ALU_OP0_NOP, tgsi_unsupported},
10397 [23] = { ALU_OP0_NOP, tgsi_unsupported},
10398 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
10399 [25] = { ALU_OP0_NOP, tgsi_unsupported},
10400 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
10401 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
10402 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
10403 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
10404 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
10405 [31] = { ALU_OP0_NOP, tgsi_unsupported},
10406 [32] = { ALU_OP0_NOP, tgsi_unsupported},
10407 [33] = { ALU_OP0_NOP, tgsi_unsupported},
10408 [34] = { ALU_OP0_NOP, tgsi_unsupported},
10409 [35] = { ALU_OP0_NOP, tgsi_unsupported},
10410 [TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
10411 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
10412 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
10413 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
10414 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
10415 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
10416 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
10417 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
10418 [44] = { ALU_OP0_NOP, tgsi_unsupported},
10419 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
10420 [46] = { ALU_OP0_NOP, tgsi_unsupported},
10421 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
10422 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
10423 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
10424 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
10425 [51] = { ALU_OP0_NOP, tgsi_unsupported},
10426 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
10427 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
10428 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
10429 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
10430 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
10431 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
10432 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
10433 [59] = { ALU_OP0_NOP, tgsi_unsupported},
10434 [60] = { ALU_OP0_NOP, tgsi_unsupported},
10435 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
10436 [62] = { ALU_OP0_NOP, tgsi_unsupported},
10437 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
10438 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
10439 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
10440 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
10441 [67] = { ALU_OP0_NOP, tgsi_unsupported},
10442 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
10443 [69] = { ALU_OP0_NOP, tgsi_unsupported},
10444 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
10445 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10446 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
10447 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
10448 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
10449 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
10450 [76] = { ALU_OP0_NOP, tgsi_unsupported},
10451 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
10452 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
10453 [TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
10454 [TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
10455 [82] = { ALU_OP0_NOP, tgsi_unsupported},
10456 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
10457 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
10458 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
10459 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
10460 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
10461 [88] = { ALU_OP0_NOP, tgsi_unsupported},
10462 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
10463 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
10464 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
10465 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
10466 [93] = { ALU_OP0_NOP, tgsi_unsupported},
10467 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
10468 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
10469 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
10470 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
10471 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
10472 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
10473 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
10474 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
10475 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
10476 [103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
10477 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
10478 [TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
10479 [106] = { ALU_OP0_NOP, tgsi_unsupported},
10480 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
10481 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
10482 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
10483 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
10484 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
10485 [TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
10486 [113] = { ALU_OP0_NOP, tgsi_unsupported},
10487 [114] = { ALU_OP0_NOP, tgsi_unsupported},
10488 [115] = { ALU_OP0_NOP, tgsi_unsupported},
10489 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
10490 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
10491 /* Refer below for TGSI_OPCODE_DFMA */
10492 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_f2i},
10493 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
10494 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
10495 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
10496 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
10497 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
10498 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
10499 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
10500 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
10501 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
10502 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
10503 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
10504 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
10505 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
10506 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
10507 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
10508 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
10509 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
10510 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
10511 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
10512 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
10513 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
10514 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
10515 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
10516 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
10517 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
10518 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
10519 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
10520 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
10521 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
10522 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
10523 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
10524 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
10525 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
10526 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
10527 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
10528 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
10529 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
10530 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
10531 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
10532 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
10533 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
10534 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
10535 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
10536 [163] = { ALU_OP0_NOP, tgsi_unsupported},
10537 [164] = { ALU_OP0_NOP, tgsi_unsupported},
10538 [165] = { ALU_OP0_NOP, tgsi_unsupported},
10539 [TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
10540 [TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
10541 [TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
10542 [TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
10543 [TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
10544 [TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
10545 [TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
10546 [TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
10547 [TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
10548 [TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
10549 [TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
10550 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
10551 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
10552 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
10553 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
10554 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
10555 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
10556 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
10557 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
10558 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
10559 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
10560 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
10561 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
10562 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
10563 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
10564 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
10565 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
10566 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
10567 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
10568 [TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
10569 [TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
10570 [TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
10571 [TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
10572 [TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
10573 [TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
10574 [TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
10575 [TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
10576 [TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
10577 [TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
10578 [TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
10579 [TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
10580 [TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
10581 [TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
10582 [TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
10583 [TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
10584 [TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
10585 [TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
10586 [TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
10587 [TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
10588 [TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
10589 [TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
10590 [TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
10591 [TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
10592 [TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
10593 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
10594 };
10595
10596 static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
10597 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
10598 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
10599 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
10600 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
10601 [TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
10602 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
10603 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
10604 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
10605 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
10606 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10607 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10608 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
10609 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
10610 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
10611 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
10612 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
10613 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
10614 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
10615 [TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
10616 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
10617 [21] = { ALU_OP0_NOP, tgsi_unsupported},
10618 [22] = { ALU_OP0_NOP, tgsi_unsupported},
10619 [23] = { ALU_OP0_NOP, tgsi_unsupported},
10620 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
10621 [25] = { ALU_OP0_NOP, tgsi_unsupported},
10622 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
10623 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
10624 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
10625 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
10626 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
10627 [31] = { ALU_OP0_NOP, tgsi_unsupported},
10628 [32] = { ALU_OP0_NOP, tgsi_unsupported},
10629 [33] = { ALU_OP0_NOP, tgsi_unsupported},
10630 [34] = { ALU_OP0_NOP, tgsi_unsupported},
10631 [35] = { ALU_OP0_NOP, tgsi_unsupported},
10632 [TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
10633 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
10634 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
10635 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
10636 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
10637 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
10638 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
10639 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
10640 [44] = { ALU_OP0_NOP, tgsi_unsupported},
10641 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
10642 [46] = { ALU_OP0_NOP, tgsi_unsupported},
10643 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
10644 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
10645 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
10646 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
10647 [51] = { ALU_OP0_NOP, tgsi_unsupported},
10648 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
10649 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
10650 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
10651 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
10652 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
10653 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
10654 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
10655 [59] = { ALU_OP0_NOP, tgsi_unsupported},
10656 [60] = { ALU_OP0_NOP, tgsi_unsupported},
10657 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
10658 [62] = { ALU_OP0_NOP, tgsi_unsupported},
10659 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
10660 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
10661 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
10662 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
10663 [67] = { ALU_OP0_NOP, tgsi_unsupported},
10664 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
10665 [69] = { ALU_OP0_NOP, tgsi_unsupported},
10666 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
10667 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
10668 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
10669 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
10670 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
10671 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
10672 [76] = { ALU_OP0_NOP, tgsi_unsupported},
10673 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
10674 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
10675 [TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
10676 [TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
10677 [82] = { ALU_OP0_NOP, tgsi_unsupported},
10678 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
10679 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
10680 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
10681 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
10682 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
10683 [88] = { ALU_OP0_NOP, tgsi_unsupported},
10684 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
10685 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
10686 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
10687 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
10688 [93] = { ALU_OP0_NOP, tgsi_unsupported},
10689 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
10690 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
10691 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
10692 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
10693 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
10694 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
10695 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
10696 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
10697 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
10698 [103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
10699 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
10700 [TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
10701 [106] = { ALU_OP0_NOP, tgsi_unsupported},
10702 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
10703 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
10704 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
10705 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
10706 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
10707 [TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
10708 [113] = { ALU_OP0_NOP, tgsi_unsupported},
10709 [114] = { ALU_OP0_NOP, tgsi_unsupported},
10710 [115] = { ALU_OP0_NOP, tgsi_unsupported},
10711 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
10712 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
10713 /* Refer below for TGSI_OPCODE_DFMA */
10714 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
10715 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
10716 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
10717 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
10718 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
10719 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
10720 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
10721 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
10722 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
10723 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
10724 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
10725 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
10726 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
10727 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
10728 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
10729 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
10730 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
10731 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
10732 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
10733 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
10734 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
10735 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
10736 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
10737 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
10738 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
10739 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
10740 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
10741 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
10742 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
10743 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
10744 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
10745 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
10746 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
10747 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
10748 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
10749 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
10750 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
10751 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
10752 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
10753 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
10754 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
10755 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
10756 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
10757 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
10758 [163] = { ALU_OP0_NOP, tgsi_unsupported},
10759 [164] = { ALU_OP0_NOP, tgsi_unsupported},
10760 [165] = { ALU_OP0_NOP, tgsi_unsupported},
10761 [TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
10762 [TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
10763 [TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
10764 [TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
10765 [TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
10766 [TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
10767 [TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
10768 [TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
10769 [TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
10770 [TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
10771 [TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
10772 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
10773 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
10774 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
10775 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
10776 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
10777 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
10778 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
10779 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
10780 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
10781 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
10782 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
10783 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
10784 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
10785 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
10786 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
10787 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
10788 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
10789 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
10790 [TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
10791 [TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
10792 [TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
10793 [TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
10794 [TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
10795 [TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
10796 [TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
10797 [TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
10798 [TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
10799 [TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
10800 [TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
10801 [TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
10802 [TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
10803 [TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
10804 [TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
10805 [TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
10806 [TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
10807 [TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
10808 [TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
10809 [TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
10810 [TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
10811 [TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
10812 [TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
10813 [TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
10814 [TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
10815 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
10816 };