r600g: join export instructions
[mesa.git] / src / gallium / drivers / r600 / r600_asm.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
29 #include "r600_sq.h"
30 #include "r600_opcodes.h"
31 #include "r600_asm.h"
32 #include "r600_formats.h"
33 #include "r600d.h"
34
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
37
38 #define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
39 #define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)
40
41 static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu *alu)
42 {
43 if(alu->is_op3)
44 return 3;
45
46 switch (alu->inst) {
47 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
48 return 0;
49 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
68 return 2;
69
70 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
71 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
83 return 1;
84 default: R600_ERR(
85 "Need instruction operand number for 0x%x.\n", alu->inst);
86 };
87
88 return 3;
89 }
90
91 int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);
92
93 static struct r600_bc_cf *r600_bc_cf(void)
94 {
95 struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);
96
97 if (cf == NULL)
98 return NULL;
99 LIST_INITHEAD(&cf->list);
100 LIST_INITHEAD(&cf->alu);
101 LIST_INITHEAD(&cf->vtx);
102 LIST_INITHEAD(&cf->tex);
103 cf->barrier = 1;
104 return cf;
105 }
106
107 static struct r600_bc_alu *r600_bc_alu(void)
108 {
109 struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);
110
111 if (alu == NULL)
112 return NULL;
113 LIST_INITHEAD(&alu->list);
114 return alu;
115 }
116
117 static struct r600_bc_vtx *r600_bc_vtx(void)
118 {
119 struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);
120
121 if (vtx == NULL)
122 return NULL;
123 LIST_INITHEAD(&vtx->list);
124 return vtx;
125 }
126
127 static struct r600_bc_tex *r600_bc_tex(void)
128 {
129 struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);
130
131 if (tex == NULL)
132 return NULL;
133 LIST_INITHEAD(&tex->list);
134 return tex;
135 }
136
/* Initialise a bytecode stream and derive the instruction-encoding
 * revision (r600 / r700 / evergreen) from the chip family.
 * Returns 0 on success, -EINVAL for an unknown family. */
int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
{
	LIST_INITHEAD(&bc->cf);
	bc->family = family;
	switch (bc->family) {
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RS780:
	case CHIP_RS880:
		bc->chiprev = CHIPREV_R600;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
		bc->chiprev = CHIPREV_R700;
		break;
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
		bc->chiprev = CHIPREV_EVERGREEN;
		break;
	default:
		R600_ERR("unknown family %d\n", bc->family);
		return -EINVAL;
	}
	return 0;
}
172
173 static int r600_bc_add_cf(struct r600_bc *bc)
174 {
175 struct r600_bc_cf *cf = r600_bc_cf();
176
177 if (cf == NULL)
178 return -ENOMEM;
179 LIST_ADDTAIL(&cf->list, &bc->cf);
180 if (bc->cf_last)
181 cf->id = bc->cf_last->id + 2;
182 bc->cf_last = cf;
183 bc->ncf++;
184 bc->ndw += 2;
185 bc->force_add_cf = 0;
186 return 0;
187 }
188
189 static void r600_bc_remove_cf(struct r600_bc *bc, struct r600_bc_cf *cf)
190 {
191 struct r600_bc_cf *other;
192 LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
193 if (other->id > cf->id)
194 other->id -= 2;
195 if (other->cf_addr > cf->id)
196 other->cf_addr -= 2;
197 }
198 LIST_DEL(&cf->list);
199 free(cf);
200 }
201
/* Move @cf so that it immediately precedes @next, renumbering every
 * clause's id and cf_addr (each CF entry is two dwords wide).
 * NOTE(review): ids are shifted with ">= new_id" but cf_addr with
 * "> new_id" — the asymmetry looks intentional for addresses that point
 * exactly at the insertion position, but verify against the callers. */
static void r600_bc_move_cf(struct r600_bc *bc, struct r600_bc_cf *cf, struct r600_bc_cf *next)
{
	struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, next->list.prev, list);
	unsigned old_id = cf->id;
	unsigned new_id = prev->id + 2;
	struct r600_bc_cf *other;

	if (prev == cf)
		return; /* position hasn't changed */

	LIST_DEL(&cf->list);
	LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
		/* close the gap left at the old position ... */
		if (other->id > old_id)
			other->id -= 2;
		/* ... and open one at the new position */
		if (other->id >= new_id)
			other->id += 2;
		if (other->cf_addr > old_id)
			other->cf_addr -= 2;
		if (other->cf_addr > new_id)
			other->cf_addr += 2;
	}
	cf->id = new_id;
	LIST_ADD(&cf->list, &prev->list);
}
226
227 int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
228 {
229 int r;
230
231 r = r600_bc_add_cf(bc);
232 if (r)
233 return r;
234 bc->cf_last->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
235 memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
236 bc->cf_last->output.burst_count = 1;
237 return 0;
238 }
239
240 /* alu predicate instructions */
241 static int is_alu_pred_inst(struct r600_bc_alu *alu)
242 {
243 return !alu->is_op3 && (
244 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
245 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
246 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
247 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
248 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
249 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
250 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
251 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
252 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
253 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
254 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
255 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
256 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
257 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
258 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
259 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
260 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
261 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
262 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
263 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
264 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
265 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
266 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
267 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
268 }
269
270 /* alu kill instructions */
271 static int is_alu_kill_inst(struct r600_bc_alu *alu)
272 {
273 return !alu->is_op3 && (
274 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
275 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
276 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
277 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
278 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
279 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
280 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
281 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
282 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
283 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT);
284 }
285
/* alu instructions that may only exist once per instruction group */
static int is_alu_once_inst(struct r600_bc_alu *alu)
{
	if (is_alu_kill_inst(alu))
		return 1;
	return is_alu_pred_inst(alu);
}
292
293 static int is_alu_reduction_inst(struct r600_bc_alu *alu)
294 {
295 return !alu->is_op3 && (
296 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
297 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
298 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
299 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
300 }
301
302 static int is_alu_mova_inst(struct r600_bc_alu *alu)
303 {
304 return !alu->is_op3 && (
305 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
306 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
307 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
308 }
309
/* alu instructions that can only execute on the vector unit */
static int is_alu_vec_unit_inst(struct r600_bc_alu *alu)
{
	if (is_alu_reduction_inst(alu))
		return 1;
	return is_alu_mova_inst(alu);
}
316
317 /* alu instructions that can only execute on the trans unit */
318 static int is_alu_trans_unit_inst(struct r600_bc_alu *alu)
319 {
320 if(!alu->is_op3)
321 return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
322 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
323 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
324 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
325 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
326 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
327 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
328 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
329 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
330 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
331 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
332 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
333 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
334 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
335 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
336 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
337 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
338 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
339 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
340 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
341 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
342 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
343 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
344 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
345 else
346 return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
347 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
348 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
349 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
350 }
351
/* alu instructions that can execute on any unit */
static int is_alu_any_unit_inst(struct r600_bc_alu *alu)
{
	return !(is_alu_vec_unit_inst(alu) || is_alu_trans_unit_inst(alu));
}
358
359 static int assign_alu_units(struct r600_bc_alu *alu_first, struct r600_bc_alu *assignment[5])
360 {
361 struct r600_bc_alu *alu;
362 unsigned i, chan, trans;
363
364 for (i = 0; i < 5; i++)
365 assignment[i] = NULL;
366
367 for (alu = alu_first; alu; alu = NEXT_ALU(alu)) {
368 chan = alu->dst.chan;
369 if (is_alu_trans_unit_inst(alu))
370 trans = 1;
371 else if (is_alu_vec_unit_inst(alu))
372 trans = 0;
373 else if (assignment[chan])
374 trans = 1; // assume ALU_INST_PREFER_VECTOR
375 else
376 trans = 0;
377
378 if (trans) {
379 if (assignment[4]) {
380 assert(0); //ALU.Trans has already been allocated
381 return -1;
382 }
383 assignment[4] = alu;
384 } else {
385 if (assignment[chan]) {
386 assert(0); //ALU.chan has already been allocated
387 return -1;
388 }
389 assignment[chan] = alu;
390 }
391
392 if (alu->last)
393 break;
394 }
395 return 0;
396 }
397
/* Tracks the read-port usage of one ALU instruction group:
 * which GPR is read on each cycle/component, and which constant-file
 * address/element pairs occupy the four cfile read ports (-1 = free). */
struct alu_bank_swizzle {
	int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
	int hw_cfile_addr[4];
	int hw_cfile_elem[4];
};
403
/* For each vector bank-swizzle value: the hardware cycle on which source
 * operand 0/1/2 performs its GPR read. */
const unsigned cycle_for_bank_swizzle_vec[][3] = {
	[SQ_ALU_VEC_012] = { 0, 1, 2 },
	[SQ_ALU_VEC_021] = { 0, 2, 1 },
	[SQ_ALU_VEC_120] = { 1, 2, 0 },
	[SQ_ALU_VEC_102] = { 1, 0, 2 },
	[SQ_ALU_VEC_201] = { 2, 0, 1 },
	[SQ_ALU_VEC_210] = { 2, 1, 0 }
};

/* Same mapping for the scalar (trans) unit bank swizzles. */
const unsigned cycle_for_bank_swizzle_scl[][3] = {
	[SQ_ALU_SCL_210] = { 2, 1, 0 },
	[SQ_ALU_SCL_122] = { 1, 2, 2 },
	[SQ_ALU_SCL_212] = { 2, 1, 2 },
	[SQ_ALU_SCL_221] = { 2, 2, 1 }
};
419
420 static void init_bank_swizzle(struct alu_bank_swizzle *bs)
421 {
422 int i, cycle, component;
423 /* set up gpr use */
424 for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
425 for (component = 0; component < NUM_OF_COMPONENTS; component++)
426 bs->hw_gpr[cycle][component] = -1;
427 for (i = 0; i < 4; i++)
428 bs->hw_cfile_addr[i] = -1;
429 for (i = 0; i < 4; i++)
430 bs->hw_cfile_elem[i] = -1;
431 }
432
433 static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
434 {
435 if (bs->hw_gpr[cycle][chan] == -1)
436 bs->hw_gpr[cycle][chan] = sel;
437 else if (bs->hw_gpr[cycle][chan] != (int)sel) {
438 // Another scalar operation has already used GPR read port for channel
439 return -1;
440 }
441 return 0;
442 }
443
444 static int reserve_cfile(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
445 {
446 int res, resmatch = -1, resempty = -1;
447 for (res = 3; res >= 0; --res) {
448 if (bs->hw_cfile_addr[res] == -1)
449 resempty = res;
450 else if (bs->hw_cfile_addr[res] == sel &&
451 bs->hw_cfile_elem[res] == chan)
452 resmatch = res;
453 }
454 if (resmatch != -1)
455 return 0; // Read for this scalar element already reserved, nothing to do here.
456 else if (resempty != -1) {
457 bs->hw_cfile_addr[resempty] = sel;
458 bs->hw_cfile_elem[resempty] = chan;
459 } else {
460 // All cfile read ports are used, cannot reference vector element
461 return -1;
462 }
463 return 0;
464 }
465
/* GPR source selects occupy 0..127.  @sel is unsigned, so only the upper
 * bound needs checking — the original "sel >= 0" clause was always true
 * and triggers tautological-comparison warnings. */
static int is_gpr(unsigned sel)
{
	return sel <= 127;
}
470
/* Constant-file source selects occupy 256..511. */
static int is_cfile(unsigned sel)
{
	return sel >= 256 && sel <= 511;
}
475
476 static int is_const(int sel)
477 {
478 return is_cfile(sel) ||
479 (sel >= V_SQ_ALU_SRC_0 &&
480 sel <= V_SQ_ALU_SRC_LITERAL);
481 }
482
483 static int check_vector(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
484 {
485 int r, src, num_src, sel, elem, cycle;
486
487 num_src = r600_bc_get_num_operands(alu);
488 for (src = 0; src < num_src; src++) {
489 sel = alu->src[src].sel;
490 elem = alu->src[src].chan;
491 if (is_gpr(sel)) {
492 cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
493 if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
494 // Nothing to do; special-case optimization,
495 // second source uses first source’s reservation
496 continue;
497 else {
498 r = reserve_gpr(bs, sel, elem, cycle);
499 if (r)
500 return r;
501 }
502 } else if (is_cfile(sel)) {
503 r = reserve_cfile(bs, sel, elem);
504 if (r)
505 return r;
506 }
507 // No restrictions on PV, PS, literal or special constants
508 }
509 return 0;
510 }
511
512 static int check_scalar(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
513 {
514 int r, src, num_src, const_count, sel, elem, cycle;
515
516 num_src = r600_bc_get_num_operands(alu);
517 for (const_count = 0, src = 0; src < num_src; ++src) {
518 sel = alu->src[src].sel;
519 elem = alu->src[src].chan;
520 if (is_const(sel)) { // Any constant, including literal and inline constants
521 if (const_count >= 2)
522 // More than two references to a constant in
523 // transcendental operation.
524 return -1;
525 else
526 const_count++;
527 }
528 if (is_cfile(sel)) {
529 r = reserve_cfile(bs, sel, elem);
530 if (r)
531 return r;
532 }
533 }
534 for (src = 0; src < num_src; ++src) {
535 sel = alu->src[src].sel;
536 elem = alu->src[src].chan;
537 if (is_gpr(sel)) {
538 cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
539 if (cycle < const_count)
540 // Cycle for GPR load conflicts with
541 // constant load in transcendental operation.
542 return -1;
543 r = reserve_gpr(bs, sel, elem, cycle);
544 if (r)
545 return r;
546 }
547 // Constants already processed
548 // No restrictions on PV, PS
549 }
550 return 0;
551 }
552
/* Find a bank-swizzle combination under which every instruction in the
 * group can read its sources, and store it in each slot.  Forced
 * swizzles short-circuit the search.  Returns -1 when no combination
 * works. */
static int check_and_set_bank_swizzle(struct r600_bc_alu *slots[5])
{
	struct alu_bank_swizzle bs;
	int bank_swizzle[5];
	int i, r = 0, forced = 0;

	/* honour explicitly forced swizzles and skip the search entirely */
	for (i = 0; i < 5; i++)
		if (slots[i] && slots[i]->bank_swizzle_force) {
			slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
			forced = 1;
		}

	if (forced)
		return 0;

	// just check every possible combination of bank swizzle
	// not very efficient, but works on the first try in most of the cases
	for (i = 0; i < 4; i++)
		bank_swizzle[i] = SQ_ALU_VEC_012;
	bank_swizzle[4] = SQ_ALU_SCL_210;
	while(bank_swizzle[4] <= SQ_ALU_SCL_221) {
		init_bank_swizzle(&bs);
		for (i = 0; i < 4; i++) {
			if (slots[i]) {
				r = check_vector(slots[i], &bs, bank_swizzle[i]);
				if (r)
					break;
			}
		}
		if (!r && slots[4]) {
			r = check_scalar(slots[4], &bs, bank_swizzle[4]);
		}
		if (!r) {
			/* this combination works — commit it to the slots */
			for (i = 0; i < 5; i++) {
				if (slots[i])
					slots[i]->bank_swizzle = bank_swizzle[i];
			}
			return 0;
		}

		/* advance to the next combination, odometer style */
		for (i = 0; i < 5; i++) {
			bank_swizzle[i]++;
			if (bank_swizzle[i] <= SQ_ALU_VEC_210)
				break;
			else
				bank_swizzle[i] = SQ_ALU_VEC_012;
		}
	}

	// couldn't find a working swizzle
	return -1;
}
605
/* Rewrite GPR sources that were written by the previous instruction
 * group so they read the PV/PS bypass registers instead: the trans slot
 * result is available as PS, the vector slot j result as PV element j. */
static int replace_gpr_with_pv_ps(struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	int gpr[5], chan[5];
	int i, j, r, src, num_src;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	/* record which gpr/channel each previous slot wrote; relative
	 * destinations cannot be tracked and are treated as unused.
	 * chan[i] is left uninitialised when gpr[i] == -1, which is safe:
	 * gpr[i] == -1 never matches a GPR sel (is_gpr limits sel to 0..127),
	 * so chan[i] is never read in that case. */
	for (i = 0; i < 5; ++i) {
		if(prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
			gpr[i] = prev[i]->dst.sel;
			if (is_alu_reduction_inst(prev[i]))
				chan[i] = 0;
			else
				chan[i] = prev[i]->dst.chan;
		} else
			gpr[i] = -1;
	}

	for (i = 0; i < 5; ++i) {
		struct r600_bc_alu *alu = slots[i];
		if(!alu)
			continue;

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			/* relative sources can't be matched reliably */
			if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
				continue;

			/* trans slot result is read through PS */
			if (alu->src[src].sel == gpr[4] &&
				alu->src[src].chan == chan[4]) {
				alu->src[src].sel = V_SQ_ALU_SRC_PS;
				alu->src[src].chan = 0;
				continue;
			}

			/* vector slot j result is read through PV element j.
			 * NOTE(review): the match compares src.chan against the
			 * slot index j (not chan[j]) and then reads chan[j] —
			 * confirm this matches the PV element semantics. */
			for (j = 0; j < 4; ++j) {
				if (alu->src[src].sel == gpr[j] &&
					alu->src[src].chan == j) {
					alu->src[src].sel = V_SQ_ALU_SRC_PV;
					alu->src[src].chan = chan[j];
					break;
				}
			}
		}
	}

	return 0;
}
657
658 void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg)
659 {
660 switch(value) {
661 case 0:
662 *sel = V_SQ_ALU_SRC_0;
663 break;
664 case 1:
665 *sel = V_SQ_ALU_SRC_1_INT;
666 break;
667 case -1:
668 *sel = V_SQ_ALU_SRC_M_1_INT;
669 break;
670 case 0x3F800000: // 1.0f
671 *sel = V_SQ_ALU_SRC_1;
672 break;
673 case 0x3F000000: // 0.5f
674 *sel = V_SQ_ALU_SRC_0_5;
675 break;
676 case 0xBF800000: // -1.0f
677 *sel = V_SQ_ALU_SRC_1;
678 *neg ^= 1;
679 break;
680 case 0xBF000000: // -0.5f
681 *sel = V_SQ_ALU_SRC_0_5;
682 *neg ^= 1;
683 break;
684 default:
685 *sel = V_SQ_ALU_SRC_LITERAL;
686 break;
687 }
688 }
689
690 /* compute how many literal are needed */
691 static int r600_bc_alu_nliterals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned *nliteral)
692 {
693 unsigned num_src = r600_bc_get_num_operands(alu);
694 unsigned i, j;
695
696 for (i = 0; i < num_src; ++i) {
697 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
698 uint32_t value = alu->src[i].value[alu->src[i].chan];
699 unsigned found = 0;
700 for (j = 0; j < *nliteral; ++j) {
701 if (literal[j] == value) {
702 found = 1;
703 break;
704 }
705 }
706 if (!found) {
707 if (*nliteral >= 4)
708 return -EINVAL;
709 literal[(*nliteral)++] = value;
710 }
711 }
712 }
713 return 0;
714 }
715
716 static void r600_bc_alu_adjust_literals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned nliteral)
717 {
718 unsigned num_src = r600_bc_get_num_operands(alu);
719 unsigned i, j;
720
721 for (i = 0; i < num_src; ++i) {
722 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
723 uint32_t value = alu->src[i].value[alu->src[i].chan];
724 for (j = 0; j < nliteral; ++j) {
725 if (literal[j] == value) {
726 alu->src[i].chan = j;
727 break;
728 }
729 }
730 }
731 }
732 }
733
/* Try to pack the previous instruction group (@alu_prev) and the current
 * one (@slots) into a single group of at most five slots.  On any
 * conflict the function returns 0 without touching the lists, leaving
 * both groups as they were; negative values are only returned for hard
 * errors.  On success both groups are relinked as one and the
 * current/previous group heads on the CF clause are updated. */
static int merge_inst_groups(struct r600_bc *bc, struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	struct r600_bc_alu *result[5] = { NULL };

	uint32_t literal[4];
	unsigned nliteral = 0;

	int i, j, r, src, num_src;
	int num_once_inst = 0;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		/* check number of literals — the merged group must fit the
		 * four-dword literal budget */
		if (prev[i] && r600_bc_alu_nliterals(prev[i], literal, &nliteral))
			return 0;
		if (slots[i] && r600_bc_alu_nliterals(slots[i], literal, &nliteral))
			return 0;

		// let's check used slots
		if (prev[i] && !slots[i]) {
			result[i] = prev[i];
			num_once_inst += is_alu_once_inst(prev[i]);
			continue;
		} else if (prev[i] && slots[i]) {
			if (result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
				// trans unit is still free try to use it
				if (is_alu_any_unit_inst(slots[i])) {
					result[i] = prev[i];
					result[4] = slots[i];
				} else if (is_alu_any_unit_inst(prev[i])) {
					result[i] = slots[i];
					result[4] = prev[i];
				} else
					return 0;
			} else
				return 0;
		} else if(!slots[i]) {
			continue;
		} else
			result[i] = slots[i];

		// let's check source gprs — the current group must not read a
		// gpr element that the previous group writes
		struct r600_bc_alu *alu = slots[i];
		num_once_inst += is_alu_once_inst(alu);

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			// constants don't matter
			if (!is_gpr(alu->src[src].sel))
				continue;

			for (j = 0; j < 5; ++j) {
				if (!prev[j] || !prev[j]->dst.write)
					continue;

				// if it's relative then we can't determine which gpr is really used
				if (prev[j]->dst.chan == alu->src[src].chan &&
				    (prev[j]->dst.sel == alu->src[src].sel ||
				     prev[j]->dst.rel || alu->src[src].rel))
					return 0;
			}
		}
	}

	/* more than one PRED_ or KILL_ ? */
	if (num_once_inst > 1)
		return 0;

	/* check if the result can still be swizzled */
	r = check_and_set_bank_swizzle(result);
	if (r)
		return 0;

	/* looks like everything worked out right, apply the changes */

	/* sort instructions — relink the merged group in slot order */
	for (i = 0; i < 5; ++i) {
		slots[i] = result[i];
		if (result[i]) {
			LIST_DEL(&result[i]->list);
			result[i]->last = 0;
			LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
		}
	}

	/* determine new last instruction */
	LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list)->last = 1;

	/* determine new first instruction */
	for (i = 0; i < 5; ++i) {
		if (result[i]) {
			bc->cf_last->curr_bs_head = result[i];
			break;
		}
	}

	/* the two merged groups became one, so shift the group history */
	bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
	bc->cf_last->prev2_bs_head = NULL;

	return 0;
}
839
/* Append one ALU instruction to the bytecode stream, opening a new CF
 * clause when needed (type mismatch, clause full, or forced).  When the
 * instruction closes a group (alu->last), the group is post-processed:
 * merged with the previous group where possible, GPR reads replaced by
 * PV/PS, and a bank swizzle chosen. */
int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
{
	struct r600_bc_alu *nalu = r600_bc_alu();
	struct r600_bc_alu *lalu;
	int i, r;

	if (nalu == NULL)
		return -ENOMEM;
	memcpy(nalu, alu, sizeof(struct r600_bc_alu));

	if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
		/* check if we could add it anyway — an ALU clause may become
		 * ALU_PUSH_BEFORE only if it contains no predicate write yet */
		if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
			type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
			LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
				if (lalu->predicate) {
					bc->force_add_cf = 1;
					break;
				}
			}
		} else
			bc->force_add_cf = 1;
	}

	/* cf can contain only alu or only vtx or only tex */
	if (bc->cf_last == NULL || bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nalu);
			return r;
		}
	}
	bc->cf_last->inst = (type << 3);
	if (!bc->cf_last->curr_bs_head) {
		bc->cf_last->curr_bs_head = nalu;
	}
	/* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
	 * worst case */
	if (alu->last && (bc->cf_last->ndw >> 1) >= 120) {
		bc->force_add_cf = 1;
	}
	/* replace special constants */
	for (i = 0; i < 3; i++) {
		if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
			r600_bc_special_constants(
				nalu->src[i].value[nalu->src[i].chan],
				&nalu->src[i].sel, &nalu->src[i].neg);
	}
	LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
	/* each alu uses 2 dwords */
	bc->cf_last->ndw += 2;
	bc->ndw += 2;

	bc->cf_last->kcache0_mode = 2;

	/* process current ALU instruction group for bank swizzle */
	if (alu->last) {
		struct r600_bc_alu *slots[5];
		r = assign_alu_units(bc->cf_last->curr_bs_head, slots);
		if (r)
			return r;

		if (bc->cf_last->prev_bs_head) {
			r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		/* re-check: merging may have updated prev_bs_head */
		if (bc->cf_last->prev_bs_head) {
			r = replace_gpr_with_pv_ps(slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		r = check_and_set_bank_swizzle(slots);
		if (r)
			return r;

		/* rotate the group history */
		bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
		bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
		bc->cf_last->curr_bs_head = NULL;
	}
	return 0;
}
924
925 int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
926 {
927 return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
928 }
929
930 static void r600_bc_remove_alu(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
931 {
932 if (alu->last && alu->list.prev != &cf->alu) {
933 PREV_ALU(alu)->last = 1;
934 }
935 LIST_DEL(&alu->list);
936 free(alu);
937 cf->ndw -= 2;
938 }
939
940 int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
941 {
942 struct r600_bc_vtx *nvtx = r600_bc_vtx();
943 int r;
944
945 if (nvtx == NULL)
946 return -ENOMEM;
947 memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));
948
949 /* cf can contains only alu or only vtx or only tex */
950 if (bc->cf_last == NULL ||
951 (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
952 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) ||
953 bc->force_add_cf) {
954 r = r600_bc_add_cf(bc);
955 if (r) {
956 free(nvtx);
957 return r;
958 }
959 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
960 }
961 LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
962 /* each fetch use 4 dwords */
963 bc->cf_last->ndw += 4;
964 bc->ndw += 4;
965 if ((bc->cf_last->ndw / 4) > 7)
966 bc->force_add_cf = 1;
967 return 0;
968 }
969
970 int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
971 {
972 struct r600_bc_tex *ntex = r600_bc_tex();
973 int r;
974
975 if (ntex == NULL)
976 return -ENOMEM;
977 memcpy(ntex, tex, sizeof(struct r600_bc_tex));
978
979 /* cf can contains only alu or only vtx or only tex */
980 if (bc->cf_last == NULL ||
981 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
982 bc->force_add_cf) {
983 r = r600_bc_add_cf(bc);
984 if (r) {
985 free(ntex);
986 return r;
987 }
988 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
989 }
990 LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
991 /* each texture fetch use 4 dwords */
992 bc->cf_last->ndw += 4;
993 bc->ndw += 4;
994 if ((bc->cf_last->ndw / 4) > 7)
995 bc->force_add_cf = 1;
996 return 0;
997 }
998
999 int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
1000 {
1001 int r;
1002 r = r600_bc_add_cf(bc);
1003 if (r)
1004 return r;
1005
1006 bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
1007 bc->cf_last->inst = inst;
1008 return 0;
1009 }
1010
/* common to all 3 families */
/* Emit the four dwords encoding one vertex fetch instruction, starting
 * at bytecode index @id. */
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
{
	unsigned fetch_resource_start = 0;

	/* check if we are fetch shader */
	/* fetch shader can also access vertex resource,
	 * first fetch shader resource is at 160
	 */
	if (bc->type == -1) {
		switch (bc->chiprev) {
		/* r600 */
		case CHIPREV_R600:
		/* r700 */
		case CHIPREV_R700:
			fetch_resource_start = 160;
			break;
		/* evergreen */
		case CHIPREV_EVERGREEN:
			fetch_resource_start = 0;
			break;
		default:
			fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
				__FILE__, __func__, __LINE__, bc->chiprev);
			break;
		}
	}
	/* VTX_WORD0: resource, source gpr/select, mega fetch count */
	bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
			S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
			S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
			S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
	/* VTX_WORD1: destination swizzle and data format */
	bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
			S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
			S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
			S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
			S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
			S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
			S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
			S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
			S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
			S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
	/* VTX_WORD2 + padding dword */
	bc->bytecode[id++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
	bc->bytecode[id++] = 0;
	return 0;
}
1056
/* common to all 3 families */
/* Emit the four dwords encoding one texture fetch instruction, starting
 * at bytecode index @id. */
static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
{
	/* TEX_WORD0: opcode, resource and source gpr */
	bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
				S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
				S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
				S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
	/* TEX_WORD1: destination gpr/swizzle, lod bias, coord types */
	bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
				S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
				S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
				S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
				S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
				S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
				S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
				S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
				S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
				S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
				S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
	/* TEX_WORD2: offsets, sampler and source swizzle */
	bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
				S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
				S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
				S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
				S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
				S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
				S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
				S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
	/* padding dword */
	bc->bytecode[id++] = 0;
	return 0;
}
1086
/* r600 only, r700/eg bits in r700_asm.c */
/* Encode one ALU instruction into two bytecode dwords at bc->bytecode[id].
 * WORD0 carries the two shared source operands, WORD1 either the OP3 form
 * (third source, no output modifiers) or the OP2 form (abs/omod/write mask
 * and predicate update bits). */
static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
{
	/* don't replace gpr by pv or ps for destination register */
	bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
				S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
				S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
				S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
				S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
				S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
				S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
				S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
				S_SQ_ALU_WORD0_LAST(alu->last);

	if (alu->is_op3) {
		/* OP3 encoding: src2 takes the place of abs/omod/write-mask bits */
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
					S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
					S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
					S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
					S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
	} else {
		/* OP2 encoding; a predicate instruction updates both the
		 * execute mask and the predicate from the same flag */
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
					S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
					S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
					S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
					S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
					S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
					S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
	}
	return 0;
}
1128
/* Coarse classification of CF (control flow) instructions; selects the
 * encoding layout and the optimizer handling for each CF node. */
enum cf_class
{
	CF_CLASS_ALU,		/* ALU clause */
	CF_CLASS_TEXTURE,	/* texture fetch clause */
	CF_CLASS_VERTEX,	/* vertex fetch clause */
	CF_CLASS_EXPORT,	/* export (and other memory writes, see TODOs below) */
	CF_CLASS_OTHER		/* pure flow control: jumps, loops, call/return, pop */
};
1137
/* Map a CF instruction opcode to its cf_class.
 *
 * ALU clause opcodes are stored shifted left by 3 so they don't collide
 * with the plain CF word opcodes in the same switch.
 *
 * NOTE(review): on an unknown opcode this returns -EINVAL through an
 * enum-typed function; callers must treat any non-class value as an error. */
static enum cf_class get_cf_class(struct r600_bc_cf *cf)
{
	switch (cf->inst) {
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
		return CF_CLASS_ALU;

	case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
		return CF_CLASS_TEXTURE;

	case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
	case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
		return CF_CLASS_VERTEX;

	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		return CF_CLASS_EXPORT;

	case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
	case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
	case V_SQ_CF_WORD1_SQ_CF_INST_POP:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
	case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
	case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
		return CF_CLASS_OTHER;

	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
}
1174
/* common for r600/r700 - eg in eg_asm.c */
/* Encode one CF node into two bytecode dwords at bc->bytecode[cf->id].
 * The last CF node in the program gets the END_OF_PROGRAM bit; ALU clauses
 * have no such bit and therefore must not be last (asserted below). */
static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	unsigned id = cf->id;
	/* cf is last when its list node is the tail of bc->cf */
	unsigned end_of_program = bc->cf.prev == &cf->list;

	switch (get_cf_class(cf)) {
	case CF_CLASS_ALU:
		assert(!end_of_program);
		/* clause address is in units of 2 dwords, hence addr >> 1 */
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
			S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache0_mode) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache0_bank) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache1_bank);

		/* the stored ALU opcode is pre-shifted by 3, undo that here */
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
			S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache1_mode) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache0_addr) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache1_addr) |
			S_SQ_CF_ALU_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
			S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
		break;
	case CF_CLASS_TEXTURE:
	case CF_CLASS_VERTEX:
		/* fetch clauses: COUNT is in 4-dword instructions, minus one */
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1) |
			S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_EXPORT:
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
		/* BURST_COUNT is encoded minus one */
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf->output.burst_count - 1) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_OTHER:
		/* flow control targets use cf_addr, not addr */
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_WORD1_COND(cf->cond) |
			S_SQ_CF_WORD1_POP_COUNT(cf->pop_count) |
			S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);

		break;
	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
	return 0;
}
1234
/* One live range of a GPR, in instruction-group ids as assigned by
 * r600_bc_optimize() (each CF node advances id to the next 0x100 block). */
struct gpr_usage_range {
	int replacement;	/* GPR this range is/should be remapped to, -1 if none */
	int32_t start;		/* id of the defining write, -1 if live on entry */
	int32_t end;		/* id of the last read, -1 if never read (still open) */
};

/* Per-GPR bookkeeping used by the register optimizer. */
struct gpr_usage {
	unsigned channels:4;	/* bitmask of channels read since their last full rewrite
				 * (set by notice_gpr_read, cleared by notice_gpr_write) */
	int32_t first_write;	/* id where the current multi-channel write sequence
				 * started, -1 when no write sequence is in progress */
	int32_t last_write[4];	/* per-channel id of the most recent write, -1 if none */
	unsigned nranges;	/* number of entries in ranges */
	struct gpr_usage_range *ranges;	/* realloc'ed array of live ranges */
};
1248
1249 static struct gpr_usage_range* add_gpr_usage_range(struct gpr_usage *usage)
1250 {
1251 usage->nranges++;
1252 usage->ranges = realloc(usage->ranges, usage->nranges * sizeof(struct gpr_usage_range));
1253 if (!usage->ranges)
1254 return NULL;
1255 return &usage->ranges[usage->nranges-1];
1256 }
1257
1258 static void notice_gpr_read(struct gpr_usage *usage, int32_t id, unsigned chan)
1259 {
1260 usage->channels |= 1 << chan;
1261 usage->first_write = -1;
1262 if (!usage->nranges) {
1263 struct gpr_usage_range* range = add_gpr_usage_range(usage);
1264 range->replacement = -1;
1265 range->start = -1;
1266 range->end = -1;
1267 }
1268 if (usage->ranges[usage->nranges-1].end < id)
1269 usage->ranges[usage->nranges-1].end = id;
1270 }
1271
1272 static void notice_gpr_rel_read(struct gpr_usage usage[128], int32_t id, unsigned chan)
1273 {
1274 unsigned i;
1275 for (i = 0; i < 128; ++i)
1276 notice_gpr_read(&usage[i], id, chan);
1277 }
1278
/* Remember that channel 'chan' of this GPR was last written by instruction
 * group 'id' (used later by is_barrier_needed()). */
static void notice_gpr_last_write(struct gpr_usage *usage, int32_t id, unsigned chan)
{
	usage->last_write[chan] = id;
}
1283
1284 static void notice_gpr_write(struct gpr_usage *usage, int32_t id, unsigned chan,
1285 int predicate, int prefered_replacement)
1286 {
1287 int32_t start = usage->first_write != -1 ? usage->first_write : id;
1288 usage->channels &= ~(1 << chan);
1289 if (usage->channels) {
1290 if (usage->first_write == -1)
1291 usage->first_write = id;
1292 } else if (!usage->nranges || (usage->ranges[usage->nranges-1].start != start && !predicate)) {
1293 usage->first_write = start;
1294 struct gpr_usage_range* range = add_gpr_usage_range(usage);
1295 range->replacement = prefered_replacement;
1296 range->start = start;
1297 range->end = -1;
1298 } else if (usage->ranges[usage->nranges-1].start == start && prefered_replacement != -1) {
1299 usage->ranges[usage->nranges-1].replacement = prefered_replacement;
1300 }
1301 notice_gpr_last_write(usage, id, chan);
1302 }
1303
1304 static void notice_gpr_rel_last_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
1305 {
1306 unsigned i;
1307 for (i = 0; i < 128; ++i)
1308 notice_gpr_last_write(&usage[i], id, chan);
1309 }
1310
1311 static void notice_gpr_rel_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
1312 {
1313 unsigned i;
1314 for (i = 0; i < 128; ++i)
1315 notice_gpr_write(&usage[i], id, chan, 1, -1);
1316 }
1317
1318 static void notice_alu_src_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128], int32_t id)
1319 {
1320 unsigned src, num_src;
1321
1322 num_src = r600_bc_get_num_operands(alu);
1323 for (src = 0; src < num_src; ++src) {
1324 // constants doesn't matter
1325 if (!is_gpr(alu->src[src].sel))
1326 continue;
1327
1328 if (alu->src[src].rel)
1329 notice_gpr_rel_read(usage, id, alu->src[src].chan);
1330 else
1331 notice_gpr_read(&usage[alu->src[src].sel], id, alu->src[src].chan);
1332 }
1333 }
1334
1335 static void notice_alu_dst_gprs(struct r600_bc_alu *alu_first, struct gpr_usage usage[128],
1336 int32_t id, int predicate)
1337 {
1338 struct r600_bc_alu *alu;
1339 for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
1340 if (alu->dst.write) {
1341 if (alu->dst.rel)
1342 notice_gpr_rel_write(usage, id, alu->dst.chan);
1343 else if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV && is_gpr(alu->src[0].sel))
1344 notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan,
1345 predicate, alu->src[0].sel);
1346 else
1347 notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan, predicate, -1);
1348 }
1349
1350 if (alu->last)
1351 break;
1352 }
1353 }
1354
/* Record the GPR accesses of one texture fetch instruction.
 *
 * Source selects < 4 address a real coordinate channel (larger values are
 * constants, presumably 0/1 - TODO confirm against the sel encoding);
 * destination selects of 7 mean the channel is masked out. */
static void notice_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
			    int32_t id, int predicate)
{
	if (tex->src_rel) {
		/* relative source: could read from any register */
		if (tex->src_sel_x < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_w);
	} else {
		if (tex->src_sel_x < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_w);
	}
	if (tex->dst_rel) {
		/* relative destination: could write to any register */
		if (tex->dst_sel_x != 7)
			notice_gpr_rel_write(usage, id, 0);
		if (tex->dst_sel_y != 7)
			notice_gpr_rel_write(usage, id, 1);
		if (tex->dst_sel_z != 7)
			notice_gpr_rel_write(usage, id, 2);
		if (tex->dst_sel_w != 7)
			notice_gpr_rel_write(usage, id, 3);
	} else {
		if (tex->dst_sel_x != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 0, predicate, -1);
		if (tex->dst_sel_y != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 1, predicate, -1);
		if (tex->dst_sel_z != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 2, predicate, -1);
		if (tex->dst_sel_w != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 3, predicate, -1);
	}
}
1397
/* Record the GPR accesses of one vertex fetch instruction: a single
 * source channel read plus up to four destination channel writes
 * (dst_sel of 7 means the channel is masked out). */
static void notice_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
			    int32_t id, int predicate)
{
	notice_gpr_read(&usage[vtx->src_gpr], id, vtx->src_sel_x);

	if (vtx->dst_sel_x != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 0, predicate, -1);
	if (vtx->dst_sel_y != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 1, predicate, -1);
	if (vtx->dst_sel_z != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 2, predicate, -1);
	if (vtx->dst_sel_w != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 3, predicate, -1);
}
1412
/* Record the GPR reads of an export CF node.
 *
 * The export is conceptually moved to the start of the next 0x100-aligned
 * id block after the last write of its source GPR, and its reads are
 * recorded at that remapped id.  The CF and remapped id are remembered in
 * export_cf/export_remap, indexed by GPR - hence the "exported only once
 * per gpr" assumption in r600_bc_optimize().
 *
 * NOTE(review): only last_write[0] is used to pick the remap point -
 * presumably channel 0 is always written last for exported values;
 * confirm against the shader generator. */
static void notice_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
			       struct r600_bc_cf *export_cf[128], int32_t export_remap[128])
{
	//TODO handle other memory operations
	struct gpr_usage *output = &usage[cf->output.gpr];
	int32_t id = (output->last_write[0] + 0x100) & ~0xFF;

	export_cf[cf->output.gpr] = cf;
	export_remap[cf->output.gpr] = id;
	/* swizzles >= 4 select constants, not register channels */
	if (cf->output.swizzle_x < 4)
		notice_gpr_read(output, id, cf->output.swizzle_x);
	if (cf->output.swizzle_y < 4)
		notice_gpr_read(output, id, cf->output.swizzle_y);
	if (cf->output.swizzle_z < 4)
		notice_gpr_read(output, id, cf->output.swizzle_z);
	if (cf->output.swizzle_w < 4)
		notice_gpr_read(output, id, cf->output.swizzle_w);
}
1431
1432 static struct gpr_usage_range *find_src_range(struct gpr_usage *usage, int32_t id)
1433 {
1434 unsigned i;
1435 for (i = 0; i < usage->nranges; ++i) {
1436 struct gpr_usage_range* range = &usage->ranges[i];
1437
1438 if (range->start < id && id <= range->end)
1439 return range;
1440 }
1441 return NULL;
1442 }
1443
1444 static struct gpr_usage_range *find_dst_range(struct gpr_usage *usage, int32_t id)
1445 {
1446 unsigned i;
1447 for (i = 0; i < usage->nranges; ++i) {
1448 struct gpr_usage_range* range = &usage->ranges[i];
1449 int32_t end = range->end;
1450
1451 if (range->start <= id && (id < end || end == -1))
1452 return range;
1453 }
1454 assert(0); /* should not happen */
1455 return NULL;
1456 }
1457
1458 static int is_barrier_needed(struct gpr_usage *usage, int32_t id, unsigned chan, int32_t last_barrier)
1459 {
1460 if (usage->last_write[chan] != (id & ~0xFF))
1461 return usage->last_write[chan] >= last_barrier;
1462 else
1463 return 0;
1464 }
1465
/* Do two usage ranges overlap in time?
 *
 * NOTE(review): the comparison is asymmetric (<= on one side, < on the
 * other) and an open range end of -1 compares below every start, so
 * open-ended ranges never intersect anything here - confirm both are
 * intentional before touching this. */
static int is_intersection(struct gpr_usage_range* a, struct gpr_usage_range* b)
{
	return a->start <= b->end && b->start < a->end;
}
1470
/* Score how well 'range' would fit into the free gaps of 'usage'.
 *
 * Returns -1 when 'range' overlaps any not-yet-remapped range of the
 * candidate register (it can't be used at all); otherwise returns the sum
 * of the smallest gap before and after 'range' - lower is a tighter,
 * better fit. */
static int rate_replacement(struct gpr_usage *usage, struct gpr_usage_range* range)
{
	unsigned i;
	int32_t best_start = 0x3FFFFFFF, best_end = 0x3FFFFFFF;

	for (i = 0; i < usage->nranges; ++i) {
		if (usage->ranges[i].replacement != -1)
			continue; /* ignore already remapped ranges */

		if (is_intersection(&usage->ranges[i], range))
			return -1; /* forget it if usages overlap */

		if (range->start >= usage->ranges[i].end)
			best_start = MIN2(best_start, range->start - usage->ranges[i].end);

		if (range->end != -1 && range->end <= usage->ranges[i].start)
			best_end = MIN2(best_end, usage->ranges[i].start - range->end);
	}
	return best_start + best_end;
}
1491
/* Pick a replacement GPR for 'range' of register 'current'.
 *
 * Preference order:
 *  1. the range's preferred replacement (MOV source), chasing one level of
 *     already-assigned replacement, if its ranges don't overlap;
 *  2. for ranges contained in a single ALU clause, one of the clause
 *     temporaries (GPRs 124-127);
 *  3. any lower-numbered register with non-overlapping ranges.
 * On success the chosen register gets a reservation range so later
 * queries see the slot as taken; on failure replacement stays -1. */
static void find_replacement(struct gpr_usage usage[128], unsigned current,
			     struct gpr_usage_range *range, int is_export)
{
	unsigned i;
	int best_gpr = -1, best_rate = 0x7FFFFFFF;

	/* chase one level of indirection through an already remapped source */
	if (range->replacement != -1 && range->replacement <= current) {
		struct gpr_usage_range *other = find_src_range(&usage[range->replacement], range->start);
		if (other && other->replacement != -1)
			range->replacement = other->replacement;
	}

	if (range->replacement != -1 && range->replacement < current) {
		int rate = rate_replacement(&usage[range->replacement], range);

		/* check if the preferred replacement can be used */
		if (rate != -1) {
			best_rate = rate;
			best_gpr = range->replacement;
		}
	}

	if (best_gpr == -1 && (range->start & ~0xFF) == (range->end & ~0xFF)) {
		/* register is just used inside one ALU clause */
		/* try to use the clause temporaries (127..124) for it */
		for (i = 127; i > 123; --i) {
			int rate = rate_replacement(&usage[i], range);

			if (rate == -1) /* can't be used because ranges overlap */
				continue;

			if (rate < best_rate) {
				best_rate = rate;
				best_gpr = i;

				/* can't get better than this */
				if (rate == 0 || is_export)
					break;
			}
		}
	}

	if (best_gpr == -1) {
		for (i = 0; i < current; ++i) {
			int rate = rate_replacement(&usage[i], range);

			if (rate == -1) /* can't be used because ranges overlap */
				continue;

			if (rate < best_rate) {
				best_rate = rate;
				best_gpr = i;

				/* can't get better than this */
				if (rate == 0)
					break;
			}
		}
	}

	range->replacement = best_gpr;
	if (best_gpr != -1) {
		/* NOTE(review): add_gpr_usage_range() may return NULL on OOM;
		 * unchecked here */
		struct gpr_usage_range *reservation = add_gpr_usage_range(&usage[best_gpr]);
		reservation->replacement = -1;
		reservation->start = range->start;
		reservation->end = range->end;
	}
}
1560
/* If the next export continues this one (same type, consecutive
 * array_base) and reads within this range, remap its source GPR to the
 * register right after ours - this sets up the burst merge done later by
 * optimize_export_inst(). */
static void find_export_replacement(struct gpr_usage usage[128],
				    struct gpr_usage_range *range, struct r600_bc_cf *current,
				    struct r600_bc_cf *next, int32_t next_id)
{
	if (!next || next_id <= range->start || next_id > range->end)
		return;

	if (current->output.type != next->output.type)
		return;

	if ((current->output.array_base + 1) != next->output.array_base)
		return;

	/* NOTE(review): find_src_range() may return NULL; unchecked here */
	find_src_range(&usage[next->output.gpr], next_id)->replacement = range->replacement + 1;
}
1576
/* Second pass for an ALU instruction: apply the chosen register
 * replacements to sources and destination, accumulate whether the clause
 * needs a barrier, and update the last-write bookkeeping.
 *
 * NOTE(review): find_src_range() may return NULL; unchecked here. */
static void replace_alu_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128],
			     int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage;
	struct gpr_usage_range *range;
	unsigned src, num_src;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; ++src) {
		// constants doesn't matter
		if (!is_gpr(alu->src[src].sel))
			continue;

		cur_usage = &usage[alu->src[src].sel];
		range = find_src_range(cur_usage, id);
		if (range->replacement != -1)
			alu->src[src].sel = range->replacement;

		*barrier |= is_barrier_needed(cur_usage, id, alu->src[src].chan, last_barrier);
	}

	if (alu->dst.write) {
		cur_usage = &usage[alu->dst.sel];
		range = find_dst_range(cur_usage, id);
		if (range->replacement == alu->dst.sel) {
			/* replacement equals the destination: the result is
			 * dead, so drop the write (OP3 has no write mask,
			 * redirect it to a clause temporary instead) */
			if (!alu->is_op3)
				alu->dst.write = 0;
			else
				/*TODO: really check that register 123 is usable */
				alu->dst.sel = 123;
		} else if (range->replacement != -1) {
			alu->dst.sel = range->replacement;
		}
		if (alu->dst.rel)
			notice_gpr_rel_last_write(usage, id, alu->dst.chan);
		else
			notice_gpr_last_write(cur_usage, id, alu->dst.chan);
	}
}
1616
/* Second pass for a texture fetch: apply register replacements to source
 * and destination, accumulate the clause barrier flag, and update the
 * last-write bookkeeping.  A relative source forces a barrier since any
 * register could be read. */
static void replace_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
			     int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage = &usage[tex->src_gpr];
	struct gpr_usage_range *range = find_src_range(cur_usage, id);

	if (tex->src_rel) {
		*barrier = 1;
	} else {
		if (tex->src_sel_x < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_x, last_barrier);
		if (tex->src_sel_y < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_y, last_barrier);
		if (tex->src_sel_z < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_z, last_barrier);
		if (tex->src_sel_w < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_w, last_barrier);
	}

	if (range->replacement != -1)
		tex->src_gpr = range->replacement;

	cur_usage = &usage[tex->dst_gpr];
	range = find_dst_range(cur_usage, id);
	if (range->replacement != -1)
		tex->dst_gpr = range->replacement;

	if (tex->dst_rel) {
		if (tex->dst_sel_x != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_x);
		if (tex->dst_sel_y != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_y);
		if (tex->dst_sel_z != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_z);
		if (tex->dst_sel_w != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_w);
	} else {
		if (tex->dst_sel_x != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_x);
		if (tex->dst_sel_y != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_y);
		if (tex->dst_sel_z != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_z);
		if (tex->dst_sel_w != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_w);
	}
}
1664
/* Second pass for a vertex fetch: apply register replacements to source
 * and destination, accumulate the clause barrier flag, and update the
 * last-write bookkeeping. */
static void replace_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
			     int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage = &usage[vtx->src_gpr];
	struct gpr_usage_range *range = find_src_range(cur_usage, id);

	*barrier |= is_barrier_needed(cur_usage, id, vtx->src_sel_x, last_barrier);

	if (range->replacement != -1)
		vtx->src_gpr = range->replacement;

	cur_usage = &usage[vtx->dst_gpr];
	range = find_dst_range(cur_usage, id);
	if (range->replacement != -1)
		vtx->dst_gpr = range->replacement;

	if (vtx->dst_sel_x != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_x);
	if (vtx->dst_sel_y != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_y);
	if (vtx->dst_sel_z != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_z);
	if (vtx->dst_sel_w != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_w);
}
1690
/* Second pass for an export: compute its barrier flag from the channels
 * it actually reads (id -1 so the same-group shortcut in
 * is_barrier_needed() never applies) and apply the register replacement. */
static void replace_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
				int32_t id, int32_t last_barrier)
{
	//TODO handle other memory operations
	struct gpr_usage *cur_usage = &usage[cf->output.gpr];
	struct gpr_usage_range *range = find_src_range(cur_usage, id);

	cf->barrier = 0;
	/* swizzles >= 4 select constants, not register channels */
	if (cf->output.swizzle_x < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_x, last_barrier);
	if (cf->output.swizzle_y < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_y, last_barrier);
	if (cf->output.swizzle_z < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_z, last_barrier);
	if (cf->output.swizzle_w < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_w, last_barrier);

	if (range->replacement != -1)
		cf->output.gpr = range->replacement;
}
1711
/* Remove a MOV that copies a register channel onto itself, provided no
 * instruction in the following group reads the removed result through
 * PV/PS (the previous-vector/previous-scalar forwarding registers). */
static void optimize_alu_inst(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
{
	struct r600_bc_alu *alu_next;
	unsigned chan;
	unsigned src, num_src;

	/* check if a MOV could be optimized away */
	if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV) {

		/* bail out unless destination equals source */
		if (alu->dst.sel != alu->src[0].sel ||
		    alu->dst.chan != alu->src[0].chan)
			return;

		/* any special handling for the source? */
		if (alu->src[0].rel || alu->src[0].neg || alu->src[0].abs)
			return;

		/* any special handling for destination? */
		if (alu->dst.rel || alu->dst.clamp)
			return;

		/* ok find next instruction group and check if ps/pv is used */
		for (alu_next = alu; !alu_next->last; alu_next = NEXT_ALU(alu_next));

		if (alu_next->list.next != &cf->alu) {
			/* reduction results go to PV.x regardless of dst.chan */
			chan = is_alu_reduction_inst(alu) ? 0 : alu->dst.chan;
			for (alu_next = NEXT_ALU(alu_next); alu_next; alu_next = NEXT_ALU(alu_next)) {
				num_src = r600_bc_get_num_operands(alu_next);
				for (src = 0; src < num_src; ++src) {
					if (alu_next->src[src].sel == V_SQ_ALU_SRC_PV &&
					    alu_next->src[src].chan == chan)
						return;

					if (alu_next->src[src].sel == V_SQ_ALU_SRC_PS)
						return;
				}

				if (alu_next->last)
					break;
			}
		}

		r600_bc_remove_alu(cf, alu);
	}
}
1758
/* Try to merge this export with the directly preceding one into a single
 * burst export.  The two must use the same instruction, type, element
 * size and swizzle, their GPRs and array bases must be consecutive in the
 * same direction, and the combined burst may not exceed 16. */
static void optimize_export_inst(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, cf->list.prev, list);
	if (&prev->list == &bc->cf ||
	    prev->inst != cf->inst ||
	    prev->output.type != cf->output.type ||
	    prev->output.elem_size != cf->output.elem_size ||
	    prev->output.swizzle_x != cf->output.swizzle_x ||
	    prev->output.swizzle_y != cf->output.swizzle_y ||
	    prev->output.swizzle_z != cf->output.swizzle_z ||
	    prev->output.swizzle_w != cf->output.swizzle_w)
		return;

	/* hardware burst count field is limited to 16 exports */
	if ((prev->output.burst_count + cf->output.burst_count) > 16)
		return;

	if ((prev->output.gpr + prev->output.burst_count) == cf->output.gpr &&
	    (prev->output.array_base + prev->output.burst_count) == cf->output.array_base) {

		/* cf directly follows prev: fold it into prev */
		prev->output.burst_count += cf->output.burst_count;
		r600_bc_remove_cf(bc, cf);

	} else if (prev->output.gpr == (cf->output.gpr + cf->output.burst_count) &&
		   prev->output.array_base == (cf->output.array_base + cf->output.burst_count)) {

		/* prev directly follows cf: fold it into cf */
		cf->output.burst_count += prev->output.burst_count;
		r600_bc_remove_cf(bc, prev);
	}
}
1788
/* Optimize the register usage of a shader in two passes:
 *
 *   1. walk all CF nodes and record per-GPR live ranges (instruction
 *      group ids; each CF node occupies its own 0x100-aligned id block);
 *   2. pick replacement registers for the ranges, then walk again to
 *      rewrite the instructions, compute clause barriers, drop dead
 *      MOVs, move exports to the end of their source's id block and
 *      merge adjacent exports into bursts.
 *
 * Loops are not handled yet - on the first loop construct pass 1 bails
 * out and the shader is left untouched. */
static void r600_bc_optimize(struct r600_bc *bc)
{
	struct r600_bc_cf *cf, *next_cf;
	struct r600_bc_alu *first, *next_alu;
	struct r600_bc_alu *alu;
	struct r600_bc_vtx *vtx;
	struct r600_bc_tex *tex;
	struct gpr_usage usage[128];

	/* assume that each gpr is exported only once */
	struct r600_bc_cf *export_cf[128] = { NULL };
	int32_t export_remap[128];

	/* barrier[] is a VLA indexed by the predicate stack depth;
	 * bc->nstack must be >= 1 since barrier[0] is always used */
	int32_t id, barrier[bc->nstack];
	unsigned i, j, stack, predicate, old_stack;

	memset(&usage, 0, sizeof(usage));
	for (i = 0; i < 128; ++i) {
		usage[i].first_write = -1;
		usage[i].last_write[0] = -1;
		usage[i].last_write[1] = -1;
		usage[i].last_write[2] = -1;
		usage[i].last_write[3] = -1;
	}

	/* first gather some information about the gpr usage */
	id = 0; stack = 0;
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		switch (get_cf_class(cf)) {
		case CF_CLASS_ALU:
			predicate = 0;
			first = NULL;
			LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
				if (!first)
					first = alu;
				notice_alu_src_gprs(alu, usage, id);
				if (alu->last) {
					/* writes inside a predicated clause or
					 * conditional block are conditional */
					notice_alu_dst_gprs(first, usage, id, predicate || stack > 0);
					first = NULL;
					++id;
				}
				if (is_alu_pred_inst(alu))
					predicate++;
			}
			/* track the predicate stack depth */
			if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
				stack += predicate;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
				stack -= 1;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
				stack -= 2;
			break;
		case CF_CLASS_TEXTURE:
			LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
				notice_tex_gprs(tex, usage, id++, stack > 0);
			}
			break;
		case CF_CLASS_VERTEX:
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				notice_vtx_gprs(vtx, usage, id++, stack > 0);
			}
			break;
		case CF_CLASS_EXPORT:
			notice_export_gprs(cf, usage, export_cf, export_remap);
			continue; // don't increment id
		case CF_CLASS_OTHER:
			switch (cf->inst) {
			case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
			case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
			case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
				break;

			case V_SQ_CF_WORD1_SQ_CF_INST_POP:
				stack -= cf->pop_count;
				break;

			default:
				// TODO implement loop handling
				goto out;
			}
		}
		/* each CF node gets its own 0x100-aligned id block */
		id += 0x100;
		id &= ~0xFF;
	}
	assert(stack == 0);

	/* try to optimize gpr usage; registers 124-127 are clause
	 * temporaries and handled separately in find_replacement() */
	for (i = 0; i < 124; ++i) {
		for (j = 0; j < usage[i].nranges; ++j) {
			struct gpr_usage_range *range = &usage[i].ranges[j];
			/* a range feeding an export pair can be burst-merged */
			int is_export = export_cf[i] && export_cf[i + 1] &&
				range->start < export_remap[i] &&
				export_remap[i] <= range->end;

			if (range->start == -1)
				range->replacement = -1;	/* live on entry - keep */
			else if (range->end == -1)
				range->replacement = i;		/* never read - dead */
			else
				find_replacement(usage, i, range, is_export);

			/* track the highest register index still in use */
			if (range->replacement == -1)
				bc->ngpr = i;
			else if (range->replacement < i && range->replacement > bc->ngpr)
				bc->ngpr = range->replacement;

			if (is_export && range->replacement != -1) {
				find_export_replacement(usage, range, export_cf[i],
							export_cf[i + 1], export_remap[i + 1]);
			}
		}
	}
	bc->ngpr++;

	/* apply the changes */
	for (i = 0; i < 128; ++i) {
		usage[i].last_write[0] = -1;
		usage[i].last_write[1] = -1;
		usage[i].last_write[2] = -1;
		usage[i].last_write[3] = -1;
	}
	barrier[0] = 0;
	id = 0; stack = 0;
	LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
		old_stack = stack;
		switch (get_cf_class(cf)) {
		case CF_CLASS_ALU:
			predicate = 0;
			first = NULL;
			cf->barrier = 0;
			LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
				replace_alu_gprs(alu, usage, id, barrier[stack], &cf->barrier);
				if (alu->last)
					++id;

				if (is_alu_pred_inst(alu))
					predicate++;

				/* only plain ALU clauses may lose instructions */
				if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)
					optimize_alu_inst(cf, alu);
			}
			if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
				stack += predicate;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
				stack -= 1;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
				stack -= 2;
			/* drop the clause entirely if everything was removed */
			if (LIST_IS_EMPTY(&cf->alu)) {
				r600_bc_remove_cf(bc, cf);
				cf = NULL;
			}
			break;
		case CF_CLASS_TEXTURE:
			cf->barrier = 0;
			LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
				replace_tex_gprs(tex, usage, id++, barrier[stack], &cf->barrier);
			}
			break;
		case CF_CLASS_VERTEX:
			cf->barrier = 0;
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				replace_vtx_gprs(vtx, usage, id++, barrier[stack], &cf->barrier);
			}
			break;
		case CF_CLASS_EXPORT:
			continue; // don't increment id
		case CF_CLASS_OTHER:
			if (cf->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
				cf->barrier = 0;
				stack -= cf->pop_count;
			}
			break;
		}

		id &= ~0xFF;
		if (cf && cf->barrier)
			barrier[old_stack] = id;

		/* newly pushed stack levels inherit the current barrier */
		for (i = old_stack + 1; i <= stack; ++i)
			barrier[i] = barrier[old_stack];

		id += 0x100;
		if (stack != 0) /* ensure exports are placed outside of conditional blocks */
			continue;

		/* emit any pending exports whose remapped id has been reached */
		for (i = 0; i < 128; ++i) {
			if (!export_cf[i] || id < export_remap[i])
				continue;

			r600_bc_move_cf(bc, export_cf[i], next_cf);
			replace_export_gprs(export_cf[i], usage, export_remap[i], barrier[stack]);
			if (export_cf[i]->barrier)
				barrier[stack] = id - 1;
			next_cf = LIST_ENTRY(struct r600_bc_cf, export_cf[i]->list.next, list);
			optimize_export_inst(bc, export_cf[i]);
			export_cf[i] = NULL;
		}
	}
	assert(stack == 0);

out:
	for (i = 0; i < 128; ++i) {
		free(usage[i].ranges);
	}
}
1993
1994 int r600_bc_build(struct r600_bc *bc)
1995 {
1996 struct r600_bc_cf *cf;
1997 struct r600_bc_alu *alu;
1998 struct r600_bc_vtx *vtx;
1999 struct r600_bc_tex *tex;
2000 struct r600_bc_cf *exports[4] = { NULL };
2001 uint32_t literal[4];
2002 unsigned nliteral;
2003 unsigned addr;
2004 int i, r;
2005
2006 if (bc->callstack[0].max > 0)
2007 bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
2008 if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
2009 bc->nstack = 1;
2010 }
2011
2012 r600_bc_optimize(bc);
2013
2014 /* first path compute addr of each CF block */
2015 /* addr start after all the CF instructions */
2016 addr = LIST_ENTRY(struct r600_bc_cf, bc->cf.prev, list)->id + 2;
2017 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2018 switch (get_cf_class(cf)) {
2019 case CF_CLASS_ALU:
2020 nliteral = 0;
2021 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2022 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2023 if (r)
2024 return r;
2025 if (alu->last) {
2026 cf->ndw += align(nliteral, 2);
2027 nliteral = 0;
2028 }
2029 }
2030 break;
2031 case CF_CLASS_TEXTURE:
2032 case CF_CLASS_VERTEX:
2033 /* fetch node need to be 16 bytes aligned*/
2034 addr += 3;
2035 addr &= 0xFFFFFFFCUL;
2036 break;
2037 break;
2038 case CF_CLASS_EXPORT:
2039 if (cf->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT))
2040 exports[cf->output.type] = cf;
2041 break;
2042 case CF_CLASS_OTHER:
2043 break;
2044 default:
2045 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2046 return -EINVAL;
2047 }
2048 cf->addr = addr;
2049 addr += cf->ndw;
2050 bc->ndw = cf->addr + cf->ndw;
2051 }
2052
2053 /* set export done on last export of each type */
2054 for (i = 0; i < 4; ++i) {
2055 if (exports[i]) {
2056 exports[i]->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
2057 }
2058 }
2059
2060 free(bc->bytecode);
2061 bc->bytecode = calloc(1, bc->ndw * 4);
2062 if (bc->bytecode == NULL)
2063 return -ENOMEM;
2064 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2065 addr = cf->addr;
2066 if (bc->chiprev == CHIPREV_EVERGREEN)
2067 r = eg_bc_cf_build(bc, cf);
2068 else
2069 r = r600_bc_cf_build(bc, cf);
2070 if (r)
2071 return r;
2072 switch (get_cf_class(cf)) {
2073 case CF_CLASS_ALU:
2074 nliteral = 0;
2075 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2076 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2077 if (r)
2078 return r;
2079 r600_bc_alu_adjust_literals(alu, literal, nliteral);
2080 switch(bc->chiprev) {
2081 case CHIPREV_R600:
2082 r = r600_bc_alu_build(bc, alu, addr);
2083 break;
2084 case CHIPREV_R700:
2085 case CHIPREV_EVERGREEN: /* eg alu is same encoding as r700 */
2086 r = r700_bc_alu_build(bc, alu, addr);
2087 break;
2088 default:
2089 R600_ERR("unknown family %d\n", bc->family);
2090 return -EINVAL;
2091 }
2092 if (r)
2093 return r;
2094 addr += 2;
2095 if (alu->last) {
2096 for (i = 0; i < align(nliteral, 2); ++i) {
2097 bc->bytecode[addr++] = literal[i];
2098 }
2099 nliteral = 0;
2100 }
2101 }
2102 break;
2103 case CF_CLASS_VERTEX:
2104 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2105 r = r600_bc_vtx_build(bc, vtx, addr);
2106 if (r)
2107 return r;
2108 addr += 4;
2109 }
2110 break;
2111 case CF_CLASS_TEXTURE:
2112 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2113 r = r600_bc_tex_build(bc, tex, addr);
2114 if (r)
2115 return r;
2116 addr += 4;
2117 }
2118 break;
2119 case CF_CLASS_EXPORT:
2120 case CF_CLASS_OTHER:
2121 break;
2122 default:
2123 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2124 return -EINVAL;
2125 }
2126 }
2127 return 0;
2128 }
2129
2130 void r600_bc_clear(struct r600_bc *bc)
2131 {
2132 struct r600_bc_cf *cf = NULL, *next_cf;
2133
2134 free(bc->bytecode);
2135 bc->bytecode = NULL;
2136
2137 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
2138 struct r600_bc_alu *alu = NULL, *next_alu;
2139 struct r600_bc_tex *tex = NULL, *next_tex;
2140 struct r600_bc_tex *vtx = NULL, *next_vtx;
2141
2142 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
2143 free(alu);
2144 }
2145
2146 LIST_INITHEAD(&cf->alu);
2147
2148 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
2149 free(tex);
2150 }
2151
2152 LIST_INITHEAD(&cf->tex);
2153
2154 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
2155 free(vtx);
2156 }
2157
2158 LIST_INITHEAD(&cf->vtx);
2159
2160 free(cf);
2161 }
2162
2163 LIST_INITHEAD(&cf->list);
2164 }
2165
2166 void r600_bc_dump(struct r600_bc *bc)
2167 {
2168 struct r600_bc_cf *cf;
2169 struct r600_bc_alu *alu;
2170 struct r600_bc_vtx *vtx;
2171 struct r600_bc_tex *tex;
2172
2173 unsigned i, id;
2174 uint32_t literal[4];
2175 unsigned nliteral;
2176 char chip = '6';
2177
2178 switch (bc->chiprev) {
2179 case 1:
2180 chip = '7';
2181 break;
2182 case 2:
2183 chip = 'E';
2184 break;
2185 case 0:
2186 default:
2187 chip = '6';
2188 break;
2189 }
2190 fprintf(stderr, "bytecode %d dw -- %d gprs -----------------------\n", bc->ndw, bc->ngpr);
2191 fprintf(stderr, " %c\n", chip);
2192
2193 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2194 id = cf->id;
2195
2196 switch (get_cf_class(cf)) {
2197 case CF_CLASS_ALU:
2198 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2199 fprintf(stderr, "ADDR:%04d ", cf->addr);
2200 fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache0_mode);
2201 fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache0_bank);
2202 fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache1_bank);
2203 id++;
2204 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2205 fprintf(stderr, "INST:%d ", cf->inst);
2206 fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache1_mode);
2207 fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache0_addr);
2208 fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache1_addr);
2209 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2210 fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
2211 break;
2212 case CF_CLASS_TEXTURE:
2213 case CF_CLASS_VERTEX:
2214 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2215 fprintf(stderr, "ADDR:%04d\n", cf->addr);
2216 id++;
2217 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2218 fprintf(stderr, "INST:%d ", cf->inst);
2219 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2220 fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
2221 break;
2222 case CF_CLASS_EXPORT:
2223 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2224 fprintf(stderr, "GPR:%d ", cf->output.gpr);
2225 fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
2226 fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
2227 fprintf(stderr, "TYPE:%X\n", cf->output.type);
2228 id++;
2229 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2230 fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
2231 fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
2232 fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
2233 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
2234 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
2235 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2236 fprintf(stderr, "INST:%d ", cf->inst);
2237 fprintf(stderr, "BURST_COUNT:%d\n", cf->output.burst_count);
2238 break;
2239 case CF_CLASS_OTHER:
2240 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2241 fprintf(stderr, "ADDR:%04d\n", cf->cf_addr);
2242 id++;
2243 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2244 fprintf(stderr, "INST:%d ", cf->inst);
2245 fprintf(stderr, "COND:%X ", cf->cond);
2246 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2247 fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
2248 break;
2249 }
2250
2251 id = cf->addr;
2252 nliteral = 0;
2253 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2254 r600_bc_alu_nliterals(alu, literal, &nliteral);
2255
2256 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2257 fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
2258 fprintf(stderr, "REL:%d ", alu->src[0].rel);
2259 fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
2260 fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
2261 fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
2262 fprintf(stderr, "REL:%d ", alu->src[1].rel);
2263 fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
2264 fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
2265 fprintf(stderr, "LAST:%d)\n", alu->last);
2266 id++;
2267 fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
2268 fprintf(stderr, "INST:%d ", alu->inst);
2269 fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
2270 fprintf(stderr, "CHAN:%d ", alu->dst.chan);
2271 fprintf(stderr, "REL:%d ", alu->dst.rel);
2272 fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
2273 fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
2274 if (alu->is_op3) {
2275 fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
2276 fprintf(stderr, "REL:%d ", alu->src[2].rel);
2277 fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
2278 fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
2279 } else {
2280 fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
2281 fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
2282 fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
2283 fprintf(stderr, "OMOD:%d ", alu->omod);
2284 fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
2285 fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
2286 }
2287
2288 id++;
2289 if (alu->last) {
2290 for (i = 0; i < nliteral; i++, id++) {
2291 float *f = (float*)(bc->bytecode + id);
2292 fprintf(stderr, "%04d %08X %f\n", id, bc->bytecode[id], *f);
2293 }
2294 id += nliteral & 1;
2295 nliteral = 0;
2296 }
2297 }
2298
2299 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2300 //TODO
2301 }
2302
2303 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2304 //TODO
2305 }
2306 }
2307
2308 fprintf(stderr, "--------------------------------------\n");
2309 }
2310
2311 void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2312 {
2313 struct r600_pipe_state *rstate;
2314 unsigned i = 0;
2315
2316 if (count > 8) {
2317 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2318 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2319 S_SQ_CF_WORD1_BARRIER(0) |
2320 S_SQ_CF_WORD1_COUNT(8 - 1);
2321 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2322 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2323 S_SQ_CF_WORD1_BARRIER(0) |
2324 S_SQ_CF_WORD1_COUNT(count - 8 - 1);
2325 } else {
2326 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2327 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2328 S_SQ_CF_WORD1_BARRIER(0) |
2329 S_SQ_CF_WORD1_COUNT(count - 1);
2330 }
2331 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2332 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2333 S_SQ_CF_WORD1_BARRIER(0);
2334
2335 rstate = &ve->rstate;
2336 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2337 rstate->nregs = 0;
2338 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2339 0x00000000, 0xFFFFFFFF, NULL);
2340 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2341 0x00000000, 0xFFFFFFFF, NULL);
2342 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2343 r600_bo_offset(ve->fetch_shader) >> 8,
2344 0xFFFFFFFF, ve->fetch_shader);
2345 }
2346
2347 void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2348 {
2349 struct r600_pipe_state *rstate;
2350 unsigned i = 0;
2351
2352 if (count > 8) {
2353 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2354 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2355 S_SQ_CF_WORD1_BARRIER(0) |
2356 S_SQ_CF_WORD1_COUNT(8 - 1);
2357 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2358 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2359 S_SQ_CF_WORD1_BARRIER(0) |
2360 S_SQ_CF_WORD1_COUNT((count - 8) - 1);
2361 } else {
2362 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2363 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2364 S_SQ_CF_WORD1_BARRIER(0) |
2365 S_SQ_CF_WORD1_COUNT(count - 1);
2366 }
2367 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2368 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2369 S_SQ_CF_WORD1_BARRIER(0);
2370
2371 rstate = &ve->rstate;
2372 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2373 rstate->nregs = 0;
2374 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2375 0x00000000, 0xFFFFFFFF, NULL);
2376 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2377 0x00000000, 0xFFFFFFFF, NULL);
2378 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2379 r600_bo_offset(ve->fetch_shader) >> 8,
2380 0xFFFFFFFF, ve->fetch_shader);
2381 }
2382
/* Translate a gallium vertex format into the r600 vertex-fetch encoding:
 * *format receives the FMT_* hardware data format, *num_format the
 * NUM_FORMAT_ALL field (0 = normalized, 2 = scaled/unnormalized) and
 * *format_comp the FORMAT_COMP_ALL field (1 = signed components).
 * On an unsupported format only an error is logged; all three outputs
 * keep their zero defaults. */
static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
				unsigned *num_format, unsigned *format_comp)
{
	const struct util_format_description *desc;
	unsigned i;

	*format = 0;
	*num_format = 0;
	*format_comp = 0;

	desc = util_format_description(pformat);
	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	switch (desc->channel[i].type) {
	/* Half-floats, floats, doubles */
	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				*format = FMT_16_FLOAT;
				break;
			case 2:
				*format = FMT_16_16_FLOAT;
				break;
			case 3:
				*format = FMT_16_16_16_FLOAT;
				break;
			case 4:
				*format = FMT_16_16_16_16_FLOAT;
				break;
			}
			break;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				*format = FMT_32_FLOAT;
				break;
			case 2:
				*format = FMT_32_32_FLOAT;
				break;
			case 3:
				*format = FMT_32_32_32_FLOAT;
				break;
			case 4:
				*format = FMT_32_32_32_32_FLOAT;
				break;
			}
			break;
		default:
			goto out_unknown;
		}
		break;
	/* Unsigned ints */
	case UTIL_FORMAT_TYPE_UNSIGNED:
	/* Signed ints */
	case UTIL_FORMAT_TYPE_SIGNED:
		switch (desc->channel[i].size) {
		case 8:
			switch (desc->nr_channels) {
			case 1:
				*format = FMT_8;
				break;
			case 2:
				*format = FMT_8_8;
				break;
			case 3:
//				*format = FMT_8_8_8; /* fails piglit draw-vertices test */
//				break;
				/* fallthrough: 3-channel fetched as 4-channel */
			case 4:
				*format = FMT_8_8_8_8;
				break;
			}
			break;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				*format = FMT_16;
				break;
			case 2:
				*format = FMT_16_16;
				break;
			case 3:
//				*format = FMT_16_16_16; /* fails piglit draw-vertices test */
//				break;
				/* fallthrough: 3-channel fetched as 4-channel */
			case 4:
				*format = FMT_16_16_16_16;
				break;
			}
			break;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				*format = FMT_32;
				break;
			case 2:
				*format = FMT_32_32;
				break;
			case 3:
				*format = FMT_32_32_32;
				break;
			case 4:
				*format = FMT_32_32_32_32;
				break;
			}
			break;
		default:
			goto out_unknown;
		}
		break;
	default:
		goto out_unknown;
	}

	if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		*format_comp = 1;
	}
	if (desc->channel[i].normalized) {
		*num_format = 0;
	} else {
		*num_format = 2;
	}
	return;
out_unknown:
	R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
}
2518
2519 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
2520 {
2521 unsigned ndw, i;
2522 u32 *bytecode;
2523 unsigned fetch_resource_start = 0, format, num_format, format_comp;
2524 struct pipe_vertex_element *elements = ve->elements;
2525 const struct util_format_description *desc;
2526
2527 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
2528 ndw = 8 + ve->count * 4;
2529 ve->fs_size = ndw * 4;
2530
2531 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2532 ve->fetch_shader = r600_bo(rctx->radeon, ndw*4, 256, PIPE_BIND_VERTEX_BUFFER, 0);
2533 if (ve->fetch_shader == NULL) {
2534 return -ENOMEM;
2535 }
2536
2537 bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, 0, NULL);
2538 if (bytecode == NULL) {
2539 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2540 return -ENOMEM;
2541 }
2542
2543 if (rctx->family >= CHIP_CEDAR) {
2544 eg_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2545 } else {
2546 r600_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2547 fetch_resource_start = 160;
2548 }
2549
2550 /* vertex elements offset need special handling, if offset is bigger
2551 * than what we can put in fetch instruction then we need to alterate
2552 * the vertex resource offset. In such case in order to simplify code
2553 * we will bound one resource per elements. It's a worst case scenario.
2554 */
2555 for (i = 0; i < ve->count; i++) {
2556 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
2557 if (ve->vbuffer_offset[i]) {
2558 ve->vbuffer_need_offset = 1;
2559 }
2560 }
2561
2562 for (i = 0; i < ve->count; i++) {
2563 unsigned vbuffer_index;
2564 r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp);
2565 desc = util_format_description(ve->hw_format[i]);
2566 if (desc == NULL) {
2567 R600_ERR("unknown format %d\n", ve->hw_format[i]);
2568 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2569 return -EINVAL;
2570 }
2571
2572 /* see above for vbuffer_need_offset explanation */
2573 vbuffer_index = elements[i].vertex_buffer_index;
2574 if (ve->vbuffer_need_offset) {
2575 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i + fetch_resource_start);
2576 } else {
2577 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index + fetch_resource_start);
2578 }
2579 bytecode[8 + i * 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2580 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2581 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2582 bytecode[8 + i * 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc->swizzle[0]) |
2583 S_SQ_VTX_WORD1_DST_SEL_Y(desc->swizzle[1]) |
2584 S_SQ_VTX_WORD1_DST_SEL_Z(desc->swizzle[2]) |
2585 S_SQ_VTX_WORD1_DST_SEL_W(desc->swizzle[3]) |
2586 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2587 S_SQ_VTX_WORD1_DATA_FORMAT(format) |
2588 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format) |
2589 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp) |
2590 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2591 S_SQ_VTX_WORD1_GPR_DST_GPR(i + 1);
2592 bytecode[8 + i * 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements[i].src_offset) |
2593 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2594 bytecode[8 + i * 4 + 3] = 0;
2595 }
2596 r600_bo_unmap(rctx->radeon, ve->fetch_shader);
2597 return 0;
2598 }