r600g: fully implement barrier handling
[mesa.git] / src / gallium / drivers / r600 / r600_asm.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
29 #include "r600_sq.h"
30 #include "r600_opcodes.h"
31 #include "r600_asm.h"
32 #include "r600_formats.h"
33 #include "r600d.h"
34
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
37
38 #define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
39 #define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)
40
41 static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu *alu)
42 {
43 if(alu->is_op3)
44 return 3;
45
46 switch (alu->inst) {
47 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
48 return 0;
49 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
68 return 2;
69
70 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
71 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
83 return 1;
84 default: R600_ERR(
85 "Need instruction operand number for 0x%x.\n", alu->inst);
86 };
87
88 return 3;
89 }
90
91 int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);
92
93 static struct r600_bc_cf *r600_bc_cf(void)
94 {
95 struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);
96
97 if (cf == NULL)
98 return NULL;
99 LIST_INITHEAD(&cf->list);
100 LIST_INITHEAD(&cf->alu);
101 LIST_INITHEAD(&cf->vtx);
102 LIST_INITHEAD(&cf->tex);
103 cf->barrier = 1;
104 return cf;
105 }
106
107 static struct r600_bc_alu *r600_bc_alu(void)
108 {
109 struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);
110
111 if (alu == NULL)
112 return NULL;
113 LIST_INITHEAD(&alu->list);
114 return alu;
115 }
116
117 static struct r600_bc_vtx *r600_bc_vtx(void)
118 {
119 struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);
120
121 if (vtx == NULL)
122 return NULL;
123 LIST_INITHEAD(&vtx->list);
124 return vtx;
125 }
126
127 static struct r600_bc_tex *r600_bc_tex(void)
128 {
129 struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);
130
131 if (tex == NULL)
132 return NULL;
133 LIST_INITHEAD(&tex->list);
134 return tex;
135 }
136
137 int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
138 {
139 LIST_INITHEAD(&bc->cf);
140 bc->family = family;
141 switch (bc->family) {
142 case CHIP_R600:
143 case CHIP_RV610:
144 case CHIP_RV630:
145 case CHIP_RV670:
146 case CHIP_RV620:
147 case CHIP_RV635:
148 case CHIP_RS780:
149 case CHIP_RS880:
150 bc->chiprev = CHIPREV_R600;
151 break;
152 case CHIP_RV770:
153 case CHIP_RV730:
154 case CHIP_RV710:
155 case CHIP_RV740:
156 bc->chiprev = CHIPREV_R700;
157 break;
158 case CHIP_CEDAR:
159 case CHIP_REDWOOD:
160 case CHIP_JUNIPER:
161 case CHIP_CYPRESS:
162 case CHIP_HEMLOCK:
163 case CHIP_PALM:
164 bc->chiprev = CHIPREV_EVERGREEN;
165 break;
166 default:
167 R600_ERR("unknown family %d\n", bc->family);
168 return -EINVAL;
169 }
170 return 0;
171 }
172
173 static int r600_bc_add_cf(struct r600_bc *bc)
174 {
175 struct r600_bc_cf *cf = r600_bc_cf();
176
177 if (cf == NULL)
178 return -ENOMEM;
179 LIST_ADDTAIL(&cf->list, &bc->cf);
180 if (bc->cf_last)
181 cf->id = bc->cf_last->id + 2;
182 bc->cf_last = cf;
183 bc->ncf++;
184 bc->ndw += 2;
185 bc->force_add_cf = 0;
186 return 0;
187 }
188
189 static void r600_bc_remove_cf(struct r600_bc *bc, struct r600_bc_cf *cf)
190 {
191 struct r600_bc_cf *other;
192 LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
193 if (other->id > cf->id)
194 other->id -= 2;
195 if (other->cf_addr > cf->id)
196 other->cf_addr -= 2;
197 }
198 LIST_DEL(&cf->list);
199 free(cf);
200 }
201
/* Move a CF instruction so it immediately precedes "next", renumbering
 * every CF id and recorded jump target (cf_addr) for the 2-dword hole
 * that closes at the old position and opens at the new one. */
static void r600_bc_move_cf(struct r600_bc *bc, struct r600_bc_cf *cf, struct r600_bc_cf *next)
{
	struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, next->list.prev, list);
	unsigned old_id = cf->id;
	unsigned new_id = prev->id + 2;
	struct r600_bc_cf *other;

	if (prev == cf)
		return; /* position hasn't changed */

	LIST_DEL(&cf->list);
	LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
		/* close the gap at the old position ... */
		if (other->id > old_id)
			other->id -= 2;
		/* ... and open one at the new position */
		if (other->id >= new_id)
			other->id += 2;
		if (other->cf_addr > old_id)
			other->cf_addr -= 2;
		/* NOTE(review): ">" here vs ">=" for ids above — presumably
		 * intentional (a jump target at new_id stays put); confirm
		 * against callers before changing. */
		if (other->cf_addr > new_id)
			other->cf_addr += 2;
	}
	cf->id = new_id;
	LIST_ADD(&cf->list, &prev->list);
}
226
227 int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
228 {
229 int r;
230
231 r = r600_bc_add_cf(bc);
232 if (r)
233 return r;
234 bc->cf_last->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
235 memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
236 return 0;
237 }
238
239 /* alu predicate instructions */
240 static int is_alu_pred_inst(struct r600_bc_alu *alu)
241 {
242 return !alu->is_op3 && (
243 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
244 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
245 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
246 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
247 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
248 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
249 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
250 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
251 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
252 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
253 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
254 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
255 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
256 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
257 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
258 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
259 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
260 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
261 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
262 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
263 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
264 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
265 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
266 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
267 }
268
269 /* alu kill instructions */
270 static int is_alu_kill_inst(struct r600_bc_alu *alu)
271 {
272 return !alu->is_op3 && (
273 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
274 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
275 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
276 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
277 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
278 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
279 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
280 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
281 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
282 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT);
283 }
284
/* ALU instructions that may exist only once per instruction group
 * (kill and predicate-set instructions). */
static int is_alu_once_inst(struct r600_bc_alu *alu)
{
	return is_alu_pred_inst(alu) || is_alu_kill_inst(alu);
}
291
292 static int is_alu_reduction_inst(struct r600_bc_alu *alu)
293 {
294 return !alu->is_op3 && (
295 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
296 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
297 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
298 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
299 }
300
301 static int is_alu_mova_inst(struct r600_bc_alu *alu)
302 {
303 return !alu->is_op3 && (
304 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
305 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
306 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
307 }
308
/* ALU instructions restricted to the vector units (x/y/z/w slots). */
static int is_alu_vec_unit_inst(struct r600_bc_alu *alu)
{
	return is_alu_mova_inst(alu) || is_alu_reduction_inst(alu);
}
315
316 /* alu instructions that can only execute on the trans unit */
317 static int is_alu_trans_unit_inst(struct r600_bc_alu *alu)
318 {
319 if(!alu->is_op3)
320 return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
321 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
322 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
323 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
324 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
325 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
326 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
327 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
328 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
329 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
330 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
331 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
332 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
333 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
334 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
335 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
336 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
337 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
338 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
339 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
340 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
341 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
342 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
343 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
344 else
345 return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
346 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
347 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
348 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
349 }
350
/* ALU instructions that may run on any unit, vector or trans. */
static int is_alu_any_unit_inst(struct r600_bc_alu *alu)
{
	return !(is_alu_vec_unit_inst(alu) || is_alu_trans_unit_inst(alu));
}
357
358 static int assign_alu_units(struct r600_bc_alu *alu_first, struct r600_bc_alu *assignment[5])
359 {
360 struct r600_bc_alu *alu;
361 unsigned i, chan, trans;
362
363 for (i = 0; i < 5; i++)
364 assignment[i] = NULL;
365
366 for (alu = alu_first; alu; alu = NEXT_ALU(alu)) {
367 chan = alu->dst.chan;
368 if (is_alu_trans_unit_inst(alu))
369 trans = 1;
370 else if (is_alu_vec_unit_inst(alu))
371 trans = 0;
372 else if (assignment[chan])
373 trans = 1; // assume ALU_INST_PREFER_VECTOR
374 else
375 trans = 0;
376
377 if (trans) {
378 if (assignment[4]) {
379 assert(0); //ALU.Trans has already been allocated
380 return -1;
381 }
382 assignment[4] = alu;
383 } else {
384 if (assignment[chan]) {
385 assert(0); //ALU.chan has already been allocated
386 return -1;
387 }
388 assignment[chan] = alu;
389 }
390
391 if (alu->last)
392 break;
393 }
394 return 0;
395 }
396
/* Per-group read-port reservation state: which GPR is loaded on each
 * (cycle, component) read port and which constant-file reads are claimed.
 * -1 marks a free port. */
struct alu_bank_swizzle {
	int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
	int hw_cfile_addr[4];
	int hw_cfile_elem[4];
};

/* For each vector-unit bank swizzle value, the GPR fetch cycle assigned
 * to source operands 0..2. */
const unsigned cycle_for_bank_swizzle_vec[][3] = {
	[SQ_ALU_VEC_012] = { 0, 1, 2 },
	[SQ_ALU_VEC_021] = { 0, 2, 1 },
	[SQ_ALU_VEC_120] = { 1, 2, 0 },
	[SQ_ALU_VEC_102] = { 1, 0, 2 },
	[SQ_ALU_VEC_201] = { 2, 0, 1 },
	[SQ_ALU_VEC_210] = { 2, 1, 0 }
};

/* Same mapping for the scalar (trans) unit swizzle values. */
const unsigned cycle_for_bank_swizzle_scl[][3] = {
	[SQ_ALU_SCL_210] = { 2, 1, 0 },
	[SQ_ALU_SCL_122] = { 1, 2, 2 },
	[SQ_ALU_SCL_212] = { 2, 1, 2 },
	[SQ_ALU_SCL_221] = { 2, 2, 1 }
};
418
419 static void init_bank_swizzle(struct alu_bank_swizzle *bs)
420 {
421 int i, cycle, component;
422 /* set up gpr use */
423 for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
424 for (component = 0; component < NUM_OF_COMPONENTS; component++)
425 bs->hw_gpr[cycle][component] = -1;
426 for (i = 0; i < 4; i++)
427 bs->hw_cfile_addr[i] = -1;
428 for (i = 0; i < 4; i++)
429 bs->hw_cfile_elem[i] = -1;
430 }
431
432 static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
433 {
434 if (bs->hw_gpr[cycle][chan] == -1)
435 bs->hw_gpr[cycle][chan] = sel;
436 else if (bs->hw_gpr[cycle][chan] != (int)sel) {
437 // Another scalar operation has already used GPR read port for channel
438 return -1;
439 }
440 return 0;
441 }
442
443 static int reserve_cfile(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
444 {
445 int res, resmatch = -1, resempty = -1;
446 for (res = 3; res >= 0; --res) {
447 if (bs->hw_cfile_addr[res] == -1)
448 resempty = res;
449 else if (bs->hw_cfile_addr[res] == sel &&
450 bs->hw_cfile_elem[res] == chan)
451 resmatch = res;
452 }
453 if (resmatch != -1)
454 return 0; // Read for this scalar element already reserved, nothing to do here.
455 else if (resempty != -1) {
456 bs->hw_cfile_addr[resempty] = sel;
457 bs->hw_cfile_elem[resempty] = chan;
458 } else {
459 // All cfile read ports are used, cannot reference vector element
460 return -1;
461 }
462 return 0;
463 }
464
/* GPR sources occupy selector values 0..127.  "sel" is unsigned, so the
 * former "sel >= 0" test was a tautology (-Wtype-limits); drop it. */
static int is_gpr(unsigned sel)
{
	return (sel <= 127);
}
469
/* Constant-file selectors occupy the range 256..511 inclusive. */
static int is_cfile(unsigned sel)
{
	return sel >= 256 && sel <= 511;
}
474
475 static int is_const(int sel)
476 {
477 return is_cfile(sel) ||
478 (sel >= V_SQ_ALU_SRC_0 &&
479 sel <= V_SQ_ALU_SRC_LITERAL);
480 }
481
482 static int check_vector(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
483 {
484 int r, src, num_src, sel, elem, cycle;
485
486 num_src = r600_bc_get_num_operands(alu);
487 for (src = 0; src < num_src; src++) {
488 sel = alu->src[src].sel;
489 elem = alu->src[src].chan;
490 if (is_gpr(sel)) {
491 cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
492 if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
493 // Nothing to do; special-case optimization,
494 // second source uses first source’s reservation
495 continue;
496 else {
497 r = reserve_gpr(bs, sel, elem, cycle);
498 if (r)
499 return r;
500 }
501 } else if (is_cfile(sel)) {
502 r = reserve_cfile(bs, sel, elem);
503 if (r)
504 return r;
505 }
506 // No restrictions on PV, PS, literal or special constants
507 }
508 return 0;
509 }
510
511 static int check_scalar(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
512 {
513 int r, src, num_src, const_count, sel, elem, cycle;
514
515 num_src = r600_bc_get_num_operands(alu);
516 for (const_count = 0, src = 0; src < num_src; ++src) {
517 sel = alu->src[src].sel;
518 elem = alu->src[src].chan;
519 if (is_const(sel)) { // Any constant, including literal and inline constants
520 if (const_count >= 2)
521 // More than two references to a constant in
522 // transcendental operation.
523 return -1;
524 else
525 const_count++;
526 }
527 if (is_cfile(sel)) {
528 r = reserve_cfile(bs, sel, elem);
529 if (r)
530 return r;
531 }
532 }
533 for (src = 0; src < num_src; ++src) {
534 sel = alu->src[src].sel;
535 elem = alu->src[src].chan;
536 if (is_gpr(sel)) {
537 cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
538 if (cycle < const_count)
539 // Cycle for GPR load conflicts with
540 // constant load in transcendental operation.
541 return -1;
542 r = reserve_gpr(bs, sel, elem, cycle);
543 if (r)
544 return r;
545 }
546 // Constants already processed
547 // No restrictions on PV, PS
548 }
549 return 0;
550 }
551
/* Find a combination of bank swizzles for all slots of a group that
 * satisfies the hardware read-port limits and store it into each
 * instruction's bank_swizzle field.  If any instruction forces its
 * swizzle, the forced values are applied without checking.
 * Returns 0 on success, -1 when no working combination exists. */
static int check_and_set_bank_swizzle(struct r600_bc_alu *slots[5])
{
	struct alu_bank_swizzle bs;
	int bank_swizzle[5];
	int i, r = 0, forced = 0;

	for (i = 0; i < 5; i++)
		if (slots[i] && slots[i]->bank_swizzle_force) {
			slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
			forced = 1;
		}

	if (forced)
		return 0;

	// just check every possible combination of bank swizzle
	// not very efficient, but works on the first try in most of the cases
	for (i = 0; i < 4; i++)
		bank_swizzle[i] = SQ_ALU_VEC_012;
	bank_swizzle[4] = SQ_ALU_SCL_210;
	while(bank_swizzle[4] <= SQ_ALU_SCL_221) {
		init_bank_swizzle(&bs);
		for (i = 0; i < 4; i++) {
			if (slots[i]) {
				r = check_vector(slots[i], &bs, bank_swizzle[i]);
				if (r)
					break;
			}
		}
		if (!r && slots[4]) {
			r = check_scalar(slots[4], &bs, bank_swizzle[4]);
		}
		if (!r) {
			/* combination works — commit it */
			for (i = 0; i < 5; i++) {
				if (slots[i])
					slots[i]->bank_swizzle = bank_swizzle[i];
			}
			return 0;
		}

		/* advance to the next combination, odometer-style; slot 4
		 * overflowing past SQ_ALU_SCL_221 terminates the loop */
		for (i = 0; i < 5; i++) {
			bank_swizzle[i]++;
			if (bank_swizzle[i] <= SQ_ALU_VEC_210)
				break;
			else
				bank_swizzle[i] = SQ_ALU_VEC_012;
		}
	}

	// couldn't find a working swizzle
	return -1;
}
604
/* Rewrite GPR reads that refer to results written by the previous
 * instruction group so they use PV (previous vector result, per channel)
 * or PS (previous scalar result) instead, freeing GPR read ports. */
static int replace_gpr_with_pv_ps(struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	int gpr[5], chan[5];
	int i, j, r, src, num_src;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	/* record which gpr/channel each unit of the previous group wrote;
	 * gpr[i] == -1 marks results that cannot be forwarded (no write,
	 * or relative destination addressing) */
	for (i = 0; i < 5; ++i) {
		if(prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
			gpr[i] = prev[i]->dst.sel;
			/* reduction results are read through channel 0 */
			if (is_alu_reduction_inst(prev[i]))
				chan[i] = 0;
			else
				chan[i] = prev[i]->dst.chan;
		} else
			gpr[i] = -1;
	}

	for (i = 0; i < 5; ++i) {
		struct r600_bc_alu *alu = slots[i];
		if(!alu)
			continue;

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			/* only absolute GPR reads can be forwarded */
			if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
				continue;

			/* trans unit result is read through PS */
			if (alu->src[src].sel == gpr[4] &&
				alu->src[src].chan == chan[4]) {
				alu->src[src].sel = V_SQ_ALU_SRC_PS;
				alu->src[src].chan = 0;
				continue;
			}

			/* vector unit results are read through PV.chan */
			for (j = 0; j < 4; ++j) {
				if (alu->src[src].sel == gpr[j] &&
					alu->src[src].chan == j) {
					alu->src[src].sel = V_SQ_ALU_SRC_PV;
					alu->src[src].chan = chan[j];
					break;
				}
			}
		}
	}

	return 0;
}
656
/* Replace a 32-bit literal with an equivalent inline hardware constant
 * where one exists, toggling the source negate bit for the negative
 * float values.  Falls back to V_SQ_ALU_SRC_LITERAL otherwise. */
void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg)
{
	switch(value) {
	case 0:
		*sel = V_SQ_ALU_SRC_0;
		break;
	case 1:
		*sel = V_SQ_ALU_SRC_1_INT;
		break;
	case -1: /* converts to 0xFFFFFFFF for the u32 switch value */
		*sel = V_SQ_ALU_SRC_M_1_INT;
		break;
	case 0x3F800000: // 1.0f
		*sel = V_SQ_ALU_SRC_1;
		break;
	case 0x3F000000: // 0.5f
		*sel = V_SQ_ALU_SRC_0_5;
		break;
	case 0xBF800000: // -1.0f
		*sel = V_SQ_ALU_SRC_1;
		*neg ^= 1;
		break;
	case 0xBF000000: // -0.5f
		*sel = V_SQ_ALU_SRC_0_5;
		*neg ^= 1;
		break;
	default:
		*sel = V_SQ_ALU_SRC_LITERAL;
		break;
	}
}
688
689 /* compute how many literal are needed */
690 static int r600_bc_alu_nliterals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned *nliteral)
691 {
692 unsigned num_src = r600_bc_get_num_operands(alu);
693 unsigned i, j;
694
695 for (i = 0; i < num_src; ++i) {
696 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
697 uint32_t value = alu->src[i].value[alu->src[i].chan];
698 unsigned found = 0;
699 for (j = 0; j < *nliteral; ++j) {
700 if (literal[j] == value) {
701 found = 1;
702 break;
703 }
704 }
705 if (!found) {
706 if (*nliteral >= 4)
707 return -EINVAL;
708 literal[(*nliteral)++] = value;
709 }
710 }
711 }
712 return 0;
713 }
714
715 static void r600_bc_alu_adjust_literals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned nliteral)
716 {
717 unsigned num_src = r600_bc_get_num_operands(alu);
718 unsigned i, j;
719
720 for (i = 0; i < num_src; ++i) {
721 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
722 uint32_t value = alu->src[i].value[alu->src[i].chan];
723 for (j = 0; j < nliteral; ++j) {
724 if (literal[j] == value) {
725 alu->src[i].chan = j;
726 break;
727 }
728 }
729 }
730 }
731 }
732
/* Try to merge the current ALU instruction group (slots) into the
 * previous group (starting at alu_prev) so both execute as one group.
 * Returns 0 whether or not the merge happened (a failed merge simply
 * leaves both groups untouched); only assign_alu_units errors propagate. */
static int merge_inst_groups(struct r600_bc *bc, struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	struct r600_bc_alu *result[5] = { NULL };

	uint32_t literal[4];
	unsigned nliteral = 0;

	int i, j, r, src, num_src;
	int num_once_inst = 0;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		/* check number of literals: a merged group may not need
		 * more than four distinct literal constants */
		if (prev[i] && r600_bc_alu_nliterals(prev[i], literal, &nliteral))
			return 0;
		if (slots[i] && r600_bc_alu_nliterals(slots[i], literal, &nliteral))
			return 0;

		// let's check used slots
		if (prev[i] && !slots[i]) {
			result[i] = prev[i];
			num_once_inst += is_alu_once_inst(prev[i]);
			continue;
		} else if (prev[i] && slots[i]) {
			if (result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
				// trans unit is still free try to use it
				if (is_alu_any_unit_inst(slots[i])) {
					result[i] = prev[i];
					result[4] = slots[i];
				} else if (is_alu_any_unit_inst(prev[i])) {
					result[i] = slots[i];
					result[4] = prev[i];
				} else
					return 0;
			} else
				return 0;
		} else if(!slots[i]) {
			continue;
		} else
			result[i] = slots[i];

		// let's check source gprs
		struct r600_bc_alu *alu = slots[i];
		num_once_inst += is_alu_once_inst(alu);

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			// constants don't matter
			if (!is_gpr(alu->src[src].sel))
				continue;

			for (j = 0; j < 5; ++j) {
				if (!prev[j] || !prev[j]->dst.write)
					continue;

				// if it's relative then we can't determine which gpr is really used
				if (prev[j]->dst.chan == alu->src[src].chan &&
					(prev[j]->dst.sel == alu->src[src].sel ||
					prev[j]->dst.rel || alu->src[src].rel))
					return 0;
			}
		}
	}

	/* more than one PRED_ or KILL_ ? */
	if (num_once_inst > 1)
		return 0;

	/* check if the result can still be swizzled */
	r = check_and_set_bank_swizzle(result);
	if (r)
		return 0;

	/* looks like everything worked out right, apply the changes */

	/* sort instructions: re-link the merged result in slot order at the
	 * end of the clause's ALU list */
	for (i = 0; i < 5; ++i) {
		slots[i] = result[i];
		if (result[i]) {
			LIST_DEL(&result[i]->list);
			result[i]->last = 0;
			LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
		}
	}

	/* determine new last instruction */
	LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list)->last = 1;

	/* determine new first instruction */
	for (i = 0; i < 5; ++i) {
		if (result[i]) {
			bc->cf_last->curr_bs_head = result[i];
			break;
		}
	}

	/* the group two steps back is now the direct predecessor */
	bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
	bc->cf_last->prev2_bs_head = NULL;

	return 0;
}
838
/* Append a copy of *alu to the bytecode stream, opening a new CF clause
 * of the given type when required.  Handles special-constant folding,
 * clause size limits, and — when the group's last instruction arrives —
 * group merging, PV/PS forwarding and bank-swizzle assignment. */
int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
{
	struct r600_bc_alu *nalu = r600_bc_alu();
	struct r600_bc_alu *lalu;
	int i, r;

	if (nalu == NULL)
		return -ENOMEM;
	memcpy(nalu, alu, sizeof(struct r600_bc_alu));

	if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
		/* check if we could add it anyway */
		if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
			type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
			/* ALU_PUSH_BEFORE can extend a plain ALU clause only
			 * while it holds no predicated instruction */
			LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
				if (lalu->predicate) {
					bc->force_add_cf = 1;
					break;
				}
			}
		} else
			bc->force_add_cf = 1;
	}

	/* cf can contain only alu or only vtx or only tex */
	if (bc->cf_last == NULL || bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nalu);
			return r;
		}
	}
	bc->cf_last->inst = (type << 3);
	if (!bc->cf_last->curr_bs_head) {
		bc->cf_last->curr_bs_head = nalu;
	}
	/* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
	 * worst case */
	if (alu->last && (bc->cf_last->ndw >> 1) >= 120) {
		bc->force_add_cf = 1;
	}
	/* replace special constants */
	for (i = 0; i < 3; i++) {
		if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
			r600_bc_special_constants(
				nalu->src[i].value[nalu->src[i].chan],
				&nalu->src[i].sel, &nalu->src[i].neg);
	}
	LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
	/* each alu use 2 dwords */
	bc->cf_last->ndw += 2;
	bc->ndw += 2;

	bc->cf_last->kcache0_mode = 2;

	/* process cur ALU instructions for bank swizzle */
	if (alu->last) {
		struct r600_bc_alu *slots[5];
		r = assign_alu_units(bc->cf_last->curr_bs_head, slots);
		if (r)
			return r;

		if (bc->cf_last->prev_bs_head) {
			r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		if (bc->cf_last->prev_bs_head) {
			r = replace_gpr_with_pv_ps(slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		r = check_and_set_bank_swizzle(slots);
		if (r)
			return r;

		/* shift the group history window */
		bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
		bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
		bc->cf_last->curr_bs_head = NULL;
	}
	return 0;
}
923
924 int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
925 {
926 return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
927 }
928
929 static void r600_bc_remove_alu(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
930 {
931 if (alu->last && alu->list.prev != &cf->alu) {
932 PREV_ALU(alu)->last = 1;
933 }
934 LIST_DEL(&alu->list);
935 free(alu);
936 cf->ndw -= 2;
937 }
938
939 int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
940 {
941 struct r600_bc_vtx *nvtx = r600_bc_vtx();
942 int r;
943
944 if (nvtx == NULL)
945 return -ENOMEM;
946 memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));
947
948 /* cf can contains only alu or only vtx or only tex */
949 if (bc->cf_last == NULL ||
950 (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
951 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) ||
952 bc->force_add_cf) {
953 r = r600_bc_add_cf(bc);
954 if (r) {
955 free(nvtx);
956 return r;
957 }
958 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
959 }
960 LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
961 /* each fetch use 4 dwords */
962 bc->cf_last->ndw += 4;
963 bc->ndw += 4;
964 if ((bc->cf_last->ndw / 4) > 7)
965 bc->force_add_cf = 1;
966 return 0;
967 }
968
969 int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
970 {
971 struct r600_bc_tex *ntex = r600_bc_tex();
972 int r;
973
974 if (ntex == NULL)
975 return -ENOMEM;
976 memcpy(ntex, tex, sizeof(struct r600_bc_tex));
977
978 /* cf can contains only alu or only vtx or only tex */
979 if (bc->cf_last == NULL ||
980 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
981 bc->force_add_cf) {
982 r = r600_bc_add_cf(bc);
983 if (r) {
984 free(ntex);
985 return r;
986 }
987 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
988 }
989 LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
990 /* each texture fetch use 4 dwords */
991 bc->cf_last->ndw += 4;
992 bc->ndw += 4;
993 if ((bc->cf_last->ndw / 4) > 7)
994 bc->force_add_cf = 1;
995 return 0;
996 }
997
998 int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
999 {
1000 int r;
1001 r = r600_bc_add_cf(bc);
1002 if (r)
1003 return r;
1004
1005 bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
1006 bc->cf_last->inst = inst;
1007 return 0;
1008 }
1009
/* common to all 3 families */
/* Emit the 4-dword encoding of a vertex fetch at word offset "id" in
 * bc->bytecode.  Fetch shaders (bc->type == -1) address vertex resources
 * at a generation-dependent offset. */
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
{
	unsigned fetch_resource_start = 0;

	/* check if we are fetch shader */
	/* fetch shader can also access vertex resource,
	 * first fetch shader resource is at 160
	 */
	if (bc->type == -1) {
		switch (bc->chiprev) {
		/* r600 */
		case CHIPREV_R600:
		/* r700 */
		case CHIPREV_R700:
			fetch_resource_start = 160;
			break;
		/* evergreen */
		case CHIPREV_EVERGREEN:
			fetch_resource_start = 0;
			break;
		default:
			fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
				__FILE__, __func__, __LINE__, bc->chiprev);
			break;
		}
	}
	/* pack the four SQ_VTX words */
	bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
			S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
			S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
			S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
	bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
			S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
			S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
			S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
			S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
			S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
			S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
			S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
			S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
			S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
	bc->bytecode[id++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
	bc->bytecode[id++] = 0;
	return 0;
}
1055
/* common to all 3 families */
/* Emit the 4-dword encoding of a texture fetch at word offset "id" in
 * bc->bytecode (three packed SQ_TEX words plus a zero pad word). */
static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
{
	bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
			S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
			S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
			S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
	bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
			S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
			S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
			S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
			S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
			S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
			S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
			S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
			S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
			S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
			S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
	bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
			S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
			S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
			S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
			S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
			S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
			S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
			S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
	bc->bytecode[id++] = 0;
	return 0;
}
1085
/* r600 only, r700/eg bits in r700_asm.c */
/* Encode one ALU instruction (2 dwords) at bytecode offset 'id'. */
static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
{
	/* don't replace gpr by pv or ps for destination register */
	/* WORD0 is shared by OP2 and OP3: both sources and the group-end bit */
	bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
				S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
				S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
				S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
				S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
				S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
				S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
				S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
				S_SQ_ALU_WORD0_LAST(alu->last);

	if (alu->is_op3) {
		/* OP3 layout: third source replaces the abs/omod/write-mask fields */
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
					S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
					S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
					S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
					S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
	} else {
		/* OP2 layout: source modifiers, write mask, output modifier and
		 * predicate update bits */
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
					S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
					S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
					S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
					S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
					S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
					S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
	}
	return 0;
}
1127
/* Broad categories of CF instructions; each category shares a word encoding. */
enum cf_class
{
	CF_CLASS_ALU,		/* ALU clauses, incl. push/pop variants */
	CF_CLASS_TEXTURE,	/* texture fetch clauses */
	CF_CLASS_VERTEX,	/* vertex fetch clauses */
	CF_CLASS_EXPORT,	/* exports */
	CF_CLASS_OTHER		/* flow control: jump/else/pop/loop/call/return */
};
1136
/* Classify a CF instruction so callers can pick the right word layout. */
static enum cf_class get_cf_class(struct r600_bc_cf *cf)
{
	switch (cf->inst) {
	/* ALU clause opcodes are stored pre-shifted by 3 in cf->inst */
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
		return CF_CLASS_ALU;

	case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
		return CF_CLASS_TEXTURE;

	case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
	case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
		return CF_CLASS_VERTEX;

	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		return CF_CLASS_EXPORT;

	case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
	case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
	case V_SQ_CF_WORD1_SQ_CF_INST_POP:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
	case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
	case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
		return CF_CLASS_OTHER;

	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		/* NOTE(review): returns -EINVAL through an enum return type;
		 * callers only rely on it not matching any CF_CLASS_* case */
		return -EINVAL;
	}
}
1173
/* common for r600/r700 - eg in eg_asm.c */
/* Encode one CF instruction (2 dwords) at its precomputed id.
 * Returns 0 on success or -EINVAL for an unknown instruction. */
static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	unsigned id = cf->id;
	/* the last CF entry in the list carries the END_OF_PROGRAM bit */
	unsigned end_of_program = bc->cf.prev == &cf->list;

	switch (get_cf_class(cf)) {
	case CF_CLASS_ALU:
		/* ALU CF words have no END_OF_PROGRAM field */
		assert(!end_of_program);
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
			S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache0_mode) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache0_bank) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache1_bank);

		/* the opcode was stored pre-shifted by 3, undo that here;
		 * USES_WATERFALL only exists on r6xx */
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
			S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache1_mode) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache0_addr) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache1_addr) |
			S_SQ_CF_ALU_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
			S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
		break;
	case CF_CLASS_TEXTURE:
	case CF_CLASS_VERTEX:
		/* fetch clauses: 4 dwords per instruction, hence ndw / 4 */
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1) |
			S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_EXPORT:
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_OTHER:
		/* flow control: note this uses cf_addr (jump target), not addr */
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_WORD1_COND(cf->cond) |
			S_SQ_CF_WORD1_POP_COUNT(cf->pop_count) |
			S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);

		break;
	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
	return 0;
}
1232
/* One live range of a GPR, in instruction-group ids.
 * start is exclusive and end inclusive; -1 means "before the first
 * instruction" (start) or "never read again / still open" (end).
 * replacement is the GPR this range will be remapped to, or -1. */
struct gpr_usage_range {
	int replacement;
	int32_t start;
	int32_t end;
};
1238
/* Per-GPR usage information gathered and consumed by r600_bc_optimize. */
struct gpr_usage {
	unsigned channels:4;		/* channels read since the last full overwrite */
	int32_t first_write;		/* id of first write of current group, -1 if none */
	int32_t last_write[4];		/* per-channel id of the most recent write */
	unsigned nranges;		/* number of entries in ranges */
	struct gpr_usage_range *ranges;	/* dynamically grown array of live ranges */
};
1246
1247 static struct gpr_usage_range* add_gpr_usage_range(struct gpr_usage *usage)
1248 {
1249 usage->nranges++;
1250 usage->ranges = realloc(usage->ranges, usage->nranges * sizeof(struct gpr_usage_range));
1251 if (!usage->ranges)
1252 return NULL;
1253 return &usage->ranges[usage->nranges-1];
1254 }
1255
1256 static void notice_gpr_read(struct gpr_usage *usage, int32_t id, unsigned chan)
1257 {
1258 usage->channels |= 1 << chan;
1259 usage->first_write = -1;
1260 if (!usage->nranges) {
1261 struct gpr_usage_range* range = add_gpr_usage_range(usage);
1262 range->replacement = -1;
1263 range->start = -1;
1264 range->end = -1;
1265 }
1266 if (usage->ranges[usage->nranges-1].end < id)
1267 usage->ranges[usage->nranges-1].end = id;
1268 }
1269
1270 static void notice_gpr_rel_read(struct gpr_usage usage[128], int32_t id, unsigned chan)
1271 {
1272 unsigned i;
1273 for (i = 0; i < 128; ++i)
1274 notice_gpr_read(&usage[i], id, chan);
1275 }
1276
/* Remember the last instruction group that wrote this channel of the GPR. */
static void notice_gpr_last_write(struct gpr_usage *usage, int32_t id, unsigned chan)
{
	usage->last_write[chan] = id;
}
1281
/* Record a write of one channel of a GPR at instruction group 'id'.
 *
 * When every live (read) channel has been overwritten and the write is
 * not predicated, the old value is dead, so a new usage range begins.
 * prefered_replacement (the source of a plain MOV) is stored so the
 * range may later be remapped onto that register and the copy folded.
 */
static void notice_gpr_write(struct gpr_usage *usage, int32_t id, unsigned chan,
			int predicate, int prefered_replacement)
{
	/* a range starts at the first write of the current group of writes */
	int32_t start = usage->first_write != -1 ? usage->first_write : id;
	usage->channels &= ~(1 << chan);
	if (usage->channels) {
		/* some channels still hold live data - just note where writing began */
		if (usage->first_write == -1)
			usage->first_write = id;
	} else if (!usage->nranges || (usage->ranges[usage->nranges-1].start != start && !predicate)) {
		/* whole register overwritten unconditionally -> open a new range */
		usage->first_write = start;
		struct gpr_usage_range* range = add_gpr_usage_range(usage);
		range->replacement = prefered_replacement;
		range->start = start;
		range->end = -1;
	} else if (usage->ranges[usage->nranges-1].start == start && prefered_replacement != -1) {
		/* same range as before, but now a preferred replacement is known */
		usage->ranges[usage->nranges-1].replacement = prefered_replacement;
	}
	notice_gpr_last_write(usage, id, chan);
}
1301
1302 static void notice_gpr_rel_last_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
1303 {
1304 unsigned i;
1305 for (i = 0; i < 128; ++i)
1306 notice_gpr_last_write(&usage[i], id, chan);
1307 }
1308
1309 static void notice_gpr_rel_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
1310 {
1311 unsigned i;
1312 for (i = 0; i < 128; ++i)
1313 notice_gpr_write(&usage[i], id, chan, 1, -1);
1314 }
1315
1316 static void notice_alu_src_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128], int32_t id)
1317 {
1318 unsigned src, num_src;
1319
1320 num_src = r600_bc_get_num_operands(alu);
1321 for (src = 0; src < num_src; ++src) {
1322 // constants doesn't matter
1323 if (!is_gpr(alu->src[src].sel))
1324 continue;
1325
1326 if (alu->src[src].rel)
1327 notice_gpr_rel_read(usage, id, alu->src[src].chan);
1328 else
1329 notice_gpr_read(&usage[alu->src[src].sel], id, alu->src[src].chan);
1330 }
1331 }
1332
1333 static void notice_alu_dst_gprs(struct r600_bc_alu *alu_first, struct gpr_usage usage[128],
1334 int32_t id, int predicate)
1335 {
1336 struct r600_bc_alu *alu;
1337 for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
1338 if (alu->dst.write) {
1339 if (alu->dst.rel)
1340 notice_gpr_rel_write(usage, id, alu->dst.chan);
1341 else if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV && is_gpr(alu->src[0].sel))
1342 notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan,
1343 predicate, alu->src[0].sel);
1344 else
1345 notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan, predicate, -1);
1346 }
1347
1348 if (alu->last)
1349 break;
1350 }
1351 }
1352
/* Record the GPR reads and writes of one texture fetch at group 'id'.
 *
 * src_sel < 4 selects a register channel (higher values are inline
 * constants), dst_sel == 7 masks the channel — assumed from the checks
 * below, TODO confirm against the sq ISA definitions.
 */
static void notice_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
			int32_t id, int predicate)
{
	if (tex->src_rel) {
		/* relative addressing may read from any register */
		if (tex->src_sel_x < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_w);
	} else {
		if (tex->src_sel_x < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_w);
	}
	if (tex->dst_rel) {
		/* relative addressing may write to any register */
		if (tex->dst_sel_x != 7)
			notice_gpr_rel_write(usage, id, 0);
		if (tex->dst_sel_y != 7)
			notice_gpr_rel_write(usage, id, 1);
		if (tex->dst_sel_z != 7)
			notice_gpr_rel_write(usage, id, 2);
		if (tex->dst_sel_w != 7)
			notice_gpr_rel_write(usage, id, 3);
	} else {
		if (tex->dst_sel_x != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 0, predicate, -1);
		if (tex->dst_sel_y != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 1, predicate, -1);
		if (tex->dst_sel_z != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 2, predicate, -1);
		if (tex->dst_sel_w != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 3, predicate, -1);
	}
}
1395
/* Record the GPR reads and writes of one vertex fetch at group 'id'.
 * Only one source channel (src_sel_x) is read; dst_sel == 7 masks a
 * destination channel. */
static void notice_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
			int32_t id, int predicate)
{
	notice_gpr_read(&usage[vtx->src_gpr], id, vtx->src_sel_x);

	if (vtx->dst_sel_x != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 0, predicate, -1);
	if (vtx->dst_sel_y != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 1, predicate, -1);
	if (vtx->dst_sel_z != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 2, predicate, -1);
	if (vtx->dst_sel_w != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 3, predicate, -1);
}
1410
/* Record the GPR reads of an export and remember it for later sinking.
 *
 * The export is remapped to the start of the 0x100-aligned block that
 * follows the last write of its source register, so it can be moved
 * there in the second pass of r600_bc_optimize.
 */
static void notice_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
			struct r600_bc_cf *export_cf[128], int32_t export_remap[128])
{
	//TODO handle other memory operations
	struct gpr_usage *output = &usage[cf->output.gpr];
	/* round up to the next instruction-group block boundary */
	int32_t id = (output->last_write[0] + 0x100) & ~0xFF;

	export_cf[cf->output.gpr] = cf;
	export_remap[cf->output.gpr] = id;
	/* swizzle < 4 selects a register channel; higher values are constants */
	if (cf->output.swizzle_x < 4)
		notice_gpr_read(output, id, cf->output.swizzle_x);
	if (cf->output.swizzle_y < 4)
		notice_gpr_read(output, id, cf->output.swizzle_y);
	if (cf->output.swizzle_z < 4)
		notice_gpr_read(output, id, cf->output.swizzle_z);
	if (cf->output.swizzle_w < 4)
		notice_gpr_read(output, id, cf->output.swizzle_w);
}
1429
1430 static struct gpr_usage_range *find_src_range(struct gpr_usage *usage, int32_t id)
1431 {
1432 unsigned i;
1433 for (i = 0; i < usage->nranges; ++i) {
1434 struct gpr_usage_range* range = &usage->ranges[i];
1435
1436 if (range->start < id && id <= range->end)
1437 return range;
1438 }
1439 assert(0); /* should not happen */
1440 return NULL;
1441 }
1442
1443 static struct gpr_usage_range *find_dst_range(struct gpr_usage *usage, int32_t id)
1444 {
1445 unsigned i;
1446 for (i = 0; i < usage->nranges; ++i) {
1447 struct gpr_usage_range* range = &usage->ranges[i];
1448 int32_t end = range->end;
1449
1450 if (range->start <= id && (id < end || end == -1))
1451 return range;
1452 }
1453 assert(0); /* should not happen */
1454 return NULL;
1455 }
1456
1457 static int is_barrier_needed(struct gpr_usage *usage, int32_t id, unsigned chan, int32_t last_barrier)
1458 {
1459 if (usage->last_write[chan] != (id & ~0xFF))
1460 return usage->last_write[chan] >= last_barrier;
1461 else
1462 return 0;
1463 }
1464
/* Check whether two usage ranges overlap.
 * NOTE(review): the comparison is asymmetric (<= vs <), presumably to
 * match the exclusive-start/inclusive-end range convention used by
 * find_src_range — confirm before changing. */
static int is_intersection(struct gpr_usage_range* a, struct gpr_usage_range* b)
{
	return a->start <= b->end && b->start < a->end;
}
1469
/* Rate how well 'range' would fit into the free slots of a candidate GPR.
 *
 * Returns -1 when the range overlaps an existing (unreplaced) range of
 * the candidate, otherwise the sum of the gaps to the nearest
 * neighbouring ranges — smaller means a tighter, better fit.
 */
static int rate_replacement(struct gpr_usage *usage, struct gpr_usage_range* range)
{
	unsigned i;
	int32_t best_start = 0x3FFFFFFF, best_end = 0x3FFFFFFF;

	for (i = 0; i < usage->nranges; ++i) {
		if (usage->ranges[i].replacement != -1)
			continue; /* ignore already remapped ranges */

		if (is_intersection(&usage->ranges[i], range))
			return -1; /* forget it if usages overlap */

		/* distance to the closest range ending before us */
		if (range->start >= usage->ranges[i].end)
			best_start = MIN2(best_start, range->start - usage->ranges[i].end);

		/* distance to the closest range starting after us */
		if (range->end != -1 && range->end <= usage->ranges[i].start)
			best_end = MIN2(best_end, usage->ranges[i].start - range->end);
	}
	return best_start + best_end;
}
1490
1491 static void find_replacement(struct gpr_usage usage[128], unsigned current, struct gpr_usage_range *range)
1492 {
1493 unsigned i;
1494 int best_gpr = -1, best_rate = 0x7FFFFFFF;
1495
1496 if (range->replacement != -1 && range->replacement <= current) {
1497 struct gpr_usage_range *other = find_src_range(&usage[range->replacement], range->start);
1498 if (other->replacement != -1)
1499 range->replacement = other->replacement;
1500 }
1501
1502 if (range->replacement != -1 && range->replacement < current) {
1503 int rate = rate_replacement(&usage[range->replacement], range);
1504
1505 /* check if prefered replacement can be used */
1506 if (rate != -1) {
1507 best_rate = rate;
1508 best_gpr = range->replacement;
1509 }
1510 }
1511
1512 if (best_gpr == -1 && (range->start & ~0xFF) == (range->end & ~0xFF)) {
1513 /* register is just used inside one ALU clause */
1514 /* try to use clause temporaryis for it */
1515 for (i = 127; i > 123; --i) {
1516 int rate = rate_replacement(&usage[i], range);
1517
1518 if (rate == -1) /* can't be used because ranges overlap */
1519 continue;
1520
1521 if (rate < best_rate) {
1522 best_rate = rate;
1523 best_gpr = i;
1524
1525 /* can't get better than this */
1526 if (rate == 0)
1527 break;
1528 }
1529 }
1530 }
1531
1532 if (best_gpr == -1) {
1533 for (i = 0; i < current; ++i) {
1534 int rate = rate_replacement(&usage[i], range);
1535
1536 if (rate == -1) /* can't be used because ranges overlap */
1537 continue;
1538
1539 if (rate < best_rate) {
1540 best_rate = rate;
1541 best_gpr = i;
1542
1543 /* can't get better than this */
1544 if (rate == 0)
1545 break;
1546 }
1547 }
1548 }
1549
1550 range->replacement = best_gpr;
1551 if (best_gpr != -1) {
1552 struct gpr_usage_range *reservation = add_gpr_usage_range(&usage[best_gpr]);
1553 reservation->replacement = -1;
1554 reservation->start = range->start;
1555 reservation->end = range->end;
1556 }
1557 }
1558
/* Apply the computed GPR remapping to one ALU instruction and OR the
 * clause's barrier requirement into *barrier. */
static void replace_alu_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128],
			int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage;
	struct gpr_usage_range *range;
	unsigned src, num_src;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; ++src) {
		// constants doesn't matter
		if (!is_gpr(alu->src[src].sel))
			continue;

		cur_usage = &usage[alu->src[src].sel];
		range = find_src_range(cur_usage, id);
		if (range->replacement != -1)
			alu->src[src].sel = range->replacement;

		*barrier |= is_barrier_needed(cur_usage, id, alu->src[src].chan, last_barrier);
	}

	if (alu->dst.write) {
		cur_usage = &usage[alu->dst.sel];
		range = find_dst_range(cur_usage, id);
		if (range->replacement == alu->dst.sel) {
			/* a range marked with its own register means the result is
			 * never read again: drop the write for OP2, or redirect it to
			 * a clause temporary for OP3 (which must write somewhere) */
			if (!alu->is_op3)
				alu->dst.write = 0;
			else
				/*TODO: really check that register 123 is useable */
				alu->dst.sel = 123;
		} else if (range->replacement != -1) {
			alu->dst.sel = range->replacement;
		}
		if (alu->dst.rel)
			notice_gpr_rel_last_write(usage, id, alu->dst.chan);
		else
			notice_gpr_last_write(cur_usage, id, alu->dst.chan);
	}
}
1598
/* Apply the GPR remapping to one texture fetch and OR the clause's
 * barrier requirement into *barrier. */
static void replace_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
			int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage = &usage[tex->src_gpr];
	struct gpr_usage_range *range = find_src_range(cur_usage, id);

	if (tex->src_rel) {
		/* relative addressing may read any register - always barrier */
		*barrier = 1;
	} else {
		if (tex->src_sel_x < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_x, last_barrier);
		if (tex->src_sel_y < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_y, last_barrier);
		if (tex->src_sel_z < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_z, last_barrier);
		if (tex->src_sel_w < 4)
			*barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_w, last_barrier);
	}

	if (range->replacement != -1)
		tex->src_gpr = range->replacement;

	cur_usage = &usage[tex->dst_gpr];
	range = find_dst_range(cur_usage, id);
	if (range->replacement != -1)
		tex->dst_gpr = range->replacement;

	/* track the write so later barrier decisions see it */
	if (tex->dst_rel) {
		if (tex->dst_sel_x != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_x);
		if (tex->dst_sel_y != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_y);
		if (tex->dst_sel_z != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_z);
		if (tex->dst_sel_w != 7)
			notice_gpr_rel_last_write(usage, id, tex->dst_sel_w);
	} else {
		if (tex->dst_sel_x != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_x);
		if (tex->dst_sel_y != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_y);
		if (tex->dst_sel_z != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_z);
		if (tex->dst_sel_w != 7)
			notice_gpr_last_write(cur_usage, id, tex->dst_sel_w);
	}
}
1646
/* Apply the GPR remapping to one vertex fetch and OR the clause's
 * barrier requirement into *barrier. */
static void replace_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
			int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage = &usage[vtx->src_gpr];
	struct gpr_usage_range *range = find_src_range(cur_usage, id);

	*barrier |= is_barrier_needed(cur_usage, id, vtx->src_sel_x, last_barrier);

	if (range->replacement != -1)
		vtx->src_gpr = range->replacement;

	cur_usage = &usage[vtx->dst_gpr];
	range = find_dst_range(cur_usage, id);
	if (range->replacement != -1)
		vtx->dst_gpr = range->replacement;

	/* track the write so later barrier decisions see it */
	if (vtx->dst_sel_x != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_x);
	if (vtx->dst_sel_y != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_y);
	if (vtx->dst_sel_z != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_z);
	if (vtx->dst_sel_w != 7)
		notice_gpr_last_write(cur_usage, id, vtx->dst_sel_w);
}
1672
/* Apply the GPR remapping to an export and compute its barrier bit.
 * id -1 is passed to is_barrier_needed so the same-clause shortcut never
 * matches and only the comparison against last_barrier decides. */
static void replace_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
			int32_t id, int32_t last_barrier)
{
	//TODO handle other memory operations
	struct gpr_usage *cur_usage = &usage[cf->output.gpr];
	struct gpr_usage_range *range = find_src_range(cur_usage, id);

	cf->barrier = 0;
	/* swizzle < 4 selects a register channel; higher values are constants */
	if (cf->output.swizzle_x < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_x, last_barrier);
	if (cf->output.swizzle_y < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_y, last_barrier);
	if (cf->output.swizzle_z < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_z, last_barrier);
	if (cf->output.swizzle_w < 4)
		cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_w, last_barrier);

	if (range->replacement != -1)
		cf->output.gpr = range->replacement;
}
1693
/* Try to remove a useless MOV (destination identical to source, no
 * modifiers).  The MOV may only go away if the following instruction
 * group doesn't consume its result through PV/PS, since removing it
 * would change what PV/PS hold. */
static void optimize_alu_inst(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
{
	struct r600_bc_alu *alu_next;
	unsigned chan;
	unsigned src, num_src;

	/* check if a MOV could be optimized away */
	if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV) {

		/* destination equals source? */
		if (alu->dst.sel != alu->src[0].sel ||
			alu->dst.chan != alu->src[0].chan)
			return;

		/* any special handling for the source? */
		if (alu->src[0].rel || alu->src[0].neg || alu->src[0].abs)
			return;

		/* any special handling for destination? */
		if (alu->dst.rel || alu->dst.clamp)
			return;

		/* ok find next instruction group and check if ps/pv is used */
		for (alu_next = alu; !alu_next->last; alu_next = NEXT_ALU(alu_next));

		if (alu_next->list.next != &cf->alu) {
			/* for reduction instructions the PV channel checked is 0 */
			chan = is_alu_reduction_inst(alu) ? 0 : alu->dst.chan;
			for (alu_next = NEXT_ALU(alu_next); alu_next; alu_next = NEXT_ALU(alu_next)) {
				num_src = r600_bc_get_num_operands(alu_next);
				for (src = 0; src < num_src; ++src) {
					if (alu_next->src[src].sel == V_SQ_ALU_SRC_PV &&
						alu_next->src[src].chan == chan)
						return;

					if (alu_next->src[src].sel == V_SQ_ALU_SRC_PS)
						return;
				}

				/* only the directly following group can see PV/PS */
				if (alu_next->last)
					break;
			}
		}

		r600_bc_remove_alu(cf, alu);
	}
}
1740
/* Optimize the bytecode: remap GPRs to reduce register pressure, drop
 * no-op MOVs, sink exports towards the end of the program and compute
 * which CF instructions need the barrier bit.
 *
 * Two passes: first gather per-GPR usage ranges, then apply the computed
 * replacements while tracking writes again for the barrier decisions.
 */
static void r600_bc_optimize(struct r600_bc *bc)
{
	struct r600_bc_cf *cf, *next_cf;
	struct r600_bc_alu *first, *next_alu;
	struct r600_bc_alu *alu;
	struct r600_bc_vtx *vtx;
	struct r600_bc_tex *tex;
	struct gpr_usage usage[128];

	/* assume that each gpr is exported only once */
	struct r600_bc_cf *export_cf[128] = { NULL };
	int32_t export_remap[128];

	/* NOTE(review): VLA sized by bc->nstack - assumes nstack >= 1 here,
	 * confirm all callers set it up first */
	int32_t id, barrier[bc->nstack];
	unsigned i, j, stack, predicate, old_stack;

	memset(&usage, 0, sizeof(usage));
	for (i = 0; i < 128; ++i) {
		usage[i].first_write = -1;
		usage[i].last_write[0] = -1;
		usage[i].last_write[1] = -1;
		usage[i].last_write[2] = -1;
		usage[i].last_write[3] = -1;
	}

	/* first gather some information about the gpr usage */
	id = 0; stack = 0;
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		switch (get_cf_class(cf)) {
		case CF_CLASS_ALU:
			predicate = 0;
			first = NULL;
			LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
				if (!first)
					first = alu;
				notice_alu_src_gprs(alu, usage, id);
				if (alu->last) {
					/* writes inside conditional blocks count as predicated */
					notice_alu_dst_gprs(first, usage, id, predicate || stack > 0);
					first = NULL;
					++id;
				}
				if (is_alu_pred_inst(alu))
					predicate++;
			}
			if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
				stack += predicate;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
				stack -= 1;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
				stack -= 2;
			break;
		case CF_CLASS_TEXTURE:
			LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
				notice_tex_gprs(tex, usage, id++, stack > 0);
			}
			break;
		case CF_CLASS_VERTEX:
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				notice_vtx_gprs(vtx, usage, id++, stack > 0);
			}
			break;
		case CF_CLASS_EXPORT:
			notice_export_gprs(cf, usage, export_cf, export_remap);
			continue; // don't increment id
		case CF_CLASS_OTHER:
			switch (cf->inst) {
			case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
			case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
			case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
				break;

			case V_SQ_CF_WORD1_SQ_CF_INST_POP:
				stack -= cf->pop_count;
				break;

			default:
				// TODO implement loop handling
				goto out;
			}
		}
		/* every CF block gets its own 0x100 aligned id range */
		id += 0x100;
		id &= ~0xFF;
	}
	assert(stack == 0);

	/* try to optimize gpr usage; 127..124 are kept as clause temporaries */
	for (i = 0; i < 124; ++i) {
		for (j = 0; j < usage[i].nranges; ++j) {
			struct gpr_usage_range *range = &usage[i].ranges[j];
			if (range->start == -1)
				/* read before any write (shader input) - keep as is */
				range->replacement = -1;
			else if (range->end == -1)
				/* never read again - mark with itself so the write is dropped */
				range->replacement = i;
			else
				find_replacement(usage, i, range);

			if (range->replacement == -1)
				bc->ngpr = i;
			else if (range->replacement < i && range->replacement > bc->ngpr)
				bc->ngpr = range->replacement;
		}
	}
	bc->ngpr++;

	/* apply the changes */

	for (i = 0; i < 128; ++i) {
		usage[i].last_write[0] = -1;
		usage[i].last_write[1] = -1;
		usage[i].last_write[2] = -1;
		usage[i].last_write[3] = -1;
	}
	barrier[0] = 0;
	id = 0; stack = 0;
	LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
		old_stack = stack;
		switch (get_cf_class(cf)) {
		case CF_CLASS_ALU:
			predicate = 0;
			first = NULL;
			cf->barrier = 0;
			LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
				replace_alu_gprs(alu, usage, id, barrier[stack], &cf->barrier);
				if (alu->last)
					++id;

				if (is_alu_pred_inst(alu))
					predicate++;

				/* only plain ALU clauses may lose instructions */
				if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)
					optimize_alu_inst(cf, alu);
			}
			if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
				stack += predicate;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
				stack -= 1;
			else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
				stack -= 2;
			if (LIST_IS_EMPTY(&cf->alu)) {
				/* the whole clause was optimized away */
				r600_bc_remove_cf(bc, cf);
				cf = NULL;
			}
			break;
		case CF_CLASS_TEXTURE:
			cf->barrier = 0;
			LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
				replace_tex_gprs(tex, usage, id++, barrier[stack], &cf->barrier);
			}
			break;
		case CF_CLASS_VERTEX:
			cf->barrier = 0;
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				replace_vtx_gprs(vtx, usage, id++, barrier[stack], &cf->barrier);
			}
			break;
		case CF_CLASS_EXPORT:
			continue; // don't increment id
		case CF_CLASS_OTHER:
			if (cf->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
				cf->barrier = 0;
				stack -= cf->pop_count;
			}
			break;
		}

		id &= ~0xFF;
		if (cf && cf->barrier)
			barrier[old_stack] = id;

		/* barriers propagate into newly entered stack levels */
		for (i = old_stack + 1; i <= stack; ++i)
			barrier[i] = barrier[old_stack];

		id += 0x100;
		if (stack != 0) /* ensure exports are placed outside of conditional blocks */
			continue;

		/* sink the exports whose remapped id has been reached */
		for (i = 0; i < 128; ++i) {
			if (!export_cf[i] || id < export_remap[i])
				continue;

			r600_bc_move_cf(bc, export_cf[i], next_cf);
			replace_export_gprs(export_cf[i], usage, export_remap[i], barrier[stack]);
			if (export_cf[i]->barrier)
				barrier[stack] = id - 1;
			next_cf = LIST_ENTRY(struct r600_bc_cf, export_cf[i]->list.next, list);
			export_cf[i] = NULL;
		}
	}
	assert(stack == 0);

out:
	for (i = 0; i < 128; ++i) {
		free(usage[i].ranges);
	}
}
1936
1937 int r600_bc_build(struct r600_bc *bc)
1938 {
1939 struct r600_bc_cf *cf;
1940 struct r600_bc_alu *alu;
1941 struct r600_bc_vtx *vtx;
1942 struct r600_bc_tex *tex;
1943 struct r600_bc_cf *exports[4] = { NULL };
1944 uint32_t literal[4];
1945 unsigned nliteral;
1946 unsigned addr;
1947 int i, r;
1948
1949 if (bc->callstack[0].max > 0)
1950 bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
1951 if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
1952 bc->nstack = 1;
1953 }
1954
1955 r600_bc_optimize(bc);
1956
1957 /* first path compute addr of each CF block */
1958 /* addr start after all the CF instructions */
1959 addr = LIST_ENTRY(struct r600_bc_cf, bc->cf.prev, list)->id + 2;
1960 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1961 switch (get_cf_class(cf)) {
1962 case CF_CLASS_ALU:
1963 nliteral = 0;
1964 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1965 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
1966 if (r)
1967 return r;
1968 if (alu->last) {
1969 cf->ndw += align(nliteral, 2);
1970 nliteral = 0;
1971 }
1972 }
1973 break;
1974 case CF_CLASS_TEXTURE:
1975 case CF_CLASS_VERTEX:
1976 /* fetch node need to be 16 bytes aligned*/
1977 addr += 3;
1978 addr &= 0xFFFFFFFCUL;
1979 break;
1980 break;
1981 case CF_CLASS_EXPORT:
1982 if (cf->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT))
1983 exports[cf->output.type] = cf;
1984 break;
1985 case CF_CLASS_OTHER:
1986 break;
1987 default:
1988 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1989 return -EINVAL;
1990 }
1991 cf->addr = addr;
1992 addr += cf->ndw;
1993 bc->ndw = cf->addr + cf->ndw;
1994 }
1995
1996 /* set export done on last export of each type */
1997 for (i = 0; i < 4; ++i) {
1998 if (exports[i]) {
1999 exports[i]->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
2000 }
2001 }
2002
2003 free(bc->bytecode);
2004 bc->bytecode = calloc(1, bc->ndw * 4);
2005 if (bc->bytecode == NULL)
2006 return -ENOMEM;
2007 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2008 addr = cf->addr;
2009 if (bc->chiprev == CHIPREV_EVERGREEN)
2010 r = eg_bc_cf_build(bc, cf);
2011 else
2012 r = r600_bc_cf_build(bc, cf);
2013 if (r)
2014 return r;
2015 switch (get_cf_class(cf)) {
2016 case CF_CLASS_ALU:
2017 nliteral = 0;
2018 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2019 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2020 if (r)
2021 return r;
2022 r600_bc_alu_adjust_literals(alu, literal, nliteral);
2023 switch(bc->chiprev) {
2024 case CHIPREV_R600:
2025 r = r600_bc_alu_build(bc, alu, addr);
2026 break;
2027 case CHIPREV_R700:
2028 case CHIPREV_EVERGREEN: /* eg alu is same encoding as r700 */
2029 r = r700_bc_alu_build(bc, alu, addr);
2030 break;
2031 default:
2032 R600_ERR("unknown family %d\n", bc->family);
2033 return -EINVAL;
2034 }
2035 if (r)
2036 return r;
2037 addr += 2;
2038 if (alu->last) {
2039 for (i = 0; i < align(nliteral, 2); ++i) {
2040 bc->bytecode[addr++] = literal[i];
2041 }
2042 nliteral = 0;
2043 }
2044 }
2045 break;
2046 case CF_CLASS_VERTEX:
2047 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2048 r = r600_bc_vtx_build(bc, vtx, addr);
2049 if (r)
2050 return r;
2051 addr += 4;
2052 }
2053 break;
2054 case CF_CLASS_TEXTURE:
2055 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2056 r = r600_bc_tex_build(bc, tex, addr);
2057 if (r)
2058 return r;
2059 addr += 4;
2060 }
2061 break;
2062 case CF_CLASS_EXPORT:
2063 case CF_CLASS_OTHER:
2064 break;
2065 default:
2066 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2067 return -EINVAL;
2068 }
2069 }
2070 return 0;
2071 }
2072
2073 void r600_bc_clear(struct r600_bc *bc)
2074 {
2075 struct r600_bc_cf *cf = NULL, *next_cf;
2076
2077 free(bc->bytecode);
2078 bc->bytecode = NULL;
2079
2080 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
2081 struct r600_bc_alu *alu = NULL, *next_alu;
2082 struct r600_bc_tex *tex = NULL, *next_tex;
2083 struct r600_bc_tex *vtx = NULL, *next_vtx;
2084
2085 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
2086 free(alu);
2087 }
2088
2089 LIST_INITHEAD(&cf->alu);
2090
2091 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
2092 free(tex);
2093 }
2094
2095 LIST_INITHEAD(&cf->tex);
2096
2097 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
2098 free(vtx);
2099 }
2100
2101 LIST_INITHEAD(&cf->vtx);
2102
2103 free(cf);
2104 }
2105
2106 LIST_INITHEAD(&cf->list);
2107 }
2108
2109 void r600_bc_dump(struct r600_bc *bc)
2110 {
2111 struct r600_bc_cf *cf;
2112 struct r600_bc_alu *alu;
2113 struct r600_bc_vtx *vtx;
2114 struct r600_bc_tex *tex;
2115
2116 unsigned i, id;
2117 uint32_t literal[4];
2118 unsigned nliteral;
2119 char chip = '6';
2120
2121 switch (bc->chiprev) {
2122 case 1:
2123 chip = '7';
2124 break;
2125 case 2:
2126 chip = 'E';
2127 break;
2128 case 0:
2129 default:
2130 chip = '6';
2131 break;
2132 }
2133 fprintf(stderr, "bytecode %d dw -- %d gprs -----------------------\n", bc->ndw, bc->ngpr);
2134 fprintf(stderr, " %c\n", chip);
2135
2136 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2137 id = cf->id;
2138
2139 switch (get_cf_class(cf)) {
2140 case CF_CLASS_ALU:
2141 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2142 fprintf(stderr, "ADDR:%04d ", cf->addr);
2143 fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache0_mode);
2144 fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache0_bank);
2145 fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache1_bank);
2146 id++;
2147 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2148 fprintf(stderr, "INST:%d ", cf->inst);
2149 fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache1_mode);
2150 fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache0_addr);
2151 fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache1_addr);
2152 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2153 fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
2154 break;
2155 case CF_CLASS_TEXTURE:
2156 case CF_CLASS_VERTEX:
2157 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2158 fprintf(stderr, "ADDR:%04d\n", cf->addr);
2159 id++;
2160 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2161 fprintf(stderr, "INST:%d ", cf->inst);
2162 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2163 fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
2164 break;
2165 case CF_CLASS_EXPORT:
2166 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2167 fprintf(stderr, "GPR:%d ", cf->output.gpr);
2168 fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
2169 fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
2170 fprintf(stderr, "TYPE:%X\n", cf->output.type);
2171 id++;
2172 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2173 fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
2174 fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
2175 fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
2176 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
2177 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
2178 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2179 fprintf(stderr, "INST:%d\n", cf->inst);
2180 break;
2181 case CF_CLASS_OTHER:
2182 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2183 fprintf(stderr, "ADDR:%04d\n", cf->cf_addr);
2184 id++;
2185 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2186 fprintf(stderr, "INST:%d ", cf->inst);
2187 fprintf(stderr, "COND:%X ", cf->cond);
2188 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2189 fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
2190 break;
2191 }
2192
2193 id = cf->addr;
2194 nliteral = 0;
2195 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2196 r600_bc_alu_nliterals(alu, literal, &nliteral);
2197
2198 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2199 fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
2200 fprintf(stderr, "REL:%d ", alu->src[0].rel);
2201 fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
2202 fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
2203 fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
2204 fprintf(stderr, "REL:%d ", alu->src[1].rel);
2205 fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
2206 fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
2207 fprintf(stderr, "LAST:%d)\n", alu->last);
2208 id++;
2209 fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
2210 fprintf(stderr, "INST:%d ", alu->inst);
2211 fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
2212 fprintf(stderr, "CHAN:%d ", alu->dst.chan);
2213 fprintf(stderr, "REL:%d ", alu->dst.rel);
2214 fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
2215 fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
2216 if (alu->is_op3) {
2217 fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
2218 fprintf(stderr, "REL:%d ", alu->src[2].rel);
2219 fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
2220 fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
2221 } else {
2222 fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
2223 fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
2224 fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
2225 fprintf(stderr, "OMOD:%d ", alu->omod);
2226 fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
2227 fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
2228 }
2229
2230 id++;
2231 if (alu->last) {
2232 for (i = 0; i < nliteral; i++, id++) {
2233 float *f = (float*)(bc->bytecode + id);
2234 fprintf(stderr, "%04d %08X %f\n", id, bc->bytecode[id], *f);
2235 }
2236 id += nliteral & 1;
2237 nliteral = 0;
2238 }
2239 }
2240
2241 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2242 //TODO
2243 }
2244
2245 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2246 //TODO
2247 }
2248 }
2249
2250 fprintf(stderr, "--------------------------------------\n");
2251 }
2252
2253 void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2254 {
2255 struct r600_pipe_state *rstate;
2256 unsigned i = 0;
2257
2258 if (count > 8) {
2259 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2260 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2261 S_SQ_CF_WORD1_BARRIER(0) |
2262 S_SQ_CF_WORD1_COUNT(8 - 1);
2263 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2264 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2265 S_SQ_CF_WORD1_BARRIER(0) |
2266 S_SQ_CF_WORD1_COUNT(count - 8 - 1);
2267 } else {
2268 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2269 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2270 S_SQ_CF_WORD1_BARRIER(0) |
2271 S_SQ_CF_WORD1_COUNT(count - 1);
2272 }
2273 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2274 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2275 S_SQ_CF_WORD1_BARRIER(0);
2276
2277 rstate = &ve->rstate;
2278 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2279 rstate->nregs = 0;
2280 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2281 0x00000000, 0xFFFFFFFF, NULL);
2282 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2283 0x00000000, 0xFFFFFFFF, NULL);
2284 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2285 r600_bo_offset(ve->fetch_shader) >> 8,
2286 0xFFFFFFFF, ve->fetch_shader);
2287 }
2288
2289 void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2290 {
2291 struct r600_pipe_state *rstate;
2292 unsigned i = 0;
2293
2294 if (count > 8) {
2295 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2296 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2297 S_SQ_CF_WORD1_BARRIER(0) |
2298 S_SQ_CF_WORD1_COUNT(8 - 1);
2299 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2300 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2301 S_SQ_CF_WORD1_BARRIER(0) |
2302 S_SQ_CF_WORD1_COUNT((count - 8) - 1);
2303 } else {
2304 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2305 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2306 S_SQ_CF_WORD1_BARRIER(0) |
2307 S_SQ_CF_WORD1_COUNT(count - 1);
2308 }
2309 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2310 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2311 S_SQ_CF_WORD1_BARRIER(0);
2312
2313 rstate = &ve->rstate;
2314 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2315 rstate->nregs = 0;
2316 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2317 0x00000000, 0xFFFFFFFF, NULL);
2318 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2319 0x00000000, 0xFFFFFFFF, NULL);
2320 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2321 r600_bo_offset(ve->fetch_shader) >> 8,
2322 0xFFFFFFFF, ve->fetch_shader);
2323 }
2324
2325 static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
2326 unsigned *num_format, unsigned *format_comp)
2327 {
2328 const struct util_format_description *desc;
2329 unsigned i;
2330
2331 *format = 0;
2332 *num_format = 0;
2333 *format_comp = 0;
2334
2335 desc = util_format_description(pformat);
2336 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
2337 goto out_unknown;
2338 }
2339
2340 /* Find the first non-VOID channel. */
2341 for (i = 0; i < 4; i++) {
2342 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
2343 break;
2344 }
2345 }
2346
2347 switch (desc->channel[i].type) {
2348 /* Half-floats, floats, doubles */
2349 case UTIL_FORMAT_TYPE_FLOAT:
2350 switch (desc->channel[i].size) {
2351 case 16:
2352 switch (desc->nr_channels) {
2353 case 1:
2354 *format = FMT_16_FLOAT;
2355 break;
2356 case 2:
2357 *format = FMT_16_16_FLOAT;
2358 break;
2359 case 3:
2360 *format = FMT_16_16_16_FLOAT;
2361 break;
2362 case 4:
2363 *format = FMT_16_16_16_16_FLOAT;
2364 break;
2365 }
2366 break;
2367 case 32:
2368 switch (desc->nr_channels) {
2369 case 1:
2370 *format = FMT_32_FLOAT;
2371 break;
2372 case 2:
2373 *format = FMT_32_32_FLOAT;
2374 break;
2375 case 3:
2376 *format = FMT_32_32_32_FLOAT;
2377 break;
2378 case 4:
2379 *format = FMT_32_32_32_32_FLOAT;
2380 break;
2381 }
2382 break;
2383 default:
2384 goto out_unknown;
2385 }
2386 break;
2387 /* Unsigned ints */
2388 case UTIL_FORMAT_TYPE_UNSIGNED:
2389 /* Signed ints */
2390 case UTIL_FORMAT_TYPE_SIGNED:
2391 switch (desc->channel[i].size) {
2392 case 8:
2393 switch (desc->nr_channels) {
2394 case 1:
2395 *format = FMT_8;
2396 break;
2397 case 2:
2398 *format = FMT_8_8;
2399 break;
2400 case 3:
2401 // *format = FMT_8_8_8; /* fails piglit draw-vertices test */
2402 // break;
2403 case 4:
2404 *format = FMT_8_8_8_8;
2405 break;
2406 }
2407 break;
2408 case 16:
2409 switch (desc->nr_channels) {
2410 case 1:
2411 *format = FMT_16;
2412 break;
2413 case 2:
2414 *format = FMT_16_16;
2415 break;
2416 case 3:
2417 // *format = FMT_16_16_16; /* fails piglit draw-vertices test */
2418 // break;
2419 case 4:
2420 *format = FMT_16_16_16_16;
2421 break;
2422 }
2423 break;
2424 case 32:
2425 switch (desc->nr_channels) {
2426 case 1:
2427 *format = FMT_32;
2428 break;
2429 case 2:
2430 *format = FMT_32_32;
2431 break;
2432 case 3:
2433 *format = FMT_32_32_32;
2434 break;
2435 case 4:
2436 *format = FMT_32_32_32_32;
2437 break;
2438 }
2439 break;
2440 default:
2441 goto out_unknown;
2442 }
2443 break;
2444 default:
2445 goto out_unknown;
2446 }
2447
2448 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2449 *format_comp = 1;
2450 }
2451 if (desc->channel[i].normalized) {
2452 *num_format = 0;
2453 } else {
2454 *num_format = 2;
2455 }
2456 return;
2457 out_unknown:
2458 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
2459 }
2460
2461 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
2462 {
2463 unsigned ndw, i;
2464 u32 *bytecode;
2465 unsigned fetch_resource_start = 0, format, num_format, format_comp;
2466 struct pipe_vertex_element *elements = ve->elements;
2467 const struct util_format_description *desc;
2468
2469 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
2470 ndw = 8 + ve->count * 4;
2471 ve->fs_size = ndw * 4;
2472
2473 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2474 ve->fetch_shader = r600_bo(rctx->radeon, ndw*4, 256, PIPE_BIND_VERTEX_BUFFER, 0);
2475 if (ve->fetch_shader == NULL) {
2476 return -ENOMEM;
2477 }
2478
2479 bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, 0, NULL);
2480 if (bytecode == NULL) {
2481 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2482 return -ENOMEM;
2483 }
2484
2485 if (rctx->family >= CHIP_CEDAR) {
2486 eg_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2487 } else {
2488 r600_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2489 fetch_resource_start = 160;
2490 }
2491
2492 /* vertex elements offset need special handling, if offset is bigger
2493 * than what we can put in fetch instruction then we need to alterate
2494 * the vertex resource offset. In such case in order to simplify code
2495 * we will bound one resource per elements. It's a worst case scenario.
2496 */
2497 for (i = 0; i < ve->count; i++) {
2498 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
2499 if (ve->vbuffer_offset[i]) {
2500 ve->vbuffer_need_offset = 1;
2501 }
2502 }
2503
2504 for (i = 0; i < ve->count; i++) {
2505 unsigned vbuffer_index;
2506 r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp);
2507 desc = util_format_description(ve->hw_format[i]);
2508 if (desc == NULL) {
2509 R600_ERR("unknown format %d\n", ve->hw_format[i]);
2510 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2511 return -EINVAL;
2512 }
2513
2514 /* see above for vbuffer_need_offset explanation */
2515 vbuffer_index = elements[i].vertex_buffer_index;
2516 if (ve->vbuffer_need_offset) {
2517 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i + fetch_resource_start);
2518 } else {
2519 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index + fetch_resource_start);
2520 }
2521 bytecode[8 + i * 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2522 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2523 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2524 bytecode[8 + i * 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc->swizzle[0]) |
2525 S_SQ_VTX_WORD1_DST_SEL_Y(desc->swizzle[1]) |
2526 S_SQ_VTX_WORD1_DST_SEL_Z(desc->swizzle[2]) |
2527 S_SQ_VTX_WORD1_DST_SEL_W(desc->swizzle[3]) |
2528 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2529 S_SQ_VTX_WORD1_DATA_FORMAT(format) |
2530 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format) |
2531 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp) |
2532 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2533 S_SQ_VTX_WORD1_GPR_DST_GPR(i + 1);
2534 bytecode[8 + i * 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements[i].src_offset) |
2535 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2536 bytecode[8 + i * 4 + 3] = 0;
2537 }
2538 r600_bo_unmap(rctx->radeon, ve->fetch_shader);
2539 return 0;
2540 }