/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <errno.h>
#include "util/u_format.h"
#include "util/u_memory.h"
#include "pipe/p_shader_tokens.h"
#include "r600_pipe.h"
#include "r600_sq.h"
#include "r600_opcodes.h"
#include "r600_asm.h"
#include "r600_formats.h"
#include "r600d.h"

#define NUM_OF_CYCLES 3
#define NUM_OF_COMPONENTS 4

#define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
#define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)

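/* Number of source operands an ALU instruction consumes. Instructions using
 * the OP3 encoding always take three sources; the OP2 opcodes with two, one
 * or zero sources are listed explicitly below, and unknown opcodes are
 * reported and conservatively treated as three-operand. */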
static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu *alu)
{
	if (alu->is_op3)
		return 3;

	switch (alu->inst) {
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
		return 0;
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
		return 2;

	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
		return 1;
	default:
		R600_ERR("Need instruction operand number for 0x%x.\n", alu->inst);
	}

	return 3;
}

int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);

static struct r600_bc_cf *r600_bc_cf(void)
{
	struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);

	if (cf == NULL)
		return NULL;
	LIST_INITHEAD(&cf->list);
	LIST_INITHEAD(&cf->alu);
	LIST_INITHEAD(&cf->vtx);
	LIST_INITHEAD(&cf->tex);
	cf->barrier = 1;
	return cf;
}

static struct r600_bc_alu *r600_bc_alu(void)
{
	struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);

	if (alu == NULL)
		return NULL;
	LIST_INITHEAD(&alu->list);
	return alu;
}

static struct r600_bc_vtx *r600_bc_vtx(void)
{
	struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);

	if (vtx == NULL)
		return NULL;
	LIST_INITHEAD(&vtx->list);
	return vtx;
}

static struct r600_bc_tex *r600_bc_tex(void)
{
	struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);

	if (tex == NULL)
		return NULL;
	LIST_INITHEAD(&tex->list);
	return tex;
}

int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
{
	LIST_INITHEAD(&bc->cf);
	bc->family = family;
	switch (bc->family) {
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RS780:
	case CHIP_RS880:
		bc->chiprev = CHIPREV_R600;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
		bc->chiprev = CHIPREV_R700;
		break;
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		bc->chiprev = CHIPREV_EVERGREEN;
		break;
	default:
		R600_ERR("unknown family %d\n", bc->family);
		return -EINVAL;
	}
	return 0;
}

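/* Every CF (control flow) instruction takes two dwords in the final
 * bytecode, which is why the CF ids handed out here and adjusted in
 * r600_bc_remove_cf/r600_bc_move_cf always move in steps of two. */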
static int r600_bc_add_cf(struct r600_bc *bc)
{
	struct r600_bc_cf *cf = r600_bc_cf();

	if (cf == NULL)
		return -ENOMEM;
	LIST_ADDTAIL(&cf->list, &bc->cf);
	if (bc->cf_last)
		cf->id = bc->cf_last->id + 2;
	bc->cf_last = cf;
	bc->ncf++;
	bc->ndw += 2;
	bc->force_add_cf = 0;
	return 0;
}

static void r600_bc_remove_cf(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	struct r600_bc_cf *other;
	LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
		if (other->id > cf->id)
			other->id -= 2;
		if (other->cf_addr > cf->id)
			other->cf_addr -= 2;
	}
	LIST_DEL(&cf->list);
	free(cf);
}

static void r600_bc_move_cf(struct r600_bc *bc, struct r600_bc_cf *cf, struct r600_bc_cf *next)
{
	struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, next->list.prev, list);
	unsigned old_id = cf->id;
	unsigned new_id = prev->id + 2;
	struct r600_bc_cf *other;

	if (prev == cf)
		return; /* position hasn't changed */

	LIST_DEL(&cf->list);
	LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
		if (other->id > old_id)
			other->id -= 2;
		if (other->id >= new_id)
			other->id += 2;
		if (other->cf_addr > old_id)
			other->cf_addr -= 2;
		if (other->cf_addr > new_id)
			other->cf_addr += 2;
	}
	cf->id = new_id;
	LIST_ADD(&cf->list, &prev->list);
}

int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
{
	int r;

	r = r600_bc_add_cf(bc);
	if (r)
		return r;
	bc->cf_last->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
	memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
	bc->cf_last->output.burst_count = 1;
	return 0;
}

/* alu predicate instructions */
static int is_alu_pred_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
}

/* alu kill instructions */
static int is_alu_kill_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT);
}

/* alu instructions that can only exist once per group */
static int is_alu_once_inst(struct r600_bc_alu *alu)
{
	return is_alu_kill_inst(alu) ||
	       is_alu_pred_inst(alu);
}

static int is_alu_reduction_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
}

static int is_alu_mova_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
}

/* alu instructions that can only execute on the vector unit */
static int is_alu_vec_unit_inst(struct r600_bc_alu *alu)
{
	return is_alu_reduction_inst(alu) ||
	       is_alu_mova_inst(alu);
}

/* alu instructions that can only execute on the trans unit */
static int is_alu_trans_unit_inst(struct r600_bc_alu *alu)
{
	if (!alu->is_op3)
		return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
		       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
	else
		return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
		       alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
		       alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
		       alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
}

/* alu instructions that can execute on any unit */
static int is_alu_any_unit_inst(struct r600_bc_alu *alu)
{
	return !is_alu_vec_unit_inst(alu) &&
	       !is_alu_trans_unit_inst(alu);
}

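/* Distribute the instructions of one ALU group over the five hardware
 * slots: assignment[0..3] are the x/y/z/w vector units, keyed by the
 * destination channel, and assignment[4] is the trans (scalar) unit. */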
static int assign_alu_units(struct r600_bc_alu *alu_first, struct r600_bc_alu *assignment[5])
{
	struct r600_bc_alu *alu;
	unsigned i, chan, trans;

	for (i = 0; i < 5; i++)
		assignment[i] = NULL;

	for (alu = alu_first; alu; alu = NEXT_ALU(alu)) {
		chan = alu->dst.chan;
		if (is_alu_trans_unit_inst(alu))
			trans = 1;
		else if (is_alu_vec_unit_inst(alu))
			trans = 0;
		else if (assignment[chan])
			trans = 1; // assume ALU_INST_PREFER_VECTOR
		else
			trans = 0;

		if (trans) {
			if (assignment[4]) {
				assert(0); // ALU.Trans has already been allocated
				return -1;
			}
			assignment[4] = alu;
		} else {
			if (assignment[chan]) {
				assert(0); // ALU.chan has already been allocated
				return -1;
			}
			assignment[chan] = alu;
		}

		if (alu->last)
			break;
	}
	return 0;
}

struct alu_bank_swizzle {
	int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
	int hw_cfile_addr[4];
	int hw_cfile_elem[4];
};

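/* Each GPR operand of an ALU group is fetched in one of three read cycles;
 * the bank swizzle of an instruction decides which cycle serves which
 * source slot. These tables map every vector/scalar swizzle value to the
 * read cycle used by each of the (up to three) sources. */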
const unsigned cycle_for_bank_swizzle_vec[][3] = {
	[SQ_ALU_VEC_012] = { 0, 1, 2 },
	[SQ_ALU_VEC_021] = { 0, 2, 1 },
	[SQ_ALU_VEC_120] = { 1, 2, 0 },
	[SQ_ALU_VEC_102] = { 1, 0, 2 },
	[SQ_ALU_VEC_201] = { 2, 0, 1 },
	[SQ_ALU_VEC_210] = { 2, 1, 0 }
};

const unsigned cycle_for_bank_swizzle_scl[][3] = {
	[SQ_ALU_SCL_210] = { 2, 1, 0 },
	[SQ_ALU_SCL_122] = { 1, 2, 2 },
	[SQ_ALU_SCL_212] = { 2, 1, 2 },
	[SQ_ALU_SCL_221] = { 2, 2, 1 }
};

static void init_bank_swizzle(struct alu_bank_swizzle *bs)
{
	int i, cycle, component;
	/* set up gpr use */
	for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
		for (component = 0; component < NUM_OF_COMPONENTS; component++)
			bs->hw_gpr[cycle][component] = -1;
	for (i = 0; i < 4; i++)
		bs->hw_cfile_addr[i] = -1;
	for (i = 0; i < 4; i++)
		bs->hw_cfile_elem[i] = -1;
}

static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
{
	if (bs->hw_gpr[cycle][chan] == -1)
		bs->hw_gpr[cycle][chan] = sel;
	else if (bs->hw_gpr[cycle][chan] != (int)sel) {
		// Another scalar operation has already used the GPR read port for this channel
		return -1;
	}
	return 0;
}

static int reserve_cfile(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
{
	int res, resmatch = -1, resempty = -1;
	for (res = 3; res >= 0; --res) {
		if (bs->hw_cfile_addr[res] == -1)
			resempty = res;
		else if (bs->hw_cfile_addr[res] == sel &&
			 bs->hw_cfile_elem[res] == chan)
			resmatch = res;
	}
	if (resmatch != -1)
		return 0; // Read for this scalar element already reserved, nothing to do here.
	else if (resempty != -1) {
		bs->hw_cfile_addr[resempty] = sel;
		bs->hw_cfile_elem[resempty] = chan;
	} else {
		// All cfile read ports are used, cannot reference vector element
		return -1;
	}
	return 0;
}

static int is_gpr(unsigned sel)
{
	return (sel >= 0 && sel <= 127);
}

static int is_cfile(unsigned sel)
{
	return (sel > 255 && sel < 512);
}

/* CB constants start at 512, and get translated to a kcache index when ALU
 * clauses are constructed. Note that we handle kcache constants the same way
 * as (the now gone) cfile constants, is that really required? */
static int is_cb_const(int sel)
{
	if (sel > 511 && sel < 4607)
		return 1;
	return 0;
}

static int is_const(int sel)
{
	return is_cfile(sel) ||
	       is_cb_const(sel) ||
	       (sel >= V_SQ_ALU_SRC_0 &&
		sel <= V_SQ_ALU_SRC_LITERAL);
}

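/* Try to reserve the read ports a vector-unit instruction needs under a
 * given bank swizzle: one GPR read per channel and cycle, and at most four
 * distinct constant-file reads for the whole group. */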
static int check_vector(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
{
	int r, src, num_src, sel, elem, cycle;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; src++) {
		sel = alu->src[src].sel;
		elem = alu->src[src].chan;
		if (is_gpr(sel)) {
			cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
			if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
				// Nothing to do; special-case optimization,
				// the second source uses the first source's reservation
				continue;
			else {
				r = reserve_gpr(bs, sel, elem, cycle);
				if (r)
					return r;
			}
		} else if (is_cfile(sel)) {
			r = reserve_cfile(bs, sel, elem);
			if (r)
				return r;
		}
		// No restrictions on PV, PS, literal or special constants
	}
	return 0;
}

static int check_scalar(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
{
	int r, src, num_src, const_count, sel, elem, cycle;

	num_src = r600_bc_get_num_operands(alu);
	for (const_count = 0, src = 0; src < num_src; ++src) {
		sel = alu->src[src].sel;
		elem = alu->src[src].chan;
		if (is_const(sel)) { // Any constant, including literal and inline constants
			if (const_count >= 2)
				// More than two references to a constant in
				// a transcendental operation.
				return -1;
			else
				const_count++;
		}
		if (is_cfile(sel)) {
			r = reserve_cfile(bs, sel, elem);
			if (r)
				return r;
		}
	}
	for (src = 0; src < num_src; ++src) {
		sel = alu->src[src].sel;
		elem = alu->src[src].chan;
		if (is_gpr(sel)) {
			cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
			if (cycle < const_count)
				// The cycle for the GPR load conflicts with the
				// constant load in a transcendental operation.
				return -1;
			r = reserve_gpr(bs, sel, elem, cycle);
			if (r)
				return r;
		}
		// Constants already processed
		// No restrictions on PV, PS
	}
	return 0;
}

static int check_and_set_bank_swizzle(struct r600_bc_alu *slots[5])
{
	struct alu_bank_swizzle bs;
	int bank_swizzle[5];
	int i, r = 0, forced = 0;

	for (i = 0; i < 5; i++)
		if (slots[i] && slots[i]->bank_swizzle_force) {
			slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
			forced = 1;
		}

	if (forced)
		return 0;

	// just check every possible combination of bank swizzles;
	// not very efficient, but it works on the first try in most cases
	for (i = 0; i < 4; i++)
		bank_swizzle[i] = SQ_ALU_VEC_012;
	bank_swizzle[4] = SQ_ALU_SCL_210;
	while (bank_swizzle[4] <= SQ_ALU_SCL_221) {
		init_bank_swizzle(&bs);
		for (i = 0; i < 4; i++) {
			if (slots[i]) {
				r = check_vector(slots[i], &bs, bank_swizzle[i]);
				if (r)
					break;
			}
		}
		if (!r && slots[4]) {
			r = check_scalar(slots[4], &bs, bank_swizzle[4]);
		}
		if (!r) {
			for (i = 0; i < 5; i++) {
				if (slots[i])
					slots[i]->bank_swizzle = bank_swizzle[i];
			}
			return 0;
		}

		for (i = 0; i < 5; i++) {
			bank_swizzle[i]++;
			if (bank_swizzle[i] <= SQ_ALU_VEC_210)
				break;
			else
				bank_swizzle[i] = SQ_ALU_VEC_012;
		}
	}

	// couldn't find a working swizzle
	return -1;
}

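/* The results of the previous instruction group are also readable through
 * the PV (previous vector, per channel) and PS (previous scalar) forwarding
 * registers; rewriting matching GPR sources to PV/PS lowers the read-port
 * pressure accounted for above. */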
static int replace_gpr_with_pv_ps(struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	int gpr[5], chan[5];
	int i, j, r, src, num_src;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		if (prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
			gpr[i] = prev[i]->dst.sel;
			if (is_alu_reduction_inst(prev[i]))
				chan[i] = 0;
			else
				chan[i] = prev[i]->dst.chan;
		} else
			gpr[i] = -1;
	}

	for (i = 0; i < 5; ++i) {
		struct r600_bc_alu *alu = slots[i];
		if (!alu)
			continue;

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
				continue;

			if (alu->src[src].sel == gpr[4] &&
			    alu->src[src].chan == chan[4]) {
				alu->src[src].sel = V_SQ_ALU_SRC_PS;
				alu->src[src].chan = 0;
				continue;
			}

			for (j = 0; j < 4; ++j) {
				if (alu->src[src].sel == gpr[j] &&
				    alu->src[src].chan == j) {
					alu->src[src].sel = V_SQ_ALU_SRC_PV;
					alu->src[src].chan = chan[j];
					break;
				}
			}
		}
	}

	return 0;
}

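/* Map a literal bit pattern onto one of the hardware's inline constants
 * where possible, so it doesn't occupy one of the literal slots; e.g.
 * 0xBF800000 (-1.0f) becomes V_SQ_ALU_SRC_1 with the negate bit toggled. */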
void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg)
{
	switch (value) {
	case 0:
		*sel = V_SQ_ALU_SRC_0;
		break;
	case 1:
		*sel = V_SQ_ALU_SRC_1_INT;
		break;
	case -1:
		*sel = V_SQ_ALU_SRC_M_1_INT;
		break;
	case 0x3F800000: // 1.0f
		*sel = V_SQ_ALU_SRC_1;
		break;
	case 0x3F000000: // 0.5f
		*sel = V_SQ_ALU_SRC_0_5;
		break;
	case 0xBF800000: // -1.0f
		*sel = V_SQ_ALU_SRC_1;
		*neg ^= 1;
		break;
	case 0xBF000000: // -0.5f
		*sel = V_SQ_ALU_SRC_0_5;
		*neg ^= 1;
		break;
	default:
		*sel = V_SQ_ALU_SRC_LITERAL;
		break;
	}
}

/* compute how many literals are needed */
static int r600_bc_alu_nliterals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned *nliteral)
{
	unsigned num_src = r600_bc_get_num_operands(alu);
	unsigned i, j;

	for (i = 0; i < num_src; ++i) {
		if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			uint32_t value = alu->src[i].value[alu->src[i].chan];
			unsigned found = 0;
			for (j = 0; j < *nliteral; ++j) {
				if (literal[j] == value) {
					found = 1;
					break;
				}
			}
			if (!found) {
				if (*nliteral >= 4)
					return -EINVAL;
				literal[(*nliteral)++] = value;
			}
		}
	}
	return 0;
}

static void r600_bc_alu_adjust_literals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned nliteral)
{
	unsigned num_src = r600_bc_get_num_operands(alu);
	unsigned i, j;

	for (i = 0; i < num_src; ++i) {
		if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			uint32_t value = alu->src[i].value[alu->src[i].chan];
			for (j = 0; j < nliteral; ++j) {
				if (literal[j] == value) {
					alu->src[i].chan = j;
					break;
				}
			}
		}
	}
}

static int merge_inst_groups(struct r600_bc *bc, struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	struct r600_bc_alu *result[5] = { NULL };

	uint32_t literal[4];
	unsigned nliteral = 0;

	int i, j, r, src, num_src;
	int num_once_inst = 0;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		/* check the number of literals */
		if (prev[i] && r600_bc_alu_nliterals(prev[i], literal, &nliteral))
			return 0;
		if (slots[i] && r600_bc_alu_nliterals(slots[i], literal, &nliteral))
			return 0;

		// let's check the used slots
		if (prev[i] && !slots[i]) {
			result[i] = prev[i];
			num_once_inst += is_alu_once_inst(prev[i]);
			continue;
		} else if (prev[i] && slots[i]) {
			if (result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
				// the trans unit is still free, try to use it
				if (is_alu_any_unit_inst(slots[i])) {
					result[i] = prev[i];
					result[4] = slots[i];
				} else if (is_alu_any_unit_inst(prev[i])) {
					result[i] = slots[i];
					result[4] = prev[i];
				} else
					return 0;
			} else
				return 0;
		} else if (!slots[i]) {
			continue;
		} else
			result[i] = slots[i];

		// let's check the source gprs
		struct r600_bc_alu *alu = slots[i];
		num_once_inst += is_alu_once_inst(alu);

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			// constants don't matter
			if (!is_gpr(alu->src[src].sel))
				continue;

			for (j = 0; j < 5; ++j) {
				if (!prev[j] || !prev[j]->dst.write)
					continue;

				// if it's relative then we can't determine which gpr is really used
				if (prev[j]->dst.chan == alu->src[src].chan &&
				    (prev[j]->dst.sel == alu->src[src].sel ||
				     prev[j]->dst.rel || alu->src[src].rel))
					return 0;
			}
		}
	}

	/* more than one PRED_ or KILL_ ? */
	if (num_once_inst > 1)
		return 0;

	/* check if the result can still be bank swizzled */
	r = check_and_set_bank_swizzle(result);
	if (r)
		return 0;

	/* looks like everything worked out right, apply the changes */

	/* sort instructions */
	for (i = 0; i < 5; ++i) {
		slots[i] = result[i];
		if (result[i]) {
			LIST_DEL(&result[i]->list);
			result[i]->last = 0;
			LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
		}
	}

	/* determine the new last instruction */
	LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list)->last = 1;

	/* determine the new first instruction */
	for (i = 0; i < 5; ++i) {
		if (result[i]) {
			bc->cf_last->curr_bs_head = result[i];
			break;
		}
	}

	bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
	bc->cf_last->prev2_bs_head = NULL;

	return 0;
}

/* This code handles kcache lines as single blocks of 32 constants. We could
 * probably do slightly better by recognizing that we actually have two
 * consecutive lines of 16 constants, but the resulting code would also be
 * somewhat more complicated. */
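/* As a worked example of the arithmetic below: a source with sel == 600
 * refers to CB constant 600 - 512 = 88, which lives in kcache line
 * (88 / 32) * 2 = 4. Once that line is locked into kcache slot j, the
 * select is rewritten to (88 & 0x1f) + base[j]. */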
static int r600_bc_alloc_kcache_lines(struct r600_bc *bc, struct r600_bc_alu *alu, int type)
{
	struct r600_bc_kcache *kcache = bc->cf_last->kcache;
	unsigned int required_lines;
	unsigned int free_lines = 0;
	unsigned int cache_line[3];
	unsigned int count = 0;
	unsigned int i, j;
	int r;

	/* Collect the required cache lines. */
	for (i = 0; i < 3; ++i) {
		bool found = false;
		unsigned int line;

		if (alu->src[i].sel < 512)
			continue;

		line = ((alu->src[i].sel - 512) / 32) * 2;

		for (j = 0; j < count; ++j) {
			if (cache_line[j] == line) {
				found = true;
				break;
			}
		}

		if (!found)
			cache_line[count++] = line;
	}

	/* This should never actually happen. */
	if (count >= 3)
		return -ENOMEM;

	for (i = 0; i < 2; ++i) {
		if (kcache[i].mode == V_SQ_CF_KCACHE_NOP) {
			++free_lines;
		}
	}

	/* Filter out lines already pulled in by previous instructions. Note
	 * that this is only for the required_lines count; we can't remove
	 * these from the cache_line array since we may have to start a new
	 * ALU clause. */
	for (i = 0, required_lines = count; i < count; ++i) {
		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == cache_line[i]) {
				--required_lines;
				break;
			}
		}
	}

	/* Start a new ALU clause if needed. */
	if (required_lines > free_lines) {
		if ((r = r600_bc_add_cf(bc))) {
			return r;
		}
		bc->cf_last->inst = (type << 3);
		kcache = bc->cf_last->kcache;
	}

	/* Set up the kcache lines. */
	for (i = 0; i < count; ++i) {
		bool found = false;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == cache_line[i]) {
				found = true;
				break;
			}
		}

		if (found)
			continue;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_NOP) {
				kcache[j].bank = 0;
				kcache[j].addr = cache_line[i];
				kcache[j].mode = V_SQ_CF_KCACHE_LOCK_2;
				break;
			}
		}
	}

	/* Alter the src operands to refer to the kcache. */
	for (i = 0; i < 3; ++i) {
		static const unsigned int base[] = {128, 160, 256, 288};
		unsigned int line;

		if (alu->src[i].sel < 512)
			continue;

		alu->src[i].sel -= 512;
		line = (alu->src[i].sel / 32) * 2;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == line) {
				alu->src[i].sel &= 0x1f;
				alu->src[i].sel += base[j];
				break;
			}
		}
	}

	return 0;
}

int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
{
	struct r600_bc_alu *nalu = r600_bc_alu();
	struct r600_bc_alu *lalu;
	int i, r;

	if (nalu == NULL)
		return -ENOMEM;
	memcpy(nalu, alu, sizeof(struct r600_bc_alu));

	if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
		/* check if we could add it anyway */
		if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
		    type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
			LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
				if (lalu->predicate) {
					bc->force_add_cf = 1;
					break;
				}
			}
		} else
			bc->force_add_cf = 1;
	}

	/* a cf can contain only alu, only vtx or only tex instructions */
	if (bc->cf_last == NULL || bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nalu);
			return r;
		}
	}
	bc->cf_last->inst = (type << 3);

	/* Set up the kcache for this ALU instruction. This will start a new
	 * ALU clause if needed. */
	if ((r = r600_bc_alloc_kcache_lines(bc, nalu, type))) {
		free(nalu);
		return r;
	}

	if (!bc->cf_last->curr_bs_head) {
		bc->cf_last->curr_bs_head = nalu;
	}
	/* at most 128 slots; one added alu can take 5 slots + 4 constants
	 * (2 slots) in the worst case */
	if (nalu->last && (bc->cf_last->ndw >> 1) >= 120) {
		bc->force_add_cf = 1;
	}
	/* replace special constants */
	for (i = 0; i < 3; i++) {
		if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
			r600_bc_special_constants(
				nalu->src[i].value[nalu->src[i].chan],
				&nalu->src[i].sel, &nalu->src[i].neg);

		if (nalu->src[i].sel >= bc->ngpr && nalu->src[i].sel < 128) {
			bc->ngpr = nalu->src[i].sel + 1;
		}
	}
	if (nalu->dst.sel >= bc->ngpr) {
		bc->ngpr = nalu->dst.sel + 1;
	}

	LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
	/* each alu uses 2 dwords */
	bc->cf_last->ndw += 2;
	bc->ndw += 2;

	/* process the current ALU instruction group for bank swizzle */
	if (nalu->last) {
		struct r600_bc_alu *slots[5];
		r = assign_alu_units(bc->cf_last->curr_bs_head, slots);
		if (r)
			return r;

		if (bc->cf_last->prev_bs_head) {
			r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		if (bc->cf_last->prev_bs_head) {
			r = replace_gpr_with_pv_ps(slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		r = check_and_set_bank_swizzle(slots);
		if (r)
			return r;

		bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
		bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
		bc->cf_last->curr_bs_head = NULL;
	}
	return 0;
}

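/* Typical usage, as a sketch (the field values are illustrative only):
 *
 *	struct r600_bc_alu alu;
 *
 *	memset(&alu, 0, sizeof(alu));
 *	alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV;
 *	alu.dst.sel = 0;
 *	alu.dst.chan = 0;
 *	alu.dst.write = 1;
 *	alu.src[0].sel = 1;
 *	alu.src[0].chan = 0;
 *	alu.last = 1;	// closes the current instruction group
 *	r = r600_bc_add_alu(&bc, &alu);
 */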
int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
{
	return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
}

static void r600_bc_remove_alu(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
{
	if (alu->last && alu->list.prev != &cf->alu) {
		PREV_ALU(alu)->last = 1;
	}
	LIST_DEL(&alu->list);
	free(alu);
	cf->ndw -= 2;
}

int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
{
	struct r600_bc_vtx *nvtx = r600_bc_vtx();
	int r;

	if (nvtx == NULL)
		return -ENOMEM;
	memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));

	/* a cf can contain only alu, only vtx or only tex instructions */
	if (bc->cf_last == NULL ||
	    (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
	     bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) ||
	    bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nvtx);
			return r;
		}
		bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
	}
	LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
	/* each fetch uses 4 dwords */
	bc->cf_last->ndw += 4;
	bc->ndw += 4;
	if ((bc->cf_last->ndw / 4) > 7)
		bc->force_add_cf = 1;
	return 0;
}

int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
{
	struct r600_bc_tex *ntex = r600_bc_tex();
	int r;

	if (ntex == NULL)
		return -ENOMEM;
	memcpy(ntex, tex, sizeof(struct r600_bc_tex));

	/* a cf can contain only alu, only vtx or only tex instructions */
	if (bc->cf_last == NULL ||
	    bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
	    bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(ntex);
			return r;
		}
		bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
	}
	LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
	/* each texture fetch uses 4 dwords */
	bc->cf_last->ndw += 4;
	bc->ndw += 4;
	if ((bc->cf_last->ndw / 4) > 7)
		bc->force_add_cf = 1;
	return 0;
}

int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
{
	int r;
	r = r600_bc_add_cf(bc);
	if (r)
		return r;

	bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
	bc->cf_last->inst = inst;
	return 0;
}

/* common to all 3 families */
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
{
	unsigned fetch_resource_start = 0;

	/* check if this is a fetch shader */
	/* a fetch shader can also access the vertex resources;
	 * the first fetch shader resource is at 160
	 */
	if (bc->type == -1) {
		switch (bc->chiprev) {
		/* r600 */
		case CHIPREV_R600:
		/* r700 */
		case CHIPREV_R700:
			fetch_resource_start = 160;
			break;
		/* evergreen */
		case CHIPREV_EVERGREEN:
			fetch_resource_start = 0;
			break;
		default:
			fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
				__FILE__, __func__, __LINE__, bc->chiprev);
			break;
		}
	}
	bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
			S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
			S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
			S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
	bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
			S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
			S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
			S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
			S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
			S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
			S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
			S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
			S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
			S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
	bc->bytecode[id++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
	bc->bytecode[id++] = 0;
	return 0;
}

/* common to all 3 families */
static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
{
	bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
			S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
			S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
			S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
	bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
			S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
			S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
			S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
			S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
			S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
			S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
			S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
			S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
			S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
			S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
	bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
			S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
			S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
			S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
			S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
			S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
			S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
			S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
	bc->bytecode[id++] = 0;
	return 0;
}

/* r600 only, r700/eg bits in r700_asm.c */
static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
{
	/* don't replace gpr by pv or ps for the destination register */
	bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
			S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
			S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
			S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
			S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
			S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
			S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
			S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
			S_SQ_ALU_WORD0_LAST(alu->last);

	if (alu->is_op3) {
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
				S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
				S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
				S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
				S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
				S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
				S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
				S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
				S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
				S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
	} else {
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
				S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
				S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
				S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
				S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
				S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
				S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
				S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
				S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
				S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
				S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
				S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
	}
	return 0;
}

enum cf_class
{
	CF_CLASS_ALU,
	CF_CLASS_TEXTURE,
	CF_CLASS_VERTEX,
	CF_CLASS_EXPORT,
	CF_CLASS_OTHER
};

static enum cf_class get_cf_class(struct r600_bc_cf *cf)
{
	switch (cf->inst) {
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
		return CF_CLASS_ALU;

	case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
		return CF_CLASS_TEXTURE;

	case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
	case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
		return CF_CLASS_VERTEX;

	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		return CF_CLASS_EXPORT;

	case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
	case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
	case V_SQ_CF_WORD1_SQ_CF_INST_POP:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
	case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
	case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
		return CF_CLASS_OTHER;

	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
}

/* common for r600/r700 - eg in eg_asm.c */
static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	unsigned id = cf->id;
	unsigned end_of_program = bc->cf.prev == &cf->list;

	switch (get_cf_class(cf)) {
	case CF_CLASS_ALU:
		assert(!end_of_program);
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
				S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
				S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
				S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache[1].bank);

		bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
				S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
				S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
				S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
				S_SQ_CF_ALU_WORD1_BARRIER(cf->barrier) |
				S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
				S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
		break;
	case CF_CLASS_TEXTURE:
	case CF_CLASS_VERTEX:
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
				S_SQ_CF_WORD1_BARRIER(cf->barrier) |
				S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1) |
				S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_EXPORT:
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
				S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
				S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
				S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf->output.burst_count - 1) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->barrier) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->inst) |
				S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_OTHER:
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
				S_SQ_CF_WORD1_BARRIER(cf->barrier) |
				S_SQ_CF_WORD1_COND(cf->cond) |
				S_SQ_CF_WORD1_POP_COUNT(cf->pop_count) |
				S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);

		break;
	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
	return 0;
}

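/* Register allocation bookkeeping: every GPR gets a list of live ranges,
 * each covering the instruction ids (start, end] in which its value is
 * needed; replacement is the GPR the range may be remapped to, or -1.
 * Judging by the (id & ~0xFF) tests below, the low byte of an id numbers
 * instructions within a clause, so masking it off yields the clause. */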
struct gpr_usage_range {
	int replacement;
	int32_t start;
	int32_t end;
};

struct gpr_usage {
	unsigned channels:4;
	int32_t first_write;
	int32_t last_write[4];
	unsigned nranges;
	struct gpr_usage_range *ranges;
};

static struct gpr_usage_range* add_gpr_usage_range(struct gpr_usage *usage)
{
	usage->nranges++;
	usage->ranges = realloc(usage->ranges, usage->nranges * sizeof(struct gpr_usage_range));
	if (!usage->ranges)
		return NULL;
	return &usage->ranges[usage->nranges-1];
}

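/* A read keeps the current range alive: it pushes the range end forward
 * and resets the partial-write tracking, since the old value is still
 * needed. A read with no open range (e.g. a shader input that is never
 * written) opens one with an unknown start. */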
static void notice_gpr_read(struct gpr_usage *usage, int32_t id, unsigned chan)
{
	usage->channels |= 1 << chan;
	usage->first_write = -1;
	if (!usage->nranges) {
		struct gpr_usage_range* range = add_gpr_usage_range(usage);
		range->replacement = -1;
		range->start = -1;
		range->end = -1;
	}
	if (usage->ranges[usage->nranges-1].end < id)
		usage->ranges[usage->nranges-1].end = id;
}

static void notice_gpr_rel_read(struct gpr_usage usage[128], int32_t id, unsigned chan)
{
	unsigned i;
	for (i = 0; i < 128; ++i)
		notice_gpr_read(&usage[i], id, chan);
}

static void notice_gpr_last_write(struct gpr_usage *usage, int32_t id, unsigned chan)
{
	usage->last_write[chan] = id;
}

static void notice_gpr_write(struct gpr_usage *usage, int32_t id, unsigned chan,
			     int predicate, int prefered_replacement)
{
	int32_t start = usage->first_write != -1 ? usage->first_write : id;
	usage->channels &= ~(1 << chan);
	if (usage->channels) {
		if (usage->first_write == -1)
			usage->first_write = id;
	} else if (!usage->nranges || (usage->ranges[usage->nranges-1].start != start && !predicate)) {
		usage->first_write = start;
		struct gpr_usage_range* range = add_gpr_usage_range(usage);
		range->replacement = prefered_replacement;
		range->start = start;
		range->end = -1;
	} else if (usage->ranges[usage->nranges-1].start == start && prefered_replacement != -1) {
		usage->ranges[usage->nranges-1].replacement = prefered_replacement;
	}
	notice_gpr_last_write(usage, id, chan);
}

static void notice_gpr_rel_last_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
{
	unsigned i;
	for (i = 0; i < 128; ++i)
		notice_gpr_last_write(&usage[i], id, chan);
}

static void notice_gpr_rel_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
{
	unsigned i;
	for (i = 0; i < 128; ++i)
		notice_gpr_write(&usage[i], id, chan, 1, -1);
}

static void notice_alu_src_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128], int32_t id)
{
	unsigned src, num_src;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; ++src) {
		// constants don't matter
		if (!is_gpr(alu->src[src].sel))
			continue;

		if (alu->src[src].rel)
			notice_gpr_rel_read(usage, id, alu->src[src].chan);
		else
			notice_gpr_read(&usage[alu->src[src].sel], id, alu->src[src].chan);
	}
}

static void notice_alu_dst_gprs(struct r600_bc_alu *alu_first, struct gpr_usage usage[128],
				int32_t id, int predicate)
{
	struct r600_bc_alu *alu;
	for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
		if (alu->dst.write) {
			if (alu->dst.rel)
				notice_gpr_rel_write(usage, id, alu->dst.chan);
			else if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV && is_gpr(alu->src[0].sel))
				notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan,
						 predicate, alu->src[0].sel);
			else
				notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan, predicate, -1);
		}

		if (alu->last)
			break;
	}
}

static void notice_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
			    int32_t id, int predicate)
{
	if (tex->src_rel) {
		if (tex->src_sel_x < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_w);
	} else {
		if (tex->src_sel_x < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_w);
	}
	if (tex->dst_rel) {
		if (tex->dst_sel_x != 7)
			notice_gpr_rel_write(usage, id, 0);
		if (tex->dst_sel_y != 7)
			notice_gpr_rel_write(usage, id, 1);
		if (tex->dst_sel_z != 7)
			notice_gpr_rel_write(usage, id, 2);
		if (tex->dst_sel_w != 7)
			notice_gpr_rel_write(usage, id, 3);
	} else {
		if (tex->dst_sel_x != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 0, predicate, -1);
		if (tex->dst_sel_y != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 1, predicate, -1);
		if (tex->dst_sel_z != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 2, predicate, -1);
		if (tex->dst_sel_w != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 3, predicate, -1);
	}
}

static void notice_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
			    int32_t id, int predicate)
{
	notice_gpr_read(&usage[vtx->src_gpr], id, vtx->src_sel_x);

	if (vtx->dst_sel_x != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 0, predicate, -1);
	if (vtx->dst_sel_y != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 1, predicate, -1);
	if (vtx->dst_sel_z != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 2, predicate, -1);
	if (vtx->dst_sel_w != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 3, predicate, -1);
}

static void notice_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
			       struct r600_bc_cf *export_cf[128], int32_t export_remap[128])
{
	// TODO: handle other memory operations
	struct gpr_usage *output = &usage[cf->output.gpr];
	int32_t id = (output->last_write[0] + 0x100) & ~0xFF;

	export_cf[cf->output.gpr] = cf;
	export_remap[cf->output.gpr] = id;
	if (cf->output.swizzle_x < 4)
		notice_gpr_read(output, id, cf->output.swizzle_x);
	if (cf->output.swizzle_y < 4)
		notice_gpr_read(output, id, cf->output.swizzle_y);
	if (cf->output.swizzle_z < 4)
		notice_gpr_read(output, id, cf->output.swizzle_z);
	if (cf->output.swizzle_w < 4)
		notice_gpr_read(output, id, cf->output.swizzle_w);
}

static struct gpr_usage_range *find_src_range(struct gpr_usage *usage, int32_t id)
{
	unsigned i;
	for (i = 0; i < usage->nranges; ++i) {
		struct gpr_usage_range* range = &usage->ranges[i];

		if (range->start < id && id <= range->end)
			return range;
	}
	return NULL;
}

static struct gpr_usage_range *find_dst_range(struct gpr_usage *usage, int32_t id)
{
	unsigned i;
	for (i = 0; i < usage->nranges; ++i) {
		struct gpr_usage_range* range = &usage->ranges[i];
		int32_t end = range->end;

		if (range->start <= id && (id < end || end == -1))
			return range;
	}
	assert(0); /* should not happen */
	return NULL;
}

static int is_barrier_needed(struct gpr_usage *usage, int32_t id, unsigned chan, int32_t last_barrier)
{
	if (usage->last_write[chan] != (id & ~0xFF))
		return usage->last_write[chan] >= last_barrier;
	else
		return 0;
}

static int is_intersection(struct gpr_usage_range* a, struct gpr_usage_range* b)
{
	return a->start <= b->end && b->start < a->end;
}

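/* Score how well a range would fit into a candidate register: -1 if it
 * overlaps one of the ranges already living there, otherwise the sum of
 * the distances to the nearest ranges before and after it, so tighter
 * packings rate better. */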
1613 static int rate_replacement(struct gpr_usage *usage, struct gpr_usage_range* range)
1614 {
1615 unsigned i;
1616 int32_t best_start = 0x3FFFFFFF, best_end = 0x3FFFFFFF;
1617
1618 for (i = 0; i < usage->nranges; ++i) {
1619 if (usage->ranges[i].replacement != -1)
1620 continue; /* ignore already remapped ranges */
1621
1622 if (is_intersection(&usage->ranges[i], range))
1623 return -1; /* forget it if usages overlap */
1624
1625 if (range->start >= usage->ranges[i].end)
1626 best_start = MIN2(best_start, range->start - usage->ranges[i].end);
1627
1628 if (range->end != -1 && range->end <= usage->ranges[i].start)
1629 best_end = MIN2(best_end, usage->ranges[i].start - range->end);
1630 }
1631 return best_start + best_end;
1632 }
1633
1634 static void find_replacement(struct gpr_usage usage[128], unsigned current,
1635 struct gpr_usage_range *range, int is_export)
1636 {
1637 unsigned i;
1638 int best_gpr = -1, best_rate = 0x7FFFFFFF;
1639
1640 if (range->replacement != -1 && range->replacement <= current) {
1641 struct gpr_usage_range *other = find_src_range(&usage[range->replacement], range->start);
1642 if (other && other->replacement != -1)
1643 range->replacement = other->replacement;
1644 }
1645
1646 if (range->replacement != -1 && range->replacement < current) {
1647 int rate = rate_replacement(&usage[range->replacement], range);
1648
1649 /* check if prefered replacement can be used */
1650 if (rate != -1) {
1651 best_rate = rate;
1652 best_gpr = range->replacement;
1653 }
1654 }
1655
1656 if (best_gpr == -1 && (range->start & ~0xFF) == (range->end & ~0xFF)) {
1657 /* register is just used inside one ALU clause */
1658 /* try to use clause temporaryis for it */
1659 for (i = 127; i > 123; --i) {
1660 int rate = rate_replacement(&usage[i], range);
1661
1662 if (rate == -1) /* can't be used because ranges overlap */
1663 continue;
1664
1665 if (rate < best_rate) {
1666 best_rate = rate;
1667 best_gpr = i;
1668
1669 /* can't get better than this */
1670 if (rate == 0 || is_export)
1671 break;
1672 }
1673 }
1674 }
1675
1676 if (best_gpr == -1) {
1677 for (i = 0; i < current; ++i) {
1678 int rate = rate_replacement(&usage[i], range);
1679
1680 if (rate == -1) /* can't be used because ranges overlap */
1681 continue;
1682
1683 if (rate < best_rate) {
1684 best_rate = rate;
1685 best_gpr = i;
1686
1687 /* can't get better than this */
1688 if (rate == 0)
1689 break;
1690 }
1691 }
1692 }
1693
1694 range->replacement = best_gpr;
1695 if (best_gpr != -1) {
1696 struct gpr_usage_range *reservation = add_gpr_usage_range(&usage[best_gpr]);
1697 reservation->replacement = -1;
1698 reservation->start = range->start;
1699 reservation->end = range->end;
1700 }
1701 }
1702
1703 static void find_export_replacement(struct gpr_usage usage[128],
1704 struct gpr_usage_range *range, struct r600_bc_cf *current,
1705 struct r600_bc_cf *next, int32_t next_id)
1706 {
1707 if (!next || next_id <= range->start || next_id > range->end)
1708 return;
1709
1710 if (current->output.type != next->output.type)
1711 return;
1712
1713 if ((current->output.array_base + 1) != next->output.array_base)
1714 return;
1715
1716 find_src_range(&usage[next->output.gpr], next_id)->replacement = range->replacement + 1;
1717 }
1718
1719 static void replace_alu_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128],
1720 int32_t id, int32_t last_barrier, unsigned *barrier)
1721 {
1722 struct gpr_usage *cur_usage;
1723 struct gpr_usage_range *range;
1724 unsigned src, num_src;
1725
1726 num_src = r600_bc_get_num_operands(alu);
1727 for (src = 0; src < num_src; ++src) {
1728 // constants doesn't matter
1729 if (!is_gpr(alu->src[src].sel))
1730 continue;
1731
1732 cur_usage = &usage[alu->src[src].sel];
1733 range = find_src_range(cur_usage, id);
1734 if (range->replacement != -1)
1735 alu->src[src].sel = range->replacement;
1736
1737 *barrier |= is_barrier_needed(cur_usage, id, alu->src[src].chan, last_barrier);
1738 }
1739
1740 if (alu->dst.write) {
1741 cur_usage = &usage[alu->dst.sel];
1742 range = find_dst_range(cur_usage, id);
1743 if (range->replacement == alu->dst.sel) {
1744 if (!alu->is_op3)
1745 alu->dst.write = 0;
1746 else
1747 /*TODO: really check that register 123 is useable */
1748 alu->dst.sel = 123;
1749 } else if (range->replacement != -1) {
1750 alu->dst.sel = range->replacement;
1751 }
1752 if (alu->dst.rel)
1753 notice_gpr_rel_last_write(usage, id, alu->dst.chan);
1754 else
1755 notice_gpr_last_write(cur_usage, id, alu->dst.chan);
1756 }
1757 }
1758
1759 static void replace_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
1760 int32_t id, int32_t last_barrier, unsigned *barrier)
1761 {
1762 struct gpr_usage *cur_usage = &usage[tex->src_gpr];
1763 struct gpr_usage_range *range = find_src_range(cur_usage, id);
1764
1765 if (tex->src_rel) {
1766 *barrier = 1;
1767 } else {
1768 if (tex->src_sel_x < 4)
1769 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_x, last_barrier);
1770 if (tex->src_sel_y < 4)
1771 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_y, last_barrier);
1772 if (tex->src_sel_z < 4)
1773 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_z, last_barrier);
1774 if (tex->src_sel_w < 4)
1775 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_w, last_barrier);
1776 }
1777
1778 if (range->replacement != -1)
1779 tex->src_gpr = range->replacement;
1780
1781 cur_usage = &usage[tex->dst_gpr];
1782 range = find_dst_range(cur_usage, id);
1783 if (range->replacement != -1)
1784 tex->dst_gpr = range->replacement;
1785
1786 if (tex->dst_rel) {
1787 if (tex->dst_sel_x != 7)
1788 notice_gpr_rel_last_write(usage, id, tex->dst_sel_x);
1789 if (tex->dst_sel_y != 7)
1790 notice_gpr_rel_last_write(usage, id, tex->dst_sel_y);
1791 if (tex->dst_sel_z != 7)
1792 notice_gpr_rel_last_write(usage, id, tex->dst_sel_z);
1793 if (tex->dst_sel_w != 7)
1794 notice_gpr_rel_last_write(usage, id, tex->dst_sel_w);
1795 } else {
1796 if (tex->dst_sel_x != 7)
1797 notice_gpr_last_write(cur_usage, id, tex->dst_sel_x);
1798 if (tex->dst_sel_y != 7)
1799 notice_gpr_last_write(cur_usage, id, tex->dst_sel_y);
1800 if (tex->dst_sel_z != 7)
1801 notice_gpr_last_write(cur_usage, id, tex->dst_sel_z);
1802 if (tex->dst_sel_w != 7)
1803 notice_gpr_last_write(cur_usage, id, tex->dst_sel_w);
1804 }
1805 }
1806
1807 static void replace_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
1808 int32_t id, int32_t last_barrier, unsigned *barrier)
1809 {
1810 struct gpr_usage *cur_usage = &usage[vtx->src_gpr];
1811 struct gpr_usage_range *range = find_src_range(cur_usage, id);
1812
1813 *barrier |= is_barrier_needed(cur_usage, id, vtx->src_sel_x, last_barrier);
1814
1815 if (range->replacement != -1)
1816 vtx->src_gpr = range->replacement;
1817
1818 cur_usage = &usage[vtx->dst_gpr];
1819 range = find_dst_range(cur_usage, id);
1820 if (range->replacement != -1)
1821 vtx->dst_gpr = range->replacement;
1822
1823 if (vtx->dst_sel_x != 7)
1824 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_x);
1825 if (vtx->dst_sel_y != 7)
1826 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_y);
1827 if (vtx->dst_sel_z != 7)
1828 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_z);
1829 if (vtx->dst_sel_w != 7)
1830 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_w);
1831 }
1832
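/* Remap an export CF and recompute its barrier from scratch; exports get
 * moved around below, so the -1 id apparently checks the swizzled source
 * channels against every earlier write. Swizzle values of 4 and up
 * select constants and cannot require a barrier. */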
1833 static void replace_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
1834 int32_t id, int32_t last_barrier)
1835 {
1836 //TODO handle other memory operations
1837 struct gpr_usage *cur_usage = &usage[cf->output.gpr];
1838 struct gpr_usage_range *range = find_src_range(cur_usage, id);
1839
1840 cf->barrier = 0;
1841 if (cf->output.swizzle_x < 4)
1842 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_x, last_barrier);
1843 if (cf->output.swizzle_y < 4)
1844 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_y, last_barrier);
1845 if (cf->output.swizzle_z < 4)
1846 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_z, last_barrier);
1847 if (cf->output.swizzle_w < 4)
1848 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_w, last_barrier);
1849
1850 if (range->replacement != -1)
1851 cf->output.gpr = range->replacement;
1852 }
1853
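/* Try to remove a MOV that copies a register channel onto itself. This
 * is only safe when no source or destination modifier (rel, neg, abs,
 * clamp) alters the value and the following instruction group does not
 * read the result back through the PV/PS previous-vector registers. */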
1854 static void optimize_alu_inst(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
1855 {
1856 struct r600_bc_alu *alu_next;
1857 unsigned chan;
1858 unsigned src, num_src;
1859
1860 /* check if a MOV could be optimized away */
1861 if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV) {
1862
1863 /* destination equals source? */
1864 if (alu->dst.sel != alu->src[0].sel ||
1865 alu->dst.chan != alu->src[0].chan)
1866 return;
1867
1868 /* any special handling for the source? */
1869 if (alu->src[0].rel || alu->src[0].neg || alu->src[0].abs)
1870 return;
1871
1872 /* any special handling for destination? */
1873 if (alu->dst.rel || alu->dst.clamp)
1874 return;
1875
1876 /* ok, find the next instruction group and check if PV/PS is used */
1877 for (alu_next = alu; !alu_next->last; alu_next = NEXT_ALU(alu_next));
1878
1879 if (alu_next->list.next != &cf->alu) {
1880 chan = is_alu_reduction_inst(alu) ? 0 : alu->dst.chan;
1881 for (alu_next = NEXT_ALU(alu_next); alu_next; alu_next = NEXT_ALU(alu_next)) {
1882 num_src = r600_bc_get_num_operands(alu_next);
1883 for (src = 0; src < num_src; ++src) {
1884 if (alu_next->src[src].sel == V_SQ_ALU_SRC_PV &&
1885 alu_next->src[src].chan == chan)
1886 return;
1887
1888 if (alu_next->src[src].sel == V_SQ_ALU_SRC_PS)
1889 return;
1890 }
1891
1892 if (alu_next->last)
1893 break;
1894 }
1895 }
1896
1897 r600_bc_remove_alu(cf, alu);
1898 }
1899 }
1900
1901 static void optimize_export_inst(struct r600_bc *bc, struct r600_bc_cf *cf)
1902 {
1903 struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, cf->list.prev, list);
1904 if (&prev->list == &bc->cf ||
1905 prev->inst != cf->inst ||
1906 prev->output.type != cf->output.type ||
1907 prev->output.elem_size != cf->output.elem_size ||
1908 prev->output.swizzle_x != cf->output.swizzle_x ||
1909 prev->output.swizzle_y != cf->output.swizzle_y ||
1910 prev->output.swizzle_z != cf->output.swizzle_z ||
1911 prev->output.swizzle_w != cf->output.swizzle_w)
1912 return;
1913
1914 if ((prev->output.burst_count + cf->output.burst_count) > 16)
1915 return;
1916
1917 if ((prev->output.gpr + prev->output.burst_count) == cf->output.gpr &&
1918 (prev->output.array_base + prev->output.burst_count) == cf->output.array_base) {
1919
1920 prev->output.burst_count += cf->output.burst_count;
1921 r600_bc_remove_cf(bc, cf);
1922
1923 } else if (prev->output.gpr == (cf->output.gpr + cf->output.burst_count) &&
1924 prev->output.array_base == (cf->output.array_base + cf->output.burst_count)) {
1925
1926 cf->output.burst_count += prev->output.burst_count;
1927 r600_bc_remove_cf(bc, prev);
1928 }
1929 }
1930
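/* GPR optimizer. Pass one walks the CF list and gathers usage ranges:
 * every ALU group, texture or vertex fetch gets an id, and ids jump to
 * the next multiple of 0x100 at each CF block. Pass two picks a
 * replacement register for each range, rewrites all instructions,
 * recomputes barriers and sinks exports downward once the stack is
 * empty. Loops are not handled yet, so hitting one aborts via 'out'. */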
1931 static void r600_bc_optimize(struct r600_bc *bc)
1932 {
1933 struct r600_bc_cf *cf, *next_cf;
1934 struct r600_bc_alu *first, *next_alu;
1935 struct r600_bc_alu *alu;
1936 struct r600_bc_vtx *vtx;
1937 struct r600_bc_tex *tex;
1938 struct gpr_usage usage[128];
1939
1940 /* assume that each gpr is exported only once */
1941 struct r600_bc_cf *export_cf[128] = { NULL };
1942 int32_t export_remap[128];
1943
1944 int32_t id, barrier[bc->nstack];
1945 unsigned i, j, stack, predicate, old_stack;
1946
1947 memset(&usage, 0, sizeof(usage));
1948 for (i = 0; i < 128; ++i) {
1949 usage[i].first_write = -1;
1950 usage[i].last_write[0] = -1;
1951 usage[i].last_write[1] = -1;
1952 usage[i].last_write[2] = -1;
1953 usage[i].last_write[3] = -1;
1954 }
1955
1956 /* first gather some information about gpr usage */
1957 id = 0; stack = 0;
1958 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1959 switch (get_cf_class(cf)) {
1960 case CF_CLASS_ALU:
1961 predicate = 0;
1962 first = NULL;
1963 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1964 if (!first)
1965 first = alu;
1966 notice_alu_src_gprs(alu, usage, id);
1967 if (alu->last) {
1968 notice_alu_dst_gprs(first, usage, id, predicate || stack > 0);
1969 first = NULL;
1970 ++id;
1971 }
1972 if (is_alu_pred_inst(alu))
1973 predicate++;
1974 }
1975 if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
1976 stack += predicate;
1977 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
1978 stack -= 1;
1979 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
1980 stack -= 2;
1981 break;
1982 case CF_CLASS_TEXTURE:
1983 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
1984 notice_tex_gprs(tex, usage, id++, stack > 0);
1985 }
1986 break;
1987 case CF_CLASS_VERTEX:
1988 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
1989 notice_vtx_gprs(vtx, usage, id++, stack > 0);
1990 }
1991 break;
1992 case CF_CLASS_EXPORT:
1993 notice_export_gprs(cf, usage, export_cf, export_remap);
1994 continue; // don't increment id
1995 case CF_CLASS_OTHER:
1996 switch (cf->inst) {
1997 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1998 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1999 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
2000 break;
2001
2002 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
2003 stack -= cf->pop_count;
2004 break;
2005
2006 default:
2007 // TODO implement loop handling
2008 goto out;
2009 }
2010 }
2011 id += 0x100;
2012 id &= ~0xFF;
2013 }
2014 assert(stack == 0);
2015
2016 /* try to optimize gpr usage */
2017 for (i = 0; i < 124; ++i) {
2018 for (j = 0; j < usage[i].nranges; ++j) {
2019 struct gpr_usage_range *range = &usage[i].ranges[j];
2020 int is_export = export_cf[i] && export_cf[i + 1] &&
2021 range->start < export_remap[i] &&
2022 export_remap[i] <= range->end;
2023
2024 if (range->start == -1)
2025 range->replacement = -1;
2026 else if (range->end == -1)
2027 range->replacement = i;
2028 else
2029 find_replacement(usage, i, range, is_export);
2030
2031 if (range->replacement == -1)
2032 bc->ngpr = i;
2033 else if (range->replacement < i && range->replacement > bc->ngpr)
2034 bc->ngpr = range->replacement;
2035
2036 if (is_export && range->replacement != -1) {
2037 find_export_replacement(usage, range, export_cf[i],
2038 export_cf[i + 1], export_remap[i + 1]);
2039 }
2040 }
2041 }
2042 bc->ngpr++;
2043
2044 /* apply the changes */
2045 for (i = 0; i < 128; ++i) {
2046 usage[i].last_write[0] = -1;
2047 usage[i].last_write[1] = -1;
2048 usage[i].last_write[2] = -1;
2049 usage[i].last_write[3] = -1;
2050 }
2051 barrier[0] = 0;
2052 id = 0; stack = 0;
2053 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
2054 old_stack = stack;
2055 switch (get_cf_class(cf)) {
2056 case CF_CLASS_ALU:
2057 predicate = 0;
2058 first = NULL;
2059 cf->barrier = 0;
2060 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
2061 replace_alu_gprs(alu, usage, id, barrier[stack], &cf->barrier);
2062 if (alu->last)
2063 ++id;
2064
2065 if (is_alu_pred_inst(alu))
2066 predicate++;
2067
2068 if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)
2069 optimize_alu_inst(cf, alu);
2070 }
2071 if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
2072 stack += predicate;
2073 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
2074 stack -= 1;
2075 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
2076 stack -= 2;
2077 if (LIST_IS_EMPTY(&cf->alu)) {
2078 r600_bc_remove_cf(bc, cf);
2079 cf = NULL;
2080 }
2081 break;
2082 case CF_CLASS_TEXTURE:
2083 cf->barrier = 0;
2084 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2085 replace_tex_gprs(tex, usage, id++, barrier[stack], &cf->barrier);
2086 }
2087 break;
2088 case CF_CLASS_VERTEX:
2089 cf->barrier = 0;
2090 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2091 replace_vtx_gprs(vtx, usage, id++, barrier[stack], &cf->barrier);
2092 }
2093 break;
2094 case CF_CLASS_EXPORT:
2095 continue; // don't increment id
2096 case CF_CLASS_OTHER:
2097 if (cf->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
2098 cf->barrier = 0;
2099 stack -= cf->pop_count;
2100 }
2101 break;
2102 }
2103
2104 id &= ~0xFF;
2105 if (cf && cf->barrier)
2106 barrier[old_stack] = id;
2107
2108 for (i = old_stack + 1; i <= stack; ++i)
2109 barrier[i] = barrier[old_stack];
2110
2111 id += 0x100;
2112 if (stack != 0) /* ensure exports are placed outside of conditional blocks */
2113 continue;
2114
2115 for (i = 0; i < 128; ++i) {
2116 if (!export_cf[i] || id < export_remap[i])
2117 continue;
2118
2119 r600_bc_move_cf(bc, export_cf[i], next_cf);
2120 replace_export_gprs(export_cf[i], usage, export_remap[i], barrier[stack]);
2121 if (export_cf[i]->barrier)
2122 barrier[stack] = id - 1;
2123 next_cf = LIST_ENTRY(struct r600_bc_cf, export_cf[i]->list.next, list);
2124 optimize_export_inst(bc, export_cf[i]);
2125 export_cf[i] = NULL;
2126 }
2127 }
2128 assert(stack == 0);
2129
2130 out:
2131 for (i = 0; i < 128; ++i) {
2132 free(usage[i].ranges);
2133 }
2134 }
2135
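/* Emit the final bytecode: run the optimizer, compute each clause's
 * address (fetch clauses are aligned to 16 bytes), flag the last export
 * of each type as EXPORT_DONE, and encode the CF, ALU and fetch words
 * for the target chip revision. */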
2136 int r600_bc_build(struct r600_bc *bc)
2137 {
2138 struct r600_bc_cf *cf;
2139 struct r600_bc_alu *alu;
2140 struct r600_bc_vtx *vtx;
2141 struct r600_bc_tex *tex;
2142 struct r600_bc_cf *exports[4] = { NULL };
2143 uint32_t literal[4];
2144 unsigned nliteral;
2145 unsigned addr;
2146 int i, r;
2147
2148 if (bc->callstack[0].max > 0)
2149 bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
2150 if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
2151 bc->nstack = 1;
2152 }
2153
2154 r600_bc_optimize(bc);
2155
2156 /* first pass: compute the addr of each CF block */
2157 /* addrs start after all the CF instructions */
2158 addr = LIST_ENTRY(struct r600_bc_cf, bc->cf.prev, list)->id + 2;
2159 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2160 switch (get_cf_class(cf)) {
2161 case CF_CLASS_ALU:
2162 nliteral = 0;
2163 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2164 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2165 if (r)
2166 return r;
2167 if (alu->last) {
2168 cf->ndw += align(nliteral, 2);
2169 nliteral = 0;
2170 }
2171 }
2172 break;
2173 case CF_CLASS_TEXTURE:
2174 case CF_CLASS_VERTEX:
2175 /* fetch nodes need to be 16-byte aligned */
2176 addr += 3;
2177 addr &= 0xFFFFFFFCUL;
2178 break;
2180 case CF_CLASS_EXPORT:
2181 if (cf->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT))
2182 exports[cf->output.type] = cf;
2183 break;
2184 case CF_CLASS_OTHER:
2185 break;
2186 default:
2187 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2188 return -EINVAL;
2189 }
2190 cf->addr = addr;
2191 addr += cf->ndw;
2192 bc->ndw = cf->addr + cf->ndw;
2193 }
2194
2195 /* set export done on last export of each type */
2196 for (i = 0; i < 4; ++i) {
2197 if (exports[i]) {
2198 exports[i]->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
2199 }
2200 }
2201
2202 free(bc->bytecode);
2203 bc->bytecode = calloc(1, bc->ndw * 4);
2204 if (bc->bytecode == NULL)
2205 return -ENOMEM;
2206 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2207 addr = cf->addr;
2208 if (bc->chiprev == CHIPREV_EVERGREEN)
2209 r = eg_bc_cf_build(bc, cf);
2210 else
2211 r = r600_bc_cf_build(bc, cf);
2212 if (r)
2213 return r;
2214 switch (get_cf_class(cf)) {
2215 case CF_CLASS_ALU:
2216 nliteral = 0;
2217 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2218 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2219 if (r)
2220 return r;
2221 r600_bc_alu_adjust_literals(alu, literal, nliteral);
2222 switch(bc->chiprev) {
2223 case CHIPREV_R600:
2224 r = r600_bc_alu_build(bc, alu, addr);
2225 break;
2226 case CHIPREV_R700:
2227 case CHIPREV_EVERGREEN: /* eg alu is same encoding as r700 */
2228 r = r700_bc_alu_build(bc, alu, addr);
2229 break;
2230 default:
2231 R600_ERR("unknown chiprev %d\n", bc->chiprev);
2232 return -EINVAL;
2233 }
2234 if (r)
2235 return r;
2236 addr += 2;
2237 if (alu->last) {
2238 for (i = 0; i < align(nliteral, 2); ++i) {
2239 bc->bytecode[addr++] = literal[i];
2240 }
2241 nliteral = 0;
2242 }
2243 }
2244 break;
2245 case CF_CLASS_VERTEX:
2246 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2247 r = r600_bc_vtx_build(bc, vtx, addr);
2248 if (r)
2249 return r;
2250 addr += 4;
2251 }
2252 break;
2253 case CF_CLASS_TEXTURE:
2254 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2255 r = r600_bc_tex_build(bc, tex, addr);
2256 if (r)
2257 return r;
2258 addr += 4;
2259 }
2260 break;
2261 case CF_CLASS_EXPORT:
2262 case CF_CLASS_OTHER:
2263 break;
2264 default:
2265 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2266 return -EINVAL;
2267 }
2268 }
2269 return 0;
2270 }
2271
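/* Release the bytecode buffer and every CF clause along with its ALU,
 * tex and vtx instruction lists, leaving bc empty for reuse. */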
2272 void r600_bc_clear(struct r600_bc *bc)
2273 {
2274 struct r600_bc_cf *cf = NULL, *next_cf;
2275
2276 free(bc->bytecode);
2277 bc->bytecode = NULL;
2278
2279 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
2280 struct r600_bc_alu *alu = NULL, *next_alu;
2281 struct r600_bc_tex *tex = NULL, *next_tex;
2282 struct r600_bc_vtx *vtx = NULL, *next_vtx;
2283
2284 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
2285 free(alu);
2286 }
2287
2288 LIST_INITHEAD(&cf->alu);
2289
2290 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
2291 free(tex);
2292 }
2293
2294 LIST_INITHEAD(&cf->tex);
2295
2296 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
2297 free(vtx);
2298 }
2299
2300 LIST_INITHEAD(&cf->vtx);
2301
2302 free(cf);
2303 }
2304
2305 LIST_INITHEAD(&bc->cf);
2306 }
2307
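/* Dump the built bytecode to stderr for debugging: the two CF dwords are
 * decoded per class, each ALU instruction as two dwords plus its literal
 * constants printed as floats. Decoding of tex and vtx instructions is
 * still a TODO. */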
2308 void r600_bc_dump(struct r600_bc *bc)
2309 {
2310 struct r600_bc_cf *cf;
2311 struct r600_bc_alu *alu;
2312 struct r600_bc_vtx *vtx;
2313 struct r600_bc_tex *tex;
2314
2315 unsigned i, id;
2316 uint32_t literal[4];
2317 unsigned nliteral;
2318 char chip = '6';
2319
2320 switch (bc->chiprev) {
2321 case 1:
2322 chip = '7';
2323 break;
2324 case 2:
2325 chip = 'E';
2326 break;
2327 case 0:
2328 default:
2329 chip = '6';
2330 break;
2331 }
2332 fprintf(stderr, "bytecode %d dw -- %d gprs -----------------------\n", bc->ndw, bc->ngpr);
2333 fprintf(stderr, " %c\n", chip);
2334
2335 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2336 id = cf->id;
2337
2338 switch (get_cf_class(cf)) {
2339 case CF_CLASS_ALU:
2340 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2341 fprintf(stderr, "ADDR:%04d ", cf->addr);
2342 fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache[0].mode);
2343 fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache[0].bank);
2344 fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache[1].bank);
2345 id++;
2346 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2347 fprintf(stderr, "INST:%d ", cf->inst);
2348 fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache[1].mode);
2349 fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache[0].addr);
2350 fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache[1].addr);
2351 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2352 fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
2353 break;
2354 case CF_CLASS_TEXTURE:
2355 case CF_CLASS_VERTEX:
2356 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2357 fprintf(stderr, "ADDR:%04d\n", cf->addr);
2358 id++;
2359 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2360 fprintf(stderr, "INST:%d ", cf->inst);
2361 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2362 fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
2363 break;
2364 case CF_CLASS_EXPORT:
2365 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2366 fprintf(stderr, "GPR:%d ", cf->output.gpr);
2367 fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
2368 fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
2369 fprintf(stderr, "TYPE:%X\n", cf->output.type);
2370 id++;
2371 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2372 fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
2373 fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
2374 fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
2375 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
2377 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2378 fprintf(stderr, "INST:%d ", cf->inst);
2379 fprintf(stderr, "BURST_COUNT:%d\n", cf->output.burst_count);
2380 break;
2381 case CF_CLASS_OTHER:
2382 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2383 fprintf(stderr, "ADDR:%04d\n", cf->cf_addr);
2384 id++;
2385 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2386 fprintf(stderr, "INST:%d ", cf->inst);
2387 fprintf(stderr, "COND:%X ", cf->cond);
2388 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2389 fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
2390 break;
2391 }
2392
2393 id = cf->addr;
2394 nliteral = 0;
2395 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2396 r600_bc_alu_nliterals(alu, literal, &nliteral);
2397
2398 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2399 fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
2400 fprintf(stderr, "REL:%d ", alu->src[0].rel);
2401 fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
2402 fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
2403 fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
2404 fprintf(stderr, "REL:%d ", alu->src[1].rel);
2405 fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
2406 fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
2407 fprintf(stderr, "LAST:%d\n", alu->last);
2408 id++;
2409 fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
2410 fprintf(stderr, "INST:%d ", alu->inst);
2411 fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
2412 fprintf(stderr, "CHAN:%d ", alu->dst.chan);
2413 fprintf(stderr, "REL:%d ", alu->dst.rel);
2414 fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
2415 fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
2416 if (alu->is_op3) {
2417 fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
2418 fprintf(stderr, "REL:%d ", alu->src[2].rel);
2419 fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
2420 fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
2421 } else {
2422 fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
2423 fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
2424 fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
2425 fprintf(stderr, "OMOD:%d ", alu->omod);
2426 fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
2427 fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
2428 }
2429
2430 id++;
2431 if (alu->last) {
2432 for (i = 0; i < nliteral; i++, id++) {
2433 float *f = (float*)(bc->bytecode + id);
2434 fprintf(stderr, "%04d %08X %f\n", id, bc->bytecode[id], *f);
2435 }
2436 id += nliteral & 1;
2437 nliteral = 0;
2438 }
2439 }
2440
2441 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2442 //TODO
2443 }
2444
2445 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2446 //TODO
2447 }
2448 }
2449
2450 fprintf(stderr, "--------------------------------------\n");
2451 }
2452
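/* Write the CF program of a fetch shader. A vertex fetch clause appears
 * to be limited to 8 instructions, so more than 8 elements are split
 * into two VTX clauses (the second starting at dword 40), followed by a
 * RETURN; the register state then points SQ_PGM_START_FS at the shader
 * buffer object. */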
2453 void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2454 {
2455 struct r600_pipe_state *rstate;
2456 unsigned i = 0;
2457
2458 if (count > 8) {
2459 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2460 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2461 S_SQ_CF_WORD1_BARRIER(0) |
2462 S_SQ_CF_WORD1_COUNT(8 - 1);
2463 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2464 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2465 S_SQ_CF_WORD1_BARRIER(0) |
2466 S_SQ_CF_WORD1_COUNT(count - 8 - 1);
2467 } else {
2468 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2469 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2470 S_SQ_CF_WORD1_BARRIER(0) |
2471 S_SQ_CF_WORD1_COUNT(count - 1);
2472 }
2473 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2474 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2475 S_SQ_CF_WORD1_BARRIER(0);
2476
2477 rstate = &ve->rstate;
2478 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2479 rstate->nregs = 0;
2480 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2481 0x00000000, 0xFFFFFFFF, NULL);
2482 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2483 0x00000000, 0xFFFFFFFF, NULL);
2484 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2485 r600_bo_offset(ve->fetch_shader) >> 8,
2486 0xFFFFFFFF, ve->fetch_shader);
2487 }
2488
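/* Same as r600_cf_vtx() but emitting VTX_TC clauses, the vertex fetch
 * variant that goes through the texture cache. */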
2489 void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2490 {
2491 struct r600_pipe_state *rstate;
2492 unsigned i = 0;
2493
2494 if (count > 8) {
2495 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2496 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2497 S_SQ_CF_WORD1_BARRIER(0) |
2498 S_SQ_CF_WORD1_COUNT(8 - 1);
2499 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2500 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2501 S_SQ_CF_WORD1_BARRIER(0) |
2502 S_SQ_CF_WORD1_COUNT((count - 8) - 1);
2503 } else {
2504 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2505 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2506 S_SQ_CF_WORD1_BARRIER(0) |
2507 S_SQ_CF_WORD1_COUNT(count - 1);
2508 }
2509 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2510 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2511 S_SQ_CF_WORD1_BARRIER(0);
2512
2513 rstate = &ve->rstate;
2514 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2515 rstate->nregs = 0;
2516 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2517 0x00000000, 0xFFFFFFFF, NULL);
2518 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2519 0x00000000, 0xFFFFFFFF, NULL);
2520 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2521 r600_bo_offset(ve->fetch_shader) >> 8,
2522 0xFFFFFFFF, ve->fetch_shader);
2523 }
2524
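/* Translate a gallium vertex format into the hardware FMT_* value plus
 * the fetch instruction's num_format (0 for normalized, 2 for scaled)
 * and format_comp (1 for signed) fields. Only plain layouts are handled,
 * and 3-component 8- and 16-bit formats deliberately fall through to
 * their 4-component variants since the native ones fail piglit. */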
2525 static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
2526 unsigned *num_format, unsigned *format_comp)
2527 {
2528 const struct util_format_description *desc;
2529 unsigned i;
2530
2531 *format = 0;
2532 *num_format = 0;
2533 *format_comp = 0;
2534
2535 desc = util_format_description(pformat);
2536 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
2537 goto out_unknown;
2538 }
2539
2540 /* Find the first non-VOID channel. */
2541 for (i = 0; i < 4; i++) {
2542 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
2543 break;
2544 }
2545 }
2546
2547 switch (desc->channel[i].type) {
2548 /* Half-floats, floats, doubles */
2549 case UTIL_FORMAT_TYPE_FLOAT:
2550 switch (desc->channel[i].size) {
2551 case 16:
2552 switch (desc->nr_channels) {
2553 case 1:
2554 *format = FMT_16_FLOAT;
2555 break;
2556 case 2:
2557 *format = FMT_16_16_FLOAT;
2558 break;
2559 case 3:
2560 *format = FMT_16_16_16_FLOAT;
2561 break;
2562 case 4:
2563 *format = FMT_16_16_16_16_FLOAT;
2564 break;
2565 }
2566 break;
2567 case 32:
2568 switch (desc->nr_channels) {
2569 case 1:
2570 *format = FMT_32_FLOAT;
2571 break;
2572 case 2:
2573 *format = FMT_32_32_FLOAT;
2574 break;
2575 case 3:
2576 *format = FMT_32_32_32_FLOAT;
2577 break;
2578 case 4:
2579 *format = FMT_32_32_32_32_FLOAT;
2580 break;
2581 }
2582 break;
2583 default:
2584 goto out_unknown;
2585 }
2586 break;
2587 /* Unsigned ints */
2588 case UTIL_FORMAT_TYPE_UNSIGNED:
2589 /* Signed ints */
2590 case UTIL_FORMAT_TYPE_SIGNED:
2591 switch (desc->channel[i].size) {
2592 case 8:
2593 switch (desc->nr_channels) {
2594 case 1:
2595 *format = FMT_8;
2596 break;
2597 case 2:
2598 *format = FMT_8_8;
2599 break;
2600 case 3:
2601 // *format = FMT_8_8_8; /* fails piglit draw-vertices test */
2602 // break;
2603 case 4:
2604 *format = FMT_8_8_8_8;
2605 break;
2606 }
2607 break;
2608 case 16:
2609 switch (desc->nr_channels) {
2610 case 1:
2611 *format = FMT_16;
2612 break;
2613 case 2:
2614 *format = FMT_16_16;
2615 break;
2616 case 3:
2617 // *format = FMT_16_16_16; /* fails piglit draw-vertices test */
2618 // break;
2619 case 4:
2620 *format = FMT_16_16_16_16;
2621 break;
2622 }
2623 break;
2624 case 32:
2625 switch (desc->nr_channels) {
2626 case 1:
2627 *format = FMT_32;
2628 break;
2629 case 2:
2630 *format = FMT_32_32;
2631 break;
2632 case 3:
2633 *format = FMT_32_32_32;
2634 break;
2635 case 4:
2636 *format = FMT_32_32_32_32;
2637 break;
2638 }
2639 break;
2640 default:
2641 goto out_unknown;
2642 }
2643 break;
2644 default:
2645 goto out_unknown;
2646 }
2647
2648 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2649 *format_comp = 1;
2650 }
2651 if (desc->channel[i].normalized) {
2652 *num_format = 0;
2653 } else {
2654 *num_format = 2;
2655 }
2656 return;
2657 out_unknown:
2658 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
2659 }
2660
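/* Build the fetch shader for a set of vertex elements: allocate a buffer
 * object, emit the CF clauses (eg_cf_vtx() on evergreen, r600_cf_vtx()
 * otherwise, where fetch resources start at slot 160) and one four-dword
 * VTX fetch per element, loading element i into GPR i + 1. */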
2661 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
2662 {
2663 unsigned ndw, i;
2664 u32 *bytecode;
2665 unsigned fetch_resource_start = 0, format, num_format, format_comp;
2666 struct pipe_vertex_element *elements = ve->elements;
2667 const struct util_format_description *desc;
2668
2669 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
2670 ndw = 8 + ve->count * 4;
2671 ve->fs_size = ndw * 4;
2672
2673 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2674 ve->fetch_shader = r600_bo(rctx->radeon, ndw*4, 256, PIPE_BIND_VERTEX_BUFFER, 0);
2675 if (ve->fetch_shader == NULL) {
2676 return -ENOMEM;
2677 }
2678
2679 bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, 0, NULL);
2680 if (bytecode == NULL) {
2681 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2682 return -ENOMEM;
2683 }
2684
2685 if (rctx->family >= CHIP_CEDAR) {
2686 eg_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2687 } else {
2688 r600_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2689 fetch_resource_start = 160;
2690 }
2691
2692 /* vertex element offsets need special handling: if an offset is bigger
2693 * than what we can put in the fetch instruction, we have to alter the
2694 * vertex resource offset. In that case, to simplify the code, we bind
2695 * one resource per element. It's the worst case scenario.
2696 */
2697 for (i = 0; i < ve->count; i++) {
2698 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
2699 if (ve->vbuffer_offset[i]) {
2700 ve->vbuffer_need_offset = 1;
2701 }
2702 }
2703
2704 for (i = 0; i < ve->count; i++) {
2705 unsigned vbuffer_index;
2706 r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp);
2707 desc = util_format_description(ve->hw_format[i]);
2708 if (desc == NULL) {
2709 R600_ERR("unknown format %d\n", ve->hw_format[i]);
2710 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2711 return -EINVAL;
2712 }
2713
2714 /* see above for vbuffer_need_offset explanation */
2715 vbuffer_index = elements[i].vertex_buffer_index;
2716 if (ve->vbuffer_need_offset) {
2717 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i + fetch_resource_start);
2718 } else {
2719 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index + fetch_resource_start);
2720 }
2721 bytecode[8 + i * 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2722 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2723 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2724 bytecode[8 + i * 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc->swizzle[0]) |
2725 S_SQ_VTX_WORD1_DST_SEL_Y(desc->swizzle[1]) |
2726 S_SQ_VTX_WORD1_DST_SEL_Z(desc->swizzle[2]) |
2727 S_SQ_VTX_WORD1_DST_SEL_W(desc->swizzle[3]) |
2728 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2729 S_SQ_VTX_WORD1_DATA_FORMAT(format) |
2730 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format) |
2731 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp) |
2732 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2733 S_SQ_VTX_WORD1_GPR_DST_GPR(i + 1);
2734 bytecode[8 + i * 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements[i].src_offset) |
2735 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2736 bytecode[8 + i * 4 + 3] = 0;
2737 }
2738 r600_bo_unmap(rctx->radeon, ve->fetch_shader);
2739 return 0;
2740 }