/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <errno.h>
#include "util/u_format.h"
#include "util/u_memory.h"
#include "pipe/p_shader_tokens.h"
#include "r600_pipe.h"
#include "r600_sq.h"
#include "r600_opcodes.h"
#include "r600_asm.h"
#include "r600_formats.h"
#include "r600d.h"

#define NUM_OF_CYCLES 3
#define NUM_OF_COMPONENTS 4

#define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
#define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)

static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu *alu)
{
	if (alu->is_op3)
		return 3;

	switch (alu->inst) {
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
		return 0;
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
		return 2;

	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
	case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
		return 1;
	default:
		R600_ERR("Need instruction operand number for 0x%x.\n",
			alu->inst);
	}

	return 3;
}

int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);

static struct r600_bc_cf *r600_bc_cf(void)
{
	struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);

	if (cf == NULL)
		return NULL;
	LIST_INITHEAD(&cf->list);
	LIST_INITHEAD(&cf->alu);
	LIST_INITHEAD(&cf->vtx);
	LIST_INITHEAD(&cf->tex);
	cf->barrier = 1;
	return cf;
}

static struct r600_bc_alu *r600_bc_alu(void)
{
	struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);

	if (alu == NULL)
		return NULL;
	LIST_INITHEAD(&alu->list);
	return alu;
}

static struct r600_bc_vtx *r600_bc_vtx(void)
{
	struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);

	if (vtx == NULL)
		return NULL;
	LIST_INITHEAD(&vtx->list);
	return vtx;
}

static struct r600_bc_tex *r600_bc_tex(void)
{
	struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);

	if (tex == NULL)
		return NULL;
	LIST_INITHEAD(&tex->list);
	return tex;
}

int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
{
	LIST_INITHEAD(&bc->cf);
	bc->family = family;
	switch (bc->family) {
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RS780:
	case CHIP_RS880:
		bc->chiprev = CHIPREV_R600;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
		bc->chiprev = CHIPREV_R700;
		break;
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		bc->chiprev = CHIPREV_EVERGREEN;
		break;
	default:
		R600_ERR("unknown family %d\n", bc->family);
		return -EINVAL;
	}
	return 0;
}

static int r600_bc_add_cf(struct r600_bc *bc)
{
	struct r600_bc_cf *cf = r600_bc_cf();

	if (cf == NULL)
		return -ENOMEM;
	LIST_ADDTAIL(&cf->list, &bc->cf);
	if (bc->cf_last)
		cf->id = bc->cf_last->id + 2;
	bc->cf_last = cf;
	bc->ncf++;
	bc->ndw += 2;
	bc->force_add_cf = 0;
	return 0;
}

static void r600_bc_remove_cf(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	struct r600_bc_cf *other;
	LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
		if (other->id > cf->id)
			other->id -= 2;
		if (other->cf_addr > cf->id)
			other->cf_addr -= 2;
	}
	LIST_DEL(&cf->list);
	free(cf);
}

static void r600_bc_move_cf(struct r600_bc *bc, struct r600_bc_cf *cf, struct r600_bc_cf *next)
{
	struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, next->list.prev, list);
	unsigned old_id = cf->id;
	unsigned new_id = prev->id + 2;
	struct r600_bc_cf *other;

	if (prev == cf)
		return; /* position hasn't changed */

	LIST_DEL(&cf->list);
	LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
		if (other->id > old_id)
			other->id -= 2;
		if (other->id >= new_id)
			other->id += 2;
		if (other->cf_addr > old_id)
			other->cf_addr -= 2;
		if (other->cf_addr > new_id)
			other->cf_addr += 2;
	}
	cf->id = new_id;
	LIST_ADD(&cf->list, &prev->list);
}

int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
{
	int r;

	r = r600_bc_add_cf(bc);
	if (r)
		return r;
	bc->cf_last->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
	memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
	bc->cf_last->output.burst_count = 1;
	return 0;
}

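/* A descriptive note (not from the original source): CF ids double as dword
 * offsets into the CF portion of the bytecode. Every CF instruction encodes
 * to exactly two dwords, which is why the helpers above adjust ids (and
 * cf_addr jump targets) in steps of 2 whenever a CF instruction is added,
 * removed or moved. */
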
/* alu predicate instructions */
static int is_alu_pred_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
}

/* alu kill instructions */
static int is_alu_kill_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT);
}

/* alu instructions that can only exist once per group */
static int is_alu_once_inst(struct r600_bc_alu *alu)
{
	return is_alu_kill_inst(alu) ||
		is_alu_pred_inst(alu);
}

static int is_alu_reduction_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
}

static int is_alu_mova_inst(struct r600_bc_alu *alu)
{
	return !alu->is_op3 && (
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
		alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
}

/* alu instructions that can only execute on the vector unit */
static int is_alu_vec_unit_inst(struct r600_bc_alu *alu)
{
	return is_alu_reduction_inst(alu) ||
		is_alu_mova_inst(alu);
}

/* alu instructions that can only execute on the trans unit */
static int is_alu_trans_unit_inst(struct r600_bc_alu *alu)
{
	if (!alu->is_op3)
		return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
	else
		return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
			alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
			alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
			alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
}

/* alu instructions that can execute on any unit */
static int is_alu_any_unit_inst(struct r600_bc_alu *alu)
{
	return !is_alu_vec_unit_inst(alu) &&
		!is_alu_trans_unit_inst(alu);
}

static int assign_alu_units(struct r600_bc_alu *alu_first, struct r600_bc_alu *assignment[5])
{
	struct r600_bc_alu *alu;
	unsigned i, chan, trans;

	for (i = 0; i < 5; i++)
		assignment[i] = NULL;

	for (alu = alu_first; alu; alu = NEXT_ALU(alu)) {
		chan = alu->dst.chan;
		if (is_alu_trans_unit_inst(alu))
			trans = 1;
		else if (is_alu_vec_unit_inst(alu))
			trans = 0;
		else if (assignment[chan])
			trans = 1; // assume ALU_INST_PREFER_VECTOR
		else
			trans = 0;

		if (trans) {
			if (assignment[4]) {
				assert(0); // ALU.Trans has already been allocated
				return -1;
			}
			assignment[4] = alu;
		} else {
			if (assignment[chan]) {
				assert(0); // ALU.chan has already been allocated
				return -1;
			}
			assignment[chan] = alu;
		}

		if (alu->last)
			break;
	}
	return 0;
}

struct alu_bank_swizzle {
	int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
	int hw_cfile_addr[4];
	int hw_cfile_elem[4];
};

const unsigned cycle_for_bank_swizzle_vec[][3] = {
	[SQ_ALU_VEC_012] = { 0, 1, 2 },
	[SQ_ALU_VEC_021] = { 0, 2, 1 },
	[SQ_ALU_VEC_120] = { 1, 2, 0 },
	[SQ_ALU_VEC_102] = { 1, 0, 2 },
	[SQ_ALU_VEC_201] = { 2, 0, 1 },
	[SQ_ALU_VEC_210] = { 2, 1, 0 }
};

const unsigned cycle_for_bank_swizzle_scl[][3] = {
	[SQ_ALU_SCL_210] = { 2, 1, 0 },
	[SQ_ALU_SCL_122] = { 1, 2, 2 },
	[SQ_ALU_SCL_212] = { 2, 1, 2 },
	[SQ_ALU_SCL_221] = { 2, 2, 1 }
};

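/* How to read the tables above (a descriptive note, not from the original
 * source): cycle_for_bank_swizzle_vec[swz][src] is the read cycle in which
 * vector source operand 'src' fetches its GPR under bank swizzle mode 'swz'.
 * With SQ_ALU_VEC_021, for example, src0 reads in cycle 0, src1 in cycle 2
 * and src2 in cycle 1. Each cycle provides one read port per GPR channel,
 * which is exactly the resource reserve_gpr() below accounts for. */
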
static void init_bank_swizzle(struct alu_bank_swizzle *bs)
{
	int i, cycle, component;
	/* set up gpr use */
	for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
		for (component = 0; component < NUM_OF_COMPONENTS; component++)
			bs->hw_gpr[cycle][component] = -1;
	for (i = 0; i < 4; i++)
		bs->hw_cfile_addr[i] = -1;
	for (i = 0; i < 4; i++)
		bs->hw_cfile_elem[i] = -1;
}

static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
{
	if (bs->hw_gpr[cycle][chan] == -1)
		bs->hw_gpr[cycle][chan] = sel;
	else if (bs->hw_gpr[cycle][chan] != (int)sel) {
		// Another scalar operation has already used the GPR read port for this channel
		return -1;
	}
	return 0;
}

static int reserve_cfile(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
{
	int res, resmatch = -1, resempty = -1;
	for (res = 3; res >= 0; --res) {
		if (bs->hw_cfile_addr[res] == -1)
			resempty = res;
		else if (bs->hw_cfile_addr[res] == sel &&
			bs->hw_cfile_elem[res] == chan)
			resmatch = res;
	}
	if (resmatch != -1)
		return 0; // Read for this scalar element already reserved, nothing to do here.
	else if (resempty != -1) {
		bs->hw_cfile_addr[resempty] = sel;
		bs->hw_cfile_elem[resempty] = chan;
	} else {
		// All cfile read ports are used, cannot reference vector element
		return -1;
	}
	return 0;
}

static int is_gpr(unsigned sel)
{
	return (sel <= 127); /* sel is unsigned, so sel >= 0 always holds */
}

static int is_cfile(unsigned sel)
{
	return (sel > 255 && sel < 512);
}

/* CB constants start at 512, and get translated to a kcache index when ALU
 * clauses are constructed. Note that we handle kcache constants the same way
 * as (the now gone) cfile constants, is that really required? */
static int is_cb_const(int sel)
{
	if (sel > 511 && sel < 4607)
		return 1;
	return 0;
}

static int is_const(int sel)
{
	return is_cfile(sel) ||
		is_cb_const(sel) ||
		(sel >= V_SQ_ALU_SRC_0 &&
		sel <= V_SQ_ALU_SRC_LITERAL);
}

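/* A rough map of the ALU source selector space as the helpers above use it
 * (a sketch; the kcache window bases come from the base[] table in
 * r600_bc_alloc_kcache_lines() below):
 *   0..127   GPRs
 *   128..191 kcache windows set up when ALU clauses are built
 *   256..511 constant file
 *   512+     constant-buffer constants, later remapped into the kcache
 * plus the V_SQ_ALU_SRC_* specials (0, 1, 0.5, literals, PV/PS). */
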
static int check_vector(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
{
	int r, src, num_src, sel, elem, cycle;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; src++) {
		sel = alu->src[src].sel;
		elem = alu->src[src].chan;
		if (is_gpr(sel)) {
			cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
			if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
				// Nothing to do; special-case optimization,
				// second source uses first source's reservation
				continue;
			else {
				r = reserve_gpr(bs, sel, elem, cycle);
				if (r)
					return r;
			}
		} else if (is_cfile(sel)) {
			r = reserve_cfile(bs, sel, elem);
			if (r)
				return r;
		}
		// No restrictions on PV, PS, literal or special constants
	}
	return 0;
}

static int check_scalar(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
{
	int r, src, num_src, const_count, sel, elem, cycle;

	num_src = r600_bc_get_num_operands(alu);
	for (const_count = 0, src = 0; src < num_src; ++src) {
		sel = alu->src[src].sel;
		elem = alu->src[src].chan;
		if (is_const(sel)) { // Any constant, including literal and inline constants
			if (const_count >= 2)
				// More than two references to a constant in
				// transcendental operation.
				return -1;
			else
				const_count++;
		}
		if (is_cfile(sel)) {
			r = reserve_cfile(bs, sel, elem);
			if (r)
				return r;
		}
	}
	for (src = 0; src < num_src; ++src) {
		sel = alu->src[src].sel;
		elem = alu->src[src].chan;
		if (is_gpr(sel)) {
			cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
			if (cycle < const_count)
				// Cycle for GPR load conflicts with
				// constant load in transcendental operation.
				return -1;
			r = reserve_gpr(bs, sel, elem, cycle);
			if (r)
				return r;
		}
		// Constants already processed
		// No restrictions on PV, PS
	}
	return 0;
}

static int check_and_set_bank_swizzle(struct r600_bc_alu *slots[5])
{
	struct alu_bank_swizzle bs;
	int bank_swizzle[5];
	int i, r = 0, forced = 0;

	for (i = 0; i < 5; i++)
		if (slots[i] && slots[i]->bank_swizzle_force) {
			slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
			forced = 1;
		}

	if (forced)
		return 0;

	// just check every possible combination of bank swizzles;
	// not very efficient, but works on the first try in most cases
	for (i = 0; i < 4; i++)
		bank_swizzle[i] = SQ_ALU_VEC_012;
	bank_swizzle[4] = SQ_ALU_SCL_210;
	while (bank_swizzle[4] <= SQ_ALU_SCL_221) {
		init_bank_swizzle(&bs);
		for (i = 0; i < 4; i++) {
			if (slots[i]) {
				r = check_vector(slots[i], &bs, bank_swizzle[i]);
				if (r)
					break;
			}
		}
		if (!r && slots[4]) {
			r = check_scalar(slots[4], &bs, bank_swizzle[4]);
		}
		if (!r) {
			for (i = 0; i < 5; i++) {
				if (slots[i])
					slots[i]->bank_swizzle = bank_swizzle[i];
			}
			return 0;
		}

		for (i = 0; i < 5; i++) {
			bank_swizzle[i]++;
			if (bank_swizzle[i] <= SQ_ALU_VEC_210)
				break;
			else
				bank_swizzle[i] = SQ_ALU_VEC_012;
		}
	}

	// couldn't find a working swizzle
	return -1;
}

static int replace_gpr_with_pv_ps(struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	int gpr[5], chan[5];
	int i, j, r, src, num_src;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		if (prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
			gpr[i] = prev[i]->dst.sel;
			if (is_alu_reduction_inst(prev[i]))
				chan[i] = 0;
			else
				chan[i] = prev[i]->dst.chan;
		} else
			gpr[i] = -1;
	}

	for (i = 0; i < 5; ++i) {
		struct r600_bc_alu *alu = slots[i];
		if (!alu)
			continue;

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
				continue;

			if (alu->src[src].sel == gpr[4] &&
				alu->src[src].chan == chan[4]) {
				alu->src[src].sel = V_SQ_ALU_SRC_PS;
				alu->src[src].chan = 0;
				continue;
			}

			for (j = 0; j < 4; ++j) {
				if (alu->src[src].sel == gpr[j] &&
					alu->src[src].chan == j) {
					alu->src[src].sel = V_SQ_ALU_SRC_PV;
					alu->src[src].chan = chan[j];
					break;
				}
			}
		}
	}

	return 0;
}

void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg)
{
	switch (value) {
	case 0:
		*sel = V_SQ_ALU_SRC_0;
		break;
	case 1:
		*sel = V_SQ_ALU_SRC_1_INT;
		break;
	case -1:
		*sel = V_SQ_ALU_SRC_M_1_INT;
		break;
	case 0x3F800000: // 1.0f
		*sel = V_SQ_ALU_SRC_1;
		break;
	case 0x3F000000: // 0.5f
		*sel = V_SQ_ALU_SRC_0_5;
		break;
	case 0xBF800000: // -1.0f
		*sel = V_SQ_ALU_SRC_1;
		*neg ^= 1;
		break;
	case 0xBF000000: // -0.5f
		*sel = V_SQ_ALU_SRC_0_5;
		*neg ^= 1;
		break;
	default:
		*sel = V_SQ_ALU_SRC_LITERAL;
		break;
	}
}

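/* Illustrative use of the mapping above: a 1.0f literal becomes the free
 * inline constant V_SQ_ALU_SRC_1, and -1.0f (0xBF800000) becomes
 * V_SQ_ALU_SRC_1 with the source negate bit toggled, so neither consumes a
 * literal slot:
 *
 *   unsigned sel, neg = 0;
 *   r600_bc_special_constants(0xBF800000, &sel, &neg);
 *   // now sel == V_SQ_ALU_SRC_1 and neg == 1
 */
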
/* compute how many literals are needed */
static int r600_bc_alu_nliterals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned *nliteral)
{
	unsigned num_src = r600_bc_get_num_operands(alu);
	unsigned i, j;

	for (i = 0; i < num_src; ++i) {
		if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			uint32_t value = alu->src[i].value[alu->src[i].chan];
			unsigned found = 0;
			for (j = 0; j < *nliteral; ++j) {
				if (literal[j] == value) {
					found = 1;
					break;
				}
			}
			if (!found) {
				if (*nliteral >= 4)
					return -EINVAL;
				literal[(*nliteral)++] = value;
			}
		}
	}
	return 0;
}

static void r600_bc_alu_adjust_literals(struct r600_bc_alu *alu, uint32_t literal[4], unsigned nliteral)
{
	unsigned num_src = r600_bc_get_num_operands(alu);
	unsigned i, j;

	for (i = 0; i < num_src; ++i) {
		if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			uint32_t value = alu->src[i].value[alu->src[i].chan];
			for (j = 0; j < nliteral; ++j) {
				if (literal[j] == value) {
					alu->src[i].chan = j;
					break;
				}
			}
		}
	}
}

static int merge_inst_groups(struct r600_bc *bc, struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	struct r600_bc_alu *result[5] = { NULL };

	uint32_t literal[4];
	unsigned nliteral = 0;

	int i, j, r, src, num_src;
	int num_once_inst = 0;

	r = assign_alu_units(alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		/* check number of literals */
		if (prev[i] && r600_bc_alu_nliterals(prev[i], literal, &nliteral))
			return 0;
		if (slots[i] && r600_bc_alu_nliterals(slots[i], literal, &nliteral))
			return 0;

		// let's check used slots
		if (prev[i] && !slots[i]) {
			result[i] = prev[i];
			num_once_inst += is_alu_once_inst(prev[i]);
			continue;
		} else if (prev[i] && slots[i]) {
			if (result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
				// trans unit is still free, try to use it
				if (is_alu_any_unit_inst(slots[i])) {
					result[i] = prev[i];
					result[4] = slots[i];
				} else if (is_alu_any_unit_inst(prev[i])) {
					result[i] = slots[i];
					result[4] = prev[i];
				} else
					return 0;
			} else
				return 0;
		} else if (!slots[i]) {
			continue;
		} else
			result[i] = slots[i];

		// let's check source gprs
		struct r600_bc_alu *alu = slots[i];
		num_once_inst += is_alu_once_inst(alu);

		num_src = r600_bc_get_num_operands(alu);
		for (src = 0; src < num_src; ++src) {
			// constants don't matter
			if (!is_gpr(alu->src[src].sel))
				continue;

			for (j = 0; j < 5; ++j) {
				if (!prev[j] || !prev[j]->dst.write)
					continue;

				// if it's relative then we can't determine which gpr is really used
				if (prev[j]->dst.chan == alu->src[src].chan &&
					(prev[j]->dst.sel == alu->src[src].sel ||
					prev[j]->dst.rel || alu->src[src].rel))
					return 0;
			}
		}
	}

	/* more than one PRED_ or KILL_ ? */
	if (num_once_inst > 1)
		return 0;

	/* check if the result can still be swizzled */
	r = check_and_set_bank_swizzle(result);
	if (r)
		return 0;

	/* looks like everything worked out right, apply the changes */

	/* sort instructions */
	for (i = 0; i < 5; ++i) {
		slots[i] = result[i];
		if (result[i]) {
			LIST_DEL(&result[i]->list);
			result[i]->last = 0;
			LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
		}
	}

	/* determine new last instruction */
	LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list)->last = 1;

	/* determine new first instruction */
	for (i = 0; i < 5; ++i) {
		if (result[i]) {
			bc->cf_last->curr_bs_head = result[i];
			break;
		}
	}

	bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
	bc->cf_last->prev2_bs_head = NULL;

	return 0;
}

/* This code handles kcache lines as single blocks of 32 constants. We could
 * probably do slightly better by recognizing that we actually have two
 * consecutive lines of 16 constants, but the resulting code would also be
 * somewhat more complicated. */
static int r600_bc_alloc_kcache_lines(struct r600_bc *bc, struct r600_bc_alu *alu, int type)
{
	struct r600_bc_kcache *kcache = bc->cf_last->kcache;
	unsigned int required_lines;
	unsigned int free_lines = 0;
	unsigned int cache_line[3];
	unsigned int count = 0;
	unsigned int i, j;
	int r;

	/* Collect required cache lines. */
	for (i = 0; i < 3; ++i) {
		bool found = false;
		unsigned int line;

		if (alu->src[i].sel < 512)
			continue;

		line = ((alu->src[i].sel - 512) / 32) * 2;

		for (j = 0; j < count; ++j) {
			if (cache_line[j] == line) {
				found = true;
				break;
			}
		}

		if (!found)
			cache_line[count++] = line;
	}

	/* This should never actually happen. */
	if (count >= 3) return -ENOMEM;

	for (i = 0; i < 2; ++i) {
		if (kcache[i].mode == V_SQ_CF_KCACHE_NOP) {
			++free_lines;
		}
	}

	/* Filter lines pulled in by previous instructions. Note that this is
	 * only for the required_lines count, we can't remove these from the
	 * cache_line array since we may have to start a new ALU clause. */
	for (i = 0, required_lines = count; i < count; ++i) {
		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == cache_line[i]) {
				--required_lines;
				break;
			}
		}
	}

	/* Start a new ALU clause if needed. */
	if (required_lines > free_lines) {
		if ((r = r600_bc_add_cf(bc))) {
			return r;
		}
		bc->cf_last->inst = (type << 3);
		kcache = bc->cf_last->kcache;
	}

	/* Setup the kcache lines. */
	for (i = 0; i < count; ++i) {
		bool found = false;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == cache_line[i]) {
				found = true;
				break;
			}
		}

		if (found) continue;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_NOP) {
				kcache[j].bank = 0;
				kcache[j].addr = cache_line[i];
				kcache[j].mode = V_SQ_CF_KCACHE_LOCK_2;
				break;
			}
		}
	}

	/* Alter the src operands to refer to the kcache. */
	for (i = 0; i < 3; ++i) {
		static const unsigned int base[] = {128, 160, 256, 288};
		unsigned int line;

		if (alu->src[i].sel < 512)
			continue;

		alu->src[i].sel -= 512;
		line = (alu->src[i].sel / 32) * 2;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == line) {
				alu->src[i].sel &= 0x1f;
				alu->src[i].sel += base[j];
				break;
			}
		}
	}

	return 0;
}

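/* Worked example for the remapping above (illustrative numbers): a source
 * with sel == 600 is CB constant 88 (600 - 512), so its cache line pair is
 * (88 / 32) * 2 == 4. If that pair gets locked into kcache set 0, the
 * operand is rewritten to sel = (88 & 0x1f) + base[0] = 24 + 128 = 152,
 * i.e. an offset into the first kcache window. */
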
int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
{
	struct r600_bc_alu *nalu = r600_bc_alu();
	struct r600_bc_alu *lalu;
	int i, r;

	if (nalu == NULL)
		return -ENOMEM;
	memcpy(nalu, alu, sizeof(struct r600_bc_alu));

	if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
		/* check if we could add it anyway */
		if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
			type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
			LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
				if (lalu->predicate) {
					bc->force_add_cf = 1;
					break;
				}
			}
		} else
			bc->force_add_cf = 1;
	}

	/* a cf can contain only alu, only vtx or only tex instructions */
	if (bc->cf_last == NULL || bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nalu);
			return r;
		}
	}
	bc->cf_last->inst = (type << 3);

	/* Setup the kcache for this ALU instruction. This will start a new
	 * ALU clause if needed. */
	if ((r = r600_bc_alloc_kcache_lines(bc, nalu, type))) {
		free(nalu);
		return r;
	}

	if (!bc->cf_last->curr_bs_head) {
		bc->cf_last->curr_bs_head = nalu;
	}
	/* at most 128 slots, one add alu can add 5 slots + 4 constants (2 slots)
	 * in the worst case */
	if (nalu->last && (bc->cf_last->ndw >> 1) >= 120) {
		bc->force_add_cf = 1;
	}
	/* replace special constants */
	for (i = 0; i < 3; i++) {
		if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
			r600_bc_special_constants(
				nalu->src[i].value[nalu->src[i].chan],
				&nalu->src[i].sel, &nalu->src[i].neg);
	}
	LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
	/* each alu uses 2 dwords */
	bc->cf_last->ndw += 2;
	bc->ndw += 2;

	/* process current ALU instructions for bank swizzle */
	if (nalu->last) {
		struct r600_bc_alu *slots[5];
		r = assign_alu_units(bc->cf_last->curr_bs_head, slots);
		if (r)
			return r;

		if (bc->cf_last->prev_bs_head) {
			r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		if (bc->cf_last->prev_bs_head) {
			r = replace_gpr_with_pv_ps(slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		r = check_and_set_bank_swizzle(slots);
		if (r)
			return r;

		bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
		bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
		bc->cf_last->curr_bs_head = NULL;
	}
	return 0;
}

int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
{
	return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
}

static void r600_bc_remove_alu(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
{
	if (alu->last && alu->list.prev != &cf->alu) {
		PREV_ALU(alu)->last = 1;
	}
	LIST_DEL(&alu->list);
	free(alu);
	cf->ndw -= 2;
}

int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
{
	struct r600_bc_vtx *nvtx = r600_bc_vtx();
	int r;

	if (nvtx == NULL)
		return -ENOMEM;
	memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));

	/* a cf can contain only alu, only vtx or only tex instructions */
	if (bc->cf_last == NULL ||
	    (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
	     bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) ||
	    bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nvtx);
			return r;
		}
		bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
	}
	LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
	/* each fetch uses 4 dwords */
	bc->cf_last->ndw += 4;
	bc->ndw += 4;
	if ((bc->cf_last->ndw / 4) > 7)
		bc->force_add_cf = 1;
	return 0;
}

int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
{
	struct r600_bc_tex *ntex = r600_bc_tex();
	int r;

	if (ntex == NULL)
		return -ENOMEM;
	memcpy(ntex, tex, sizeof(struct r600_bc_tex));

	/* a cf can contain only alu, only vtx or only tex instructions */
	if (bc->cf_last == NULL ||
	    bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
	    bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(ntex);
			return r;
		}
		bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
	}
	LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
	/* each texture fetch uses 4 dwords */
	bc->cf_last->ndw += 4;
	bc->ndw += 4;
	if ((bc->cf_last->ndw / 4) > 7)
		bc->force_add_cf = 1;
	return 0;
}

int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
{
	int r;
	r = r600_bc_add_cf(bc);
	if (r)
		return r;

	bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
	bc->cf_last->inst = inst;
	return 0;
}

/* common to all 3 families */
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
{
	unsigned fetch_resource_start = 0;

	/* check if this is a fetch shader */
	/* fetch shaders can also access vertex resources;
	 * the first fetch shader resource is at 160
	 */
	if (bc->type == -1) {
		switch (bc->chiprev) {
		/* r600 */
		case CHIPREV_R600:
		/* r700 */
		case CHIPREV_R700:
			fetch_resource_start = 160;
			break;
		/* evergreen */
		case CHIPREV_EVERGREEN:
			fetch_resource_start = 0;
			break;
		default:
			fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
				__FILE__, __func__, __LINE__, bc->chiprev);
			break;
		}
	}
	bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
			S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
			S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
			S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
	bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
			S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
			S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
			S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
			S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
			S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
			S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
			S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
			S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
			S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
	bc->bytecode[id++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
	bc->bytecode[id++] = 0;
	return 0;
}

/* common to all 3 families */
static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
{
	bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
				S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
				S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
				S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
	bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
				S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
				S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
				S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
				S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
				S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
				S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
				S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
				S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
				S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
				S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
	bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
				S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
				S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
				S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
				S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
				S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
				S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
				S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
	bc->bytecode[id++] = 0;
	return 0;
}

/* r600 only, r700/eg bits in r700_asm.c */
static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
{
	/* don't replace gpr by pv or ps for destination register */
	bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
				S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
				S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
				S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
				S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
				S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
				S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
				S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
				S_SQ_ALU_WORD0_LAST(alu->last);

	if (alu->is_op3) {
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
					S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
					S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
					S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
					S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
	} else {
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
					S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
					S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
					S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
					S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
					S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
					S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
	}
	return 0;
}

enum cf_class
{
	CF_CLASS_ALU,
	CF_CLASS_TEXTURE,
	CF_CLASS_VERTEX,
	CF_CLASS_EXPORT,
	CF_CLASS_OTHER
};

static enum cf_class get_cf_class(struct r600_bc_cf *cf)
{
	switch (cf->inst) {
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
		return CF_CLASS_ALU;

	case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
		return CF_CLASS_TEXTURE;

	case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
	case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
		return CF_CLASS_VERTEX;

	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		return CF_CLASS_EXPORT;

	case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
	case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
	case V_SQ_CF_WORD1_SQ_CF_INST_POP:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
	case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
	case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
		return CF_CLASS_OTHER;

	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
}

/* common for r600/r700 - eg in eg_asm.c */
static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	unsigned id = cf->id;
	unsigned end_of_program = bc->cf.prev == &cf->list;

	switch (get_cf_class(cf)) {
	case CF_CLASS_ALU:
		assert(!end_of_program);
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
			S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache[1].bank);

		bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
			S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
			S_SQ_CF_ALU_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
			S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
		break;
	case CF_CLASS_TEXTURE:
	case CF_CLASS_VERTEX:
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1) |
			S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_EXPORT:
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf->output.burst_count - 1) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program);
		break;
	case CF_CLASS_OTHER:
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
			S_SQ_CF_WORD1_BARRIER(cf->barrier) |
			S_SQ_CF_WORD1_COND(cf->cond) |
			S_SQ_CF_WORD1_POP_COUNT(cf->pop_count) |
			S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);

		break;
	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
	return 0;
}

struct gpr_usage_range {
	int	replacement;
	int32_t	start;
	int32_t	end;
};

struct gpr_usage {
	unsigned	channels:4;
	int32_t		first_write;
	int32_t		last_write[4];
	unsigned	nranges;
	struct gpr_usage_range *ranges;
};

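/* A note on the id values threaded through this pass (our reading of the
 * code, not a comment from the original author): ids appear to pack a CF
 * clause in the upper bits and an instruction position in the low byte, so
 * (id & ~0xFF) recovers the clause base (see is_barrier_needed()) and
 * notice_export_gprs() assigns an export the first id of the clause after
 * the last write, ((last_write + 0x100) & ~0xFF). */
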
static struct gpr_usage_range* add_gpr_usage_range(struct gpr_usage *usage)
{
	usage->nranges++;
	usage->ranges = realloc(usage->ranges, usage->nranges * sizeof(struct gpr_usage_range));
	if (!usage->ranges)
		return NULL;
	return &usage->ranges[usage->nranges-1];
}

static void notice_gpr_read(struct gpr_usage *usage, int32_t id, unsigned chan)
{
	usage->channels |= 1 << chan;
	usage->first_write = -1;
	if (!usage->nranges) {
		struct gpr_usage_range* range = add_gpr_usage_range(usage);
		range->replacement = -1;
		range->start = -1;
		range->end = -1;
	}
	if (usage->ranges[usage->nranges-1].end < id)
		usage->ranges[usage->nranges-1].end = id;
}

static void notice_gpr_rel_read(struct gpr_usage usage[128], int32_t id, unsigned chan)
{
	unsigned i;
	for (i = 0; i < 128; ++i)
		notice_gpr_read(&usage[i], id, chan);
}

static void notice_gpr_last_write(struct gpr_usage *usage, int32_t id, unsigned chan)
{
	usage->last_write[chan] = id;
}

static void notice_gpr_write(struct gpr_usage *usage, int32_t id, unsigned chan,
		int predicate, int prefered_replacement)
{
	int32_t start = usage->first_write != -1 ? usage->first_write : id;
	usage->channels &= ~(1 << chan);
	if (usage->channels) {
		if (usage->first_write == -1)
			usage->first_write = id;
	} else if (!usage->nranges || (usage->ranges[usage->nranges-1].start != start && !predicate)) {
		usage->first_write = start;
		struct gpr_usage_range* range = add_gpr_usage_range(usage);
		range->replacement = prefered_replacement;
		range->start = start;
		range->end = -1;
	} else if (usage->ranges[usage->nranges-1].start == start && prefered_replacement != -1) {
		usage->ranges[usage->nranges-1].replacement = prefered_replacement;
	}
	notice_gpr_last_write(usage, id, chan);
}

static void notice_gpr_rel_last_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
{
	unsigned i;
	for (i = 0; i < 128; ++i)
		notice_gpr_last_write(&usage[i], id, chan);
}

static void notice_gpr_rel_write(struct gpr_usage usage[128], int32_t id, unsigned chan)
{
	unsigned i;
	for (i = 0; i < 128; ++i)
		notice_gpr_write(&usage[i], id, chan, 1, -1);
}

static void notice_alu_src_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128], int32_t id)
{
	unsigned src, num_src;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; ++src) {
		// constants don't matter
		if (!is_gpr(alu->src[src].sel))
			continue;

		if (alu->src[src].rel)
			notice_gpr_rel_read(usage, id, alu->src[src].chan);
		else
			notice_gpr_read(&usage[alu->src[src].sel], id, alu->src[src].chan);
	}
}

static void notice_alu_dst_gprs(struct r600_bc_alu *alu_first, struct gpr_usage usage[128],
		int32_t id, int predicate)
{
	struct r600_bc_alu *alu;
	for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
		if (alu->dst.write) {
			if (alu->dst.rel)
				notice_gpr_rel_write(usage, id, alu->dst.chan);
			else if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV && is_gpr(alu->src[0].sel))
				notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan,
						predicate, alu->src[0].sel);
			else
				notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan, predicate, -1);
		}

		if (alu->last)
			break;
	}
}

static void notice_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
		int32_t id, int predicate)
{
	if (tex->src_rel) {
		if (tex->src_sel_x < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_rel_read(usage, id, tex->src_sel_w);
	} else {
		if (tex->src_sel_x < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_x);
		if (tex->src_sel_y < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_y);
		if (tex->src_sel_z < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_z);
		if (tex->src_sel_w < 4)
			notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_w);
	}
	if (tex->dst_rel) {
		if (tex->dst_sel_x != 7)
			notice_gpr_rel_write(usage, id, 0);
		if (tex->dst_sel_y != 7)
			notice_gpr_rel_write(usage, id, 1);
		if (tex->dst_sel_z != 7)
			notice_gpr_rel_write(usage, id, 2);
		if (tex->dst_sel_w != 7)
			notice_gpr_rel_write(usage, id, 3);
	} else {
		if (tex->dst_sel_x != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 0, predicate, -1);
		if (tex->dst_sel_y != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 1, predicate, -1);
		if (tex->dst_sel_z != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 2, predicate, -1);
		if (tex->dst_sel_w != 7)
			notice_gpr_write(&usage[tex->dst_gpr], id, 3, predicate, -1);
	}
}

static void notice_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
		int32_t id, int predicate)
{
	notice_gpr_read(&usage[vtx->src_gpr], id, vtx->src_sel_x);

	if (vtx->dst_sel_x != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 0, predicate, -1);
	if (vtx->dst_sel_y != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 1, predicate, -1);
	if (vtx->dst_sel_z != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 2, predicate, -1);
	if (vtx->dst_sel_w != 7)
		notice_gpr_write(&usage[vtx->dst_gpr], id, 3, predicate, -1);
}

static void notice_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
		struct r600_bc_cf *export_cf[128], int32_t export_remap[128])
{
	//TODO handle other memory operations
	struct gpr_usage *output = &usage[cf->output.gpr];
	int32_t id = (output->last_write[0] + 0x100) & ~0xFF;

	export_cf[cf->output.gpr] = cf;
	export_remap[cf->output.gpr] = id;
	if (cf->output.swizzle_x < 4)
		notice_gpr_read(output, id, cf->output.swizzle_x);
	if (cf->output.swizzle_y < 4)
		notice_gpr_read(output, id, cf->output.swizzle_y);
	if (cf->output.swizzle_z < 4)
		notice_gpr_read(output, id, cf->output.swizzle_z);
	if (cf->output.swizzle_w < 4)
		notice_gpr_read(output, id, cf->output.swizzle_w);
}

static struct gpr_usage_range *find_src_range(struct gpr_usage *usage, int32_t id)
{
	unsigned i;
	for (i = 0; i < usage->nranges; ++i) {
		struct gpr_usage_range* range = &usage->ranges[i];

		if (range->start < id && id <= range->end)
			return range;
	}
	return NULL;
}

static struct gpr_usage_range *find_dst_range(struct gpr_usage *usage, int32_t id)
{
	unsigned i;
	for (i = 0; i < usage->nranges; ++i) {
		struct gpr_usage_range* range = &usage->ranges[i];
		int32_t end = range->end;

		if (range->start <= id && (id < end || end == -1))
			return range;
	}
	assert(0); /* should not happen */
	return NULL;
}

static int is_barrier_needed(struct gpr_usage *usage, int32_t id, unsigned chan, int32_t last_barrier)
{
	if (usage->last_write[chan] != (id & ~0xFF))
		return usage->last_write[chan] >= last_barrier;
	else
		return 0;
}

static int is_intersection(struct gpr_usage_range* a, struct gpr_usage_range* b)
{
	return a->start <= b->end && b->start < a->end;
}

static int rate_replacement(struct gpr_usage *usage, struct gpr_usage_range* range)
{
	unsigned i;
	int32_t best_start = 0x3FFFFFFF, best_end = 0x3FFFFFFF;

	for (i = 0; i < usage->nranges; ++i) {
		if (usage->ranges[i].replacement != -1)
			continue; /* ignore already remapped ranges */

		if (is_intersection(&usage->ranges[i], range))
			return -1; /* forget it if usages overlap */

		if (range->start >= usage->ranges[i].end)
			best_start = MIN2(best_start, range->start - usage->ranges[i].end);

		if (range->end != -1 && range->end <= usage->ranges[i].start)
			best_end = MIN2(best_end, usage->ranges[i].start - range->end);
	}
	return best_start + best_end;
}

static void find_replacement(struct gpr_usage usage[128], unsigned current,
		struct gpr_usage_range *range, int is_export)
{
	unsigned i;
	int best_gpr = -1, best_rate = 0x7FFFFFFF;

	if (range->replacement != -1 && range->replacement <= current) {
		struct gpr_usage_range *other = find_src_range(&usage[range->replacement], range->start);
		if (other && other->replacement != -1)
			range->replacement = other->replacement;
	}

	if (range->replacement != -1 && range->replacement < current) {
		int rate = rate_replacement(&usage[range->replacement], range);

		/* check if the preferred replacement can be used */
		if (rate != -1) {
			best_rate = rate;
			best_gpr = range->replacement;
		}
	}

	if (best_gpr == -1 && (range->start & ~0xFF) == (range->end & ~0xFF)) {
		/* register is just used inside one ALU clause */
		/* try to use clause temporaries for it */
		for (i = 127; i > 123; --i) {
			int rate = rate_replacement(&usage[i], range);

			if (rate == -1) /* can't be used because ranges overlap */
				continue;

			if (rate < best_rate) {
				best_rate = rate;
				best_gpr = i;

				/* can't get better than this */
				if (rate == 0 || is_export)
					break;
			}
		}
	}

	if (best_gpr == -1) {
		for (i = 0; i < current; ++i) {
			int rate = rate_replacement(&usage[i], range);

			if (rate == -1) /* can't be used because ranges overlap */
				continue;

			if (rate < best_rate) {
				best_rate = rate;
				best_gpr = i;

				/* can't get better than this */
				if (rate == 0)
					break;
			}
		}
	}

	range->replacement = best_gpr;
	if (best_gpr != -1) {
		struct gpr_usage_range *reservation = add_gpr_usage_range(&usage[best_gpr]);
		reservation->replacement = -1;
		reservation->start = range->start;
		reservation->end = range->end;
	}
}

static void find_export_replacement(struct gpr_usage usage[128],
		struct gpr_usage_range *range, struct r600_bc_cf *current,
		struct r600_bc_cf *next, int32_t next_id)
{
	if (!next || next_id <= range->start || next_id > range->end)
		return;

	if (current->output.type != next->output.type)
		return;

	if ((current->output.array_base + 1) != next->output.array_base)
		return;

	find_src_range(&usage[next->output.gpr], next_id)->replacement = range->replacement + 1;
}

static void replace_alu_gprs(struct r600_bc_alu *alu, struct gpr_usage usage[128],
		int32_t id, int32_t last_barrier, unsigned *barrier)
{
	struct gpr_usage *cur_usage;
	struct gpr_usage_range *range;
	unsigned src, num_src;

	num_src = r600_bc_get_num_operands(alu);
	for (src = 0; src < num_src; ++src) {
		// constants don't matter
		if (!is_gpr(alu->src[src].sel))
			continue;

		cur_usage = &usage[alu->src[src].sel];
		range = find_src_range(cur_usage, id);
		if (range->replacement != -1)
			alu->src[src].sel = range->replacement;

		*barrier |= is_barrier_needed(cur_usage, id, alu->src[src].chan, last_barrier);
	}

	if (alu->dst.write) {
		cur_usage = &usage[alu->dst.sel];
		range = find_dst_range(cur_usage, id);
		if (range->replacement == alu->dst.sel) {
			if (!alu->is_op3)
				alu->dst.write = 0;
			else
				/* TODO: really check that register 123 is usable */
				alu->dst.sel = 123;
		} else if (range->replacement != -1) {
			alu->dst.sel = range->replacement;
		}
		if (alu->dst.rel)
			notice_gpr_rel_last_write(usage, id, alu->dst.chan);
		else
			notice_gpr_last_write(cur_usage, id, alu->dst.chan);
	}
}

1751 static void replace_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
1752 int32_t id, int32_t last_barrier, unsigned *barrier)
1753 {
1754 struct gpr_usage *cur_usage = &usage[tex->src_gpr];
1755 struct gpr_usage_range *range = find_src_range(cur_usage, id);
1756
1757 if (tex->src_rel) {
1758 *barrier = 1;
1759 } else {
1760 if (tex->src_sel_x < 4)
1761 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_x, last_barrier);
1762 if (tex->src_sel_y < 4)
1763 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_y, last_barrier);
1764 if (tex->src_sel_z < 4)
1765 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_z, last_barrier);
1766 if (tex->src_sel_w < 4)
1767 *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_w, last_barrier);
1768 }
1769
1770 if (range->replacement != -1)
1771 tex->src_gpr = range->replacement;
1772
1773 cur_usage = &usage[tex->dst_gpr];
1774 range = find_dst_range(cur_usage, id);
1775 if (range->replacement != -1)
1776 tex->dst_gpr = range->replacement;
1777
1778 if (tex->dst_rel) {
1779 if (tex->dst_sel_x != 7)
1780 notice_gpr_rel_last_write(usage, id, tex->dst_sel_x);
1781 if (tex->dst_sel_y != 7)
1782 notice_gpr_rel_last_write(usage, id, tex->dst_sel_y);
1783 if (tex->dst_sel_z != 7)
1784 notice_gpr_rel_last_write(usage, id, tex->dst_sel_z);
1785 if (tex->dst_sel_w != 7)
1786 notice_gpr_rel_last_write(usage, id, tex->dst_sel_w);
1787 } else {
1788 if (tex->dst_sel_x != 7)
1789 notice_gpr_last_write(cur_usage, id, tex->dst_sel_x);
1790 if (tex->dst_sel_y != 7)
1791 notice_gpr_last_write(cur_usage, id, tex->dst_sel_y);
1792 if (tex->dst_sel_z != 7)
1793 notice_gpr_last_write(cur_usage, id, tex->dst_sel_z);
1794 if (tex->dst_sel_w != 7)
1795 notice_gpr_last_write(cur_usage, id, tex->dst_sel_w);
1796 }
1797 }
1798
1799 static void replace_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
1800 int32_t id, int32_t last_barrier, unsigned *barrier)
1801 {
1802 struct gpr_usage *cur_usage = &usage[vtx->src_gpr];
1803 struct gpr_usage_range *range = find_src_range(cur_usage, id);
1804
1805 *barrier |= is_barrier_needed(cur_usage, id, vtx->src_sel_x, last_barrier);
1806
1807 if (range->replacement != -1)
1808 vtx->src_gpr = range->replacement;
1809
1810 cur_usage = &usage[vtx->dst_gpr];
1811 range = find_dst_range(cur_usage, id);
1812 if (range->replacement != -1)
1813 vtx->dst_gpr = range->replacement;
1814
1815 if (vtx->dst_sel_x != 7)
1816 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_x);
1817 if (vtx->dst_sel_y != 7)
1818 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_y);
1819 if (vtx->dst_sel_z != 7)
1820 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_z);
1821 if (vtx->dst_sel_w != 7)
1822 notice_gpr_last_write(cur_usage, id, vtx->dst_sel_w);
1823 }
1824
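/* Rewrite the source GPR of an export; an id of -1 is passed to
* is_barrier_needed() so that every earlier write counts, presumably
* because exports get moved to the end of the program. */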
1825 static void replace_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
1826 int32_t id, int32_t last_barrier)
1827 {
1828 //TODO handle other memory operations
1829 struct gpr_usage *cur_usage = &usage[cf->output.gpr];
1830 struct gpr_usage_range *range = find_src_range(cur_usage, id);
1831
1832 cf->barrier = 0;
1833 if (cf->output.swizzle_x < 4)
1834 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_x, last_barrier);
1835 if (cf->output.swizzle_y < 4)
1836 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_y, last_barrier);
1837 if (cf->output.swizzle_z < 4)
1838 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_z, last_barrier);
1839 if (cf->output.swizzle_w < 4)
1840 cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_w, last_barrier);
1841
1842 if (range->replacement != -1)
1843 cf->output.gpr = range->replacement;
1844 }
1845
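/* Try to remove a MOV whose destination already equals its source.
* This is only safe if the next instruction group doesn't read the
* value through PV/PS, the registers that forward the previous
* group's vector/scalar results. */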
1846 static void optimize_alu_inst(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
1847 {
1848 struct r600_bc_alu *alu_next;
1849 unsigned chan;
1850 unsigned src, num_src;
1851
1852 /* check if a MOV could be optimized away */
1853 if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV) {
1854
1855 /* destination equals source? */
1856 if (alu->dst.sel != alu->src[0].sel ||
1857 alu->dst.chan != alu->src[0].chan)
1858 return;
1859
1860 /* any special handling for the source? */
1861 if (alu->src[0].rel || alu->src[0].neg || alu->src[0].abs)
1862 return;
1863
1864 /* any special handling for destination? */
1865 if (alu->dst.rel || alu->dst.clamp)
1866 return;
1867
/* find the end of this instruction group and check if the next group reads PV/PS */
1869 for (alu_next = alu; !alu_next->last; alu_next = NEXT_ALU(alu_next));
1870
1871 if (alu_next->list.next != &cf->alu) {
1872 chan = is_alu_reduction_inst(alu) ? 0 : alu->dst.chan;
1873 for (alu_next = NEXT_ALU(alu_next); alu_next; alu_next = NEXT_ALU(alu_next)) {
1874 num_src = r600_bc_get_num_operands(alu_next);
1875 for (src = 0; src < num_src; ++src) {
1876 if (alu_next->src[src].sel == V_SQ_ALU_SRC_PV &&
1877 alu_next->src[src].chan == chan)
1878 return;
1879
1880 if (alu_next->src[src].sel == V_SQ_ALU_SRC_PS)
1881 return;
1882 }
1883
1884 if (alu_next->last)
1885 break;
1886 }
1887 }
1888
1889 r600_bc_remove_alu(cf, alu);
1890 }
1891 }
1892
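/* Merge two adjacent exports with consecutive GPRs and array bases
* into a single burst export of at most 16 elements. */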
1893 static void optimize_export_inst(struct r600_bc *bc, struct r600_bc_cf *cf)
1894 {
1895 struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, cf->list.prev, list);
1896 if (&prev->list == &bc->cf ||
1897 prev->inst != cf->inst ||
1898 prev->output.type != cf->output.type ||
1899 prev->output.elem_size != cf->output.elem_size ||
1900 prev->output.swizzle_x != cf->output.swizzle_x ||
1901 prev->output.swizzle_y != cf->output.swizzle_y ||
1902 prev->output.swizzle_z != cf->output.swizzle_z ||
1903 prev->output.swizzle_w != cf->output.swizzle_w)
1904 return;
1905
1906 if ((prev->output.burst_count + cf->output.burst_count) > 16)
1907 return;
1908
1909 if ((prev->output.gpr + prev->output.burst_count) == cf->output.gpr &&
1910 (prev->output.array_base + prev->output.burst_count) == cf->output.array_base) {
1911
1912 prev->output.burst_count += cf->output.burst_count;
1913 r600_bc_remove_cf(bc, cf);
1914
1915 } else if (prev->output.gpr == (cf->output.gpr + cf->output.burst_count) &&
1916 prev->output.array_base == (cf->output.array_base + cf->output.burst_count)) {
1917
1918 cf->output.burst_count += prev->output.burst_count;
1919 r600_bc_remove_cf(bc, prev);
1920 }
1921 }
1922
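/* Optimize GPR usage in two passes: first record the live ranges of
* all GPRs, then replace GPRs with free lower-numbered ones, recompute
* barriers and move exports towards the end of the program. Each CF
* block gets its own 0x100-aligned range of instruction ids. */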
1923 static void r600_bc_optimize(struct r600_bc *bc)
1924 {
1925 struct r600_bc_cf *cf, *next_cf;
1926 struct r600_bc_alu *first, *next_alu;
1927 struct r600_bc_alu *alu;
1928 struct r600_bc_vtx *vtx;
1929 struct r600_bc_tex *tex;
1930 struct gpr_usage usage[128];
1931
1932 /* assume that each gpr is exported only once */
1933 struct r600_bc_cf *export_cf[128] = { NULL };
1934 int32_t export_remap[128];
1935
1936 int32_t id, barrier[bc->nstack];
1937 unsigned i, j, stack, predicate, old_stack;
1938
1939 memset(&usage, 0, sizeof(usage));
1940 for (i = 0; i < 128; ++i) {
1941 usage[i].first_write = -1;
1942 usage[i].last_write[0] = -1;
1943 usage[i].last_write[1] = -1;
1944 usage[i].last_write[2] = -1;
1945 usage[i].last_write[3] = -1;
1946 }
1947
/* first gather some information about GPR usage */
1949 id = 0; stack = 0;
1950 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1951 switch (get_cf_class(cf)) {
1952 case CF_CLASS_ALU:
1953 predicate = 0;
1954 first = NULL;
1955 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1956 if (!first)
1957 first = alu;
1958 notice_alu_src_gprs(alu, usage, id);
1959 if (alu->last) {
1960 notice_alu_dst_gprs(first, usage, id, predicate || stack > 0);
1961 first = NULL;
1962 ++id;
1963 }
1964 if (is_alu_pred_inst(alu))
1965 predicate++;
1966 }
1967 if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
1968 stack += predicate;
1969 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
1970 stack -= 1;
1971 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
1972 stack -= 2;
1973 break;
1974 case CF_CLASS_TEXTURE:
1975 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
1976 notice_tex_gprs(tex, usage, id++, stack > 0);
1977 }
1978 break;
1979 case CF_CLASS_VERTEX:
1980 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
1981 notice_vtx_gprs(vtx, usage, id++, stack > 0);
1982 }
1983 break;
1984 case CF_CLASS_EXPORT:
1985 notice_export_gprs(cf, usage, export_cf, export_remap);
1986 continue; // don't increment id
1987 case CF_CLASS_OTHER:
1988 switch (cf->inst) {
1989 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1990 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1991 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1992 break;
1993
1994 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1995 stack -= cf->pop_count;
1996 break;
1997
1998 default:
1999 // TODO implement loop handling
2000 goto out;
2001 }
2002 }
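/* round id up to the next 0x100 boundary so every CF block
starts a fresh id page */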
2003 id += 0x100;
2004 id &= ~0xFF;
2005 }
2006 assert(stack == 0);
2007
2008 /* try to optimize gpr usage */
2009 for (i = 0; i < 124; ++i) {
2010 for (j = 0; j < usage[i].nranges; ++j) {
2011 struct gpr_usage_range *range = &usage[i].ranges[j];
2012 int is_export = export_cf[i] && export_cf[i + 1] &&
2013 range->start < export_remap[i] &&
2014 export_remap[i] <= range->end;
2015
2016 if (range->start == -1)
2017 range->replacement = -1;
2018 else if (range->end == -1)
2019 range->replacement = i;
2020 else
2021 find_replacement(usage, i, range, is_export);
2022
2023 if (range->replacement == -1)
2024 bc->ngpr = i;
2025 else if (range->replacement < i && range->replacement > bc->ngpr)
2026 bc->ngpr = range->replacement;
2027
2028 if (is_export && range->replacement != -1) {
2029 find_export_replacement(usage, range, export_cf[i],
2030 export_cf[i + 1], export_remap[i + 1]);
2031 }
2032 }
2033 }
2034 bc->ngpr++;
2035
2036 /* apply the changes */
2037 for (i = 0; i < 128; ++i) {
2038 usage[i].last_write[0] = -1;
2039 usage[i].last_write[1] = -1;
2040 usage[i].last_write[2] = -1;
2041 usage[i].last_write[3] = -1;
2042 }
2043 barrier[0] = 0;
2044 id = 0; stack = 0;
2045 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
2046 old_stack = stack;
2047 switch (get_cf_class(cf)) {
2048 case CF_CLASS_ALU:
2049 predicate = 0;
2050 first = NULL;
2051 cf->barrier = 0;
2052 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
2053 replace_alu_gprs(alu, usage, id, barrier[stack], &cf->barrier);
2054 if (alu->last)
2055 ++id;
2056
2057 if (is_alu_pred_inst(alu))
2058 predicate++;
2059
2060 if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)
2061 optimize_alu_inst(cf, alu);
2062 }
2063 if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
2064 stack += predicate;
2065 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
2066 stack -= 1;
2067 else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
2068 stack -= 2;
2069 if (LIST_IS_EMPTY(&cf->alu)) {
2070 r600_bc_remove_cf(bc, cf);
2071 cf = NULL;
2072 }
2073 break;
2074 case CF_CLASS_TEXTURE:
2075 cf->barrier = 0;
2076 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2077 replace_tex_gprs(tex, usage, id++, barrier[stack], &cf->barrier);
2078 }
2079 break;
2080 case CF_CLASS_VERTEX:
2081 cf->barrier = 0;
2082 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2083 replace_vtx_gprs(vtx, usage, id++, barrier[stack], &cf->barrier);
2084 }
2085 break;
2086 case CF_CLASS_EXPORT:
2087 continue; // don't increment id
2088 case CF_CLASS_OTHER:
2089 if (cf->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
2090 cf->barrier = 0;
2091 stack -= cf->pop_count;
2092 }
2093 break;
2094 }
2095
2096 id &= ~0xFF;
2097 if (cf && cf->barrier)
2098 barrier[old_stack] = id;
2099
2100 for (i = old_stack + 1; i <= stack; ++i)
2101 barrier[i] = barrier[old_stack];
2102
2103 id += 0x100;
if (stack != 0) /* ensure exports are placed outside of conditional blocks */
2105 continue;
2106
2107 for (i = 0; i < 128; ++i) {
2108 if (!export_cf[i] || id < export_remap[i])
2109 continue;
2110
2111 r600_bc_move_cf(bc, export_cf[i], next_cf);
2112 replace_export_gprs(export_cf[i], usage, export_remap[i], barrier[stack]);
2113 if (export_cf[i]->barrier)
2114 barrier[stack] = id - 1;
2115 next_cf = LIST_ENTRY(struct r600_bc_cf, export_cf[i]->list.next, list);
2116 optimize_export_inst(bc, export_cf[i]);
2117 export_cf[i] = NULL;
2118 }
2119 }
2120 assert(stack == 0);
2121
2122 out:
2123 for (i = 0; i < 128; ++i) {
2124 free(usage[i].ranges);
2125 }
2126 }
2127
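/* Compute the stack size, run the GPR optimizer, lay out the CF
* blocks and their clauses, and finally emit the bytecode for the
* target chip revision. */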
2128 int r600_bc_build(struct r600_bc *bc)
2129 {
2130 struct r600_bc_cf *cf;
2131 struct r600_bc_alu *alu;
2132 struct r600_bc_vtx *vtx;
2133 struct r600_bc_tex *tex;
2134 struct r600_bc_cf *exports[4] = { NULL };
2135 uint32_t literal[4];
2136 unsigned nliteral;
2137 unsigned addr;
2138 int i, r;
2139
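/* size the stack: one entry per four call-stack levels (rounded up)
plus two entries of headroom, judging from the computation below */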
2140 if (bc->callstack[0].max > 0)
2141 bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
2142 if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
2143 bc->nstack = 1;
2144 }
2145
2146 r600_bc_optimize(bc);
2147
/* first pass: compute the addr of each CF block */
/* addresses start right after all the CF instructions */
2150 addr = LIST_ENTRY(struct r600_bc_cf, bc->cf.prev, list)->id + 2;
2151 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2152 switch (get_cf_class(cf)) {
2153 case CF_CLASS_ALU:
2154 nliteral = 0;
2155 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2156 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2157 if (r)
2158 return r;
2159 if (alu->last) {
2160 cf->ndw += align(nliteral, 2);
2161 nliteral = 0;
2162 }
2163 }
2164 break;
2165 case CF_CLASS_TEXTURE:
2166 case CF_CLASS_VERTEX:
/* fetch nodes need to be 16-byte aligned */
addr += 3;
addr &= 0xFFFFFFFCUL;
break;
2172 case CF_CLASS_EXPORT:
2173 if (cf->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT))
2174 exports[cf->output.type] = cf;
2175 break;
2176 case CF_CLASS_OTHER:
2177 break;
2178 default:
2179 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2180 return -EINVAL;
2181 }
2182 cf->addr = addr;
2183 addr += cf->ndw;
2184 bc->ndw = cf->addr + cf->ndw;
2185 }
2186
2187 /* set export done on last export of each type */
2188 for (i = 0; i < 4; ++i) {
2189 if (exports[i]) {
2190 exports[i]->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
2191 }
2192 }
2193
2194 free(bc->bytecode);
2195 bc->bytecode = calloc(1, bc->ndw * 4);
2196 if (bc->bytecode == NULL)
2197 return -ENOMEM;
2198 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2199 addr = cf->addr;
2200 if (bc->chiprev == CHIPREV_EVERGREEN)
2201 r = eg_bc_cf_build(bc, cf);
2202 else
2203 r = r600_bc_cf_build(bc, cf);
2204 if (r)
2205 return r;
2206 switch (get_cf_class(cf)) {
2207 case CF_CLASS_ALU:
2208 nliteral = 0;
2209 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2210 r = r600_bc_alu_nliterals(alu, literal, &nliteral);
2211 if (r)
2212 return r;
2213 r600_bc_alu_adjust_literals(alu, literal, nliteral);
2214 switch(bc->chiprev) {
2215 case CHIPREV_R600:
2216 r = r600_bc_alu_build(bc, alu, addr);
2217 break;
2218 case CHIPREV_R700:
2219 case CHIPREV_EVERGREEN: /* eg alu is same encoding as r700 */
2220 r = r700_bc_alu_build(bc, alu, addr);
2221 break;
2222 default:
2223 R600_ERR("unknown family %d\n", bc->family);
2224 return -EINVAL;
2225 }
2226 if (r)
2227 return r;
2228 addr += 2;
2229 if (alu->last) {
2230 for (i = 0; i < align(nliteral, 2); ++i) {
2231 bc->bytecode[addr++] = literal[i];
2232 }
2233 nliteral = 0;
2234 }
2235 }
2236 break;
2237 case CF_CLASS_VERTEX:
2238 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2239 r = r600_bc_vtx_build(bc, vtx, addr);
2240 if (r)
2241 return r;
2242 addr += 4;
2243 }
2244 break;
2245 case CF_CLASS_TEXTURE:
2246 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2247 r = r600_bc_tex_build(bc, tex, addr);
2248 if (r)
2249 return r;
2250 addr += 4;
2251 }
2252 break;
2253 case CF_CLASS_EXPORT:
2254 case CF_CLASS_OTHER:
2255 break;
2256 default:
2257 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
2258 return -EINVAL;
2259 }
2260 }
2261 return 0;
2262 }
2263
2264 void r600_bc_clear(struct r600_bc *bc)
2265 {
2266 struct r600_bc_cf *cf = NULL, *next_cf;
2267
2268 free(bc->bytecode);
2269 bc->bytecode = NULL;
2270
2271 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
2272 struct r600_bc_alu *alu = NULL, *next_alu;
2273 struct r600_bc_tex *tex = NULL, *next_tex;
struct r600_bc_vtx *vtx = NULL, *next_vtx;
2275
2276 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
2277 free(alu);
2278 }
2279
2280 LIST_INITHEAD(&cf->alu);
2281
2282 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
2283 free(tex);
2284 }
2285
2286 LIST_INITHEAD(&cf->tex);
2287
2288 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
2289 free(vtx);
2290 }
2291
2292 LIST_INITHEAD(&cf->vtx);
2293
2294 free(cf);
2295 }
2296
LIST_INITHEAD(&bc->cf); /* reset the now-empty list head */
2298 }
2299
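/* Dump the built bytecode to stderr for debugging, interleaving the
* decoded CF/ALU fields with the raw dwords. */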
2300 void r600_bc_dump(struct r600_bc *bc)
2301 {
2302 struct r600_bc_cf *cf;
2303 struct r600_bc_alu *alu;
2304 struct r600_bc_vtx *vtx;
2305 struct r600_bc_tex *tex;
2306
2307 unsigned i, id;
2308 uint32_t literal[4];
2309 unsigned nliteral;
2310 char chip = '6';
2311
switch (bc->chiprev) {
case CHIPREV_R700:
chip = '7';
break;
case CHIPREV_EVERGREEN:
chip = 'E';
break;
case CHIPREV_R600:
default:
chip = '6';
break;
}
2324 fprintf(stderr, "bytecode %d dw -- %d gprs -----------------------\n", bc->ndw, bc->ngpr);
2325 fprintf(stderr, " %c\n", chip);
2326
2327 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
2328 id = cf->id;
2329
2330 switch (get_cf_class(cf)) {
2331 case CF_CLASS_ALU:
2332 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2333 fprintf(stderr, "ADDR:%04d ", cf->addr);
2334 fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache0_mode);
2335 fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache0_bank);
2336 fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache1_bank);
2337 id++;
2338 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
2339 fprintf(stderr, "INST:%d ", cf->inst);
2340 fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache1_mode);
2341 fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache0_addr);
2342 fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache1_addr);
2343 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2344 fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
2345 break;
2346 case CF_CLASS_TEXTURE:
2347 case CF_CLASS_VERTEX:
2348 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2349 fprintf(stderr, "ADDR:%04d\n", cf->addr);
2350 id++;
2351 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
2352 fprintf(stderr, "INST:%d ", cf->inst);
2353 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2354 fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
2355 break;
2356 case CF_CLASS_EXPORT:
2357 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2358 fprintf(stderr, "GPR:%d ", cf->output.gpr);
2359 fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
2360 fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
2361 fprintf(stderr, "TYPE:%X\n", cf->output.type);
2362 id++;
2363 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
2364 fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
2365 fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
2366 fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
2367 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
2369 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2370 fprintf(stderr, "INST:%d ", cf->inst);
2371 fprintf(stderr, "BURST_COUNT:%d\n", cf->output.burst_count);
2372 break;
2373 case CF_CLASS_OTHER:
2374 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2375 fprintf(stderr, "ADDR:%04d\n", cf->cf_addr);
2376 id++;
2377 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
2378 fprintf(stderr, "INST:%d ", cf->inst);
2379 fprintf(stderr, "COND:%X ", cf->cond);
2380 fprintf(stderr, "BARRIER:%d ", cf->barrier);
2381 fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
2382 break;
2383 }
2384
2385 id = cf->addr;
2386 nliteral = 0;
2387 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
2388 r600_bc_alu_nliterals(alu, literal, &nliteral);
2389
2390 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2391 fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
2392 fprintf(stderr, "REL:%d ", alu->src[0].rel);
2393 fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
2394 fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
2395 fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
2396 fprintf(stderr, "REL:%d ", alu->src[1].rel);
2397 fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
2398 fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
fprintf(stderr, "LAST:%d\n", alu->last);
2400 id++;
2401 fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
2402 fprintf(stderr, "INST:%d ", alu->inst);
2403 fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
2404 fprintf(stderr, "CHAN:%d ", alu->dst.chan);
2405 fprintf(stderr, "REL:%d ", alu->dst.rel);
2406 fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
2407 fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
2408 if (alu->is_op3) {
2409 fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
2410 fprintf(stderr, "REL:%d ", alu->src[2].rel);
2411 fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
2412 fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
2413 } else {
2414 fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
2415 fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
2416 fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
2417 fprintf(stderr, "OMOD:%d ", alu->omod);
2418 fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
2419 fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
2420 }
2421
2422 id++;
2423 if (alu->last) {
2424 for (i = 0; i < nliteral; i++, id++) {
2425 float *f = (float*)(bc->bytecode + id);
2426 fprintf(stderr, "%04d %08X %f\n", id, bc->bytecode[id], *f);
2427 }
2428 id += nliteral & 1;
2429 nliteral = 0;
2430 }
2431 }
2432
2433 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2434 //TODO
2435 }
2436
2437 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2438 //TODO
2439 }
2440 }
2441
2442 fprintf(stderr, "--------------------------------------\n");
2443 }
2444
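/* Emit the CF portion of a fetch shader: one or two VTX clauses
* (a clause seems limited to 8 fetches, hence the split at count > 8)
* followed by a RETURN, then set up the fetch shader state regs. */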
2445 void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2446 {
2447 struct r600_pipe_state *rstate;
2448 unsigned i = 0;
2449
2450 if (count > 8) {
2451 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2452 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2453 S_SQ_CF_WORD1_BARRIER(0) |
2454 S_SQ_CF_WORD1_COUNT(8 - 1);
2455 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2456 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2457 S_SQ_CF_WORD1_BARRIER(0) |
2458 S_SQ_CF_WORD1_COUNT(count - 8 - 1);
2459 } else {
2460 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2461 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
2462 S_SQ_CF_WORD1_BARRIER(0) |
2463 S_SQ_CF_WORD1_COUNT(count - 1);
2464 }
2465 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2466 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2467 S_SQ_CF_WORD1_BARRIER(0);
2468
2469 rstate = &ve->rstate;
2470 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2471 rstate->nregs = 0;
2472 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2473 0x00000000, 0xFFFFFFFF, NULL);
2474 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2475 0x00000000, 0xFFFFFFFF, NULL);
2476 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2477 r600_bo_offset(ve->fetch_shader) >> 8,
2478 0xFFFFFFFF, ve->fetch_shader);
2479 }
2480
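/* Same as r600_cf_vtx() but emits VTX_TC clauses, fetching through
* the texture cache. */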
2481 void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
2482 {
2483 struct r600_pipe_state *rstate;
2484 unsigned i = 0;
2485
2486 if (count > 8) {
2487 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2488 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2489 S_SQ_CF_WORD1_BARRIER(0) |
2490 S_SQ_CF_WORD1_COUNT(8 - 1);
2491 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2492 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2493 S_SQ_CF_WORD1_BARRIER(0) |
2494 S_SQ_CF_WORD1_COUNT((count - 8) - 1);
2495 } else {
2496 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2497 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
2498 S_SQ_CF_WORD1_BARRIER(0) |
2499 S_SQ_CF_WORD1_COUNT(count - 1);
2500 }
2501 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
2502 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
2503 S_SQ_CF_WORD1_BARRIER(0);
2504
2505 rstate = &ve->rstate;
2506 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
2507 rstate->nregs = 0;
2508 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
2509 0x00000000, 0xFFFFFFFF, NULL);
2510 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
2511 0x00000000, 0xFFFFFFFF, NULL);
2512 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
2513 r600_bo_offset(ve->fetch_shader) >> 8,
2514 0xFFFFFFFF, ve->fetch_shader);
2515 }
2516
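/* Translate a gallium vertex format into the DATA_FORMAT,
* NUM_FORMAT_ALL and FORMAT_COMP_ALL fields of a vertex fetch
* instruction. */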
2517 static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
2518 unsigned *num_format, unsigned *format_comp)
2519 {
2520 const struct util_format_description *desc;
2521 unsigned i;
2522
2523 *format = 0;
2524 *num_format = 0;
2525 *format_comp = 0;
2526
2527 desc = util_format_description(pformat);
2528 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
2529 goto out_unknown;
2530 }
2531
2532 /* Find the first non-VOID channel. */
2533 for (i = 0; i < 4; i++) {
2534 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
2535 break;
2536 }
2537 }
2538
2539 switch (desc->channel[i].type) {
2540 /* Half-floats, floats, doubles */
2541 case UTIL_FORMAT_TYPE_FLOAT:
2542 switch (desc->channel[i].size) {
2543 case 16:
2544 switch (desc->nr_channels) {
2545 case 1:
2546 *format = FMT_16_FLOAT;
2547 break;
2548 case 2:
2549 *format = FMT_16_16_FLOAT;
2550 break;
2551 case 3:
2552 *format = FMT_16_16_16_FLOAT;
2553 break;
2554 case 4:
2555 *format = FMT_16_16_16_16_FLOAT;
2556 break;
2557 }
2558 break;
2559 case 32:
2560 switch (desc->nr_channels) {
2561 case 1:
2562 *format = FMT_32_FLOAT;
2563 break;
2564 case 2:
2565 *format = FMT_32_32_FLOAT;
2566 break;
2567 case 3:
2568 *format = FMT_32_32_32_FLOAT;
2569 break;
2570 case 4:
2571 *format = FMT_32_32_32_32_FLOAT;
2572 break;
2573 }
2574 break;
2575 default:
2576 goto out_unknown;
2577 }
2578 break;
2579 /* Unsigned ints */
2580 case UTIL_FORMAT_TYPE_UNSIGNED:
2581 /* Signed ints */
2582 case UTIL_FORMAT_TYPE_SIGNED:
2583 switch (desc->channel[i].size) {
2584 case 8:
2585 switch (desc->nr_channels) {
2586 case 1:
2587 *format = FMT_8;
2588 break;
2589 case 2:
2590 *format = FMT_8_8;
2591 break;
2592 case 3:
2593 // *format = FMT_8_8_8; /* fails piglit draw-vertices test */
2594 // break;
2595 case 4:
2596 *format = FMT_8_8_8_8;
2597 break;
2598 }
2599 break;
2600 case 16:
2601 switch (desc->nr_channels) {
2602 case 1:
2603 *format = FMT_16;
2604 break;
2605 case 2:
2606 *format = FMT_16_16;
2607 break;
2608 case 3:
2609 // *format = FMT_16_16_16; /* fails piglit draw-vertices test */
2610 // break;
2611 case 4:
2612 *format = FMT_16_16_16_16;
2613 break;
2614 }
2615 break;
2616 case 32:
2617 switch (desc->nr_channels) {
2618 case 1:
2619 *format = FMT_32;
2620 break;
2621 case 2:
2622 *format = FMT_32_32;
2623 break;
2624 case 3:
2625 *format = FMT_32_32_32;
2626 break;
2627 case 4:
2628 *format = FMT_32_32_32_32;
2629 break;
2630 }
2631 break;
2632 default:
2633 goto out_unknown;
2634 }
2635 break;
2636 default:
2637 goto out_unknown;
2638 }
2639
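/* FORMAT_COMP_ALL: 1 selects signed components. NUM_FORMAT_ALL:
* 0 presumably selects NORM(alized) and 2 SCALED numbering. */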
2640 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2641 *format_comp = 1;
2642 }
2643 if (desc->channel[i].normalized) {
2644 *num_format = 0;
2645 } else {
2646 *num_format = 2;
2647 }
2648 return;
2649 out_unknown:
2650 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
2651 }
2652
2653 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
2654 {
2655 unsigned ndw, i;
2656 u32 *bytecode;
2657 unsigned fetch_resource_start = 0, format, num_format, format_comp;
2658 struct pipe_vertex_element *elements = ve->elements;
2659 const struct util_format_description *desc;
2660
/* 8 dwords for the CF instructions (padded to a 16-byte boundary) + 4 dwords per input */
2662 ndw = 8 + ve->count * 4;
2663 ve->fs_size = ndw * 4;
2664
2665 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2666 ve->fetch_shader = r600_bo(rctx->radeon, ndw*4, 256, PIPE_BIND_VERTEX_BUFFER, 0);
2667 if (ve->fetch_shader == NULL) {
2668 return -ENOMEM;
2669 }
2670
2671 bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, 0, NULL);
2672 if (bytecode == NULL) {
2673 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2674 return -ENOMEM;
2675 }
2676
2677 if (rctx->family >= CHIP_CEDAR) {
2678 eg_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2679 } else {
2680 r600_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
2681 fetch_resource_start = 160;
2682 }
2683
/* vertex element offsets need special handling: if an offset is bigger
* than what we can encode in the fetch instruction, we have to adjust
* the vertex resource offset instead. In that case, to keep the code
* simple, we bind one resource per element. It's the worst case scenario.
*/
2689 for (i = 0; i < ve->count; i++) {
2690 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
2691 if (ve->vbuffer_offset[i]) {
2692 ve->vbuffer_need_offset = 1;
2693 }
2694 }
2695
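/* emit one fetch instruction (4 dwords) per element, starting at
* dword 8; judging from SRC_GPR(0) and GPR_DST_GPR(i + 1) below, the
* vertex index is read from GPR0 and the results land in GPR1..GPRn */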
2696 for (i = 0; i < ve->count; i++) {
2697 unsigned vbuffer_index;
2698 r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp);
2699 desc = util_format_description(ve->hw_format[i]);
2700 if (desc == NULL) {
2701 R600_ERR("unknown format %d\n", ve->hw_format[i]);
2702 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2703 return -EINVAL;
2704 }
2705
2706 /* see above for vbuffer_need_offset explanation */
2707 vbuffer_index = elements[i].vertex_buffer_index;
2708 if (ve->vbuffer_need_offset) {
2709 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i + fetch_resource_start);
2710 } else {
2711 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index + fetch_resource_start);
2712 }
2713 bytecode[8 + i * 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2714 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2715 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2716 bytecode[8 + i * 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc->swizzle[0]) |
2717 S_SQ_VTX_WORD1_DST_SEL_Y(desc->swizzle[1]) |
2718 S_SQ_VTX_WORD1_DST_SEL_Z(desc->swizzle[2]) |
2719 S_SQ_VTX_WORD1_DST_SEL_W(desc->swizzle[3]) |
2720 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2721 S_SQ_VTX_WORD1_DATA_FORMAT(format) |
2722 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format) |
2723 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp) |
2724 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2725 S_SQ_VTX_WORD1_GPR_DST_GPR(i + 1);
2726 bytecode[8 + i * 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements[i].src_offset) |
2727 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2728 bytecode[8 + i * 4 + 3] = 0;
2729 }
2730 r600_bo_unmap(rctx->radeon, ve->fetch_shader);
2731 return 0;
2732 }