[mesa.git] / src / gallium / drivers / r600 / r600_asm.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
29 #include "r600_sq.h"
30 #include "r600_opcodes.h"
31 #include "r600_asm.h"
32 #include "r600_formats.h"
33 #include "r600d.h"
34
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
37
38 static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu *alu)
39 {
40 if(alu->is_op3)
41 return 3;
42
43 switch (alu->inst) {
44 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
45 return 0;
46 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
47 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
48 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
49 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
65 return 2;
66
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
68 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
69 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
70 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
71 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
81 return 1;
82 default: R600_ERR(
83 "Need instruction operand number for 0x%x.\n", alu->inst);
 84         }
85
86 return 3;
87 }
88
89 int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);
90
91 static struct r600_bc_cf *r600_bc_cf(void)
92 {
93 struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);
94
95 if (cf == NULL)
96 return NULL;
97 LIST_INITHEAD(&cf->list);
98 LIST_INITHEAD(&cf->alu);
99 LIST_INITHEAD(&cf->vtx);
100 LIST_INITHEAD(&cf->tex);
101 return cf;
102 }
103
104 static struct r600_bc_alu *r600_bc_alu(void)
105 {
106 struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);
107
108 if (alu == NULL)
109 return NULL;
110 LIST_INITHEAD(&alu->list);
111 return alu;
112 }
113
114 static struct r600_bc_vtx *r600_bc_vtx(void)
115 {
116 struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);
117
118 if (vtx == NULL)
119 return NULL;
120 LIST_INITHEAD(&vtx->list);
121 return vtx;
122 }
123
124 static struct r600_bc_tex *r600_bc_tex(void)
125 {
126 struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);
127
128 if (tex == NULL)
129 return NULL;
130 LIST_INITHEAD(&tex->list);
131 return tex;
132 }
133
134 int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
135 {
136 LIST_INITHEAD(&bc->cf);
137 bc->family = family;
138 switch (bc->family) {
139 case CHIP_R600:
140 case CHIP_RV610:
141 case CHIP_RV630:
142 case CHIP_RV670:
143 case CHIP_RV620:
144 case CHIP_RV635:
145 case CHIP_RS780:
146 case CHIP_RS880:
147 bc->chiprev = CHIPREV_R600;
148 break;
149 case CHIP_RV770:
150 case CHIP_RV730:
151 case CHIP_RV710:
152 case CHIP_RV740:
153 bc->chiprev = CHIPREV_R700;
154 break;
155 case CHIP_CEDAR:
156 case CHIP_REDWOOD:
157 case CHIP_JUNIPER:
158 case CHIP_CYPRESS:
159 case CHIP_HEMLOCK:
160 case CHIP_PALM:
161 case CHIP_BARTS:
162 case CHIP_TURKS:
163 case CHIP_CAICOS:
164 bc->chiprev = CHIPREV_EVERGREEN;
165 break;
166 default:
167 R600_ERR("unknown family %d\n", bc->family);
168 return -EINVAL;
169 }
170 return 0;
171 }
172
173 static int r600_bc_add_cf(struct r600_bc *bc)
174 {
175 struct r600_bc_cf *cf = r600_bc_cf();
176
177 if (cf == NULL)
178 return -ENOMEM;
179 LIST_ADDTAIL(&cf->list, &bc->cf);
180 if (bc->cf_last)
181 cf->id = bc->cf_last->id + 2;
182 bc->cf_last = cf;
183 bc->ncf++;
184 bc->ndw += 2;
185 bc->force_add_cf = 0;
186 return 0;
187 }
188
189 int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
190 {
191 int r;
192
193 r = r600_bc_add_cf(bc);
194 if (r)
195 return r;
196 bc->cf_last->inst = output->inst;
197 memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
198 return 0;
199 }
200
 201 /* alu instructions that can only exist once per group */
202 static int is_alu_once_inst(struct r600_bc_alu *alu)
203 {
204 return !alu->is_op3 && (
205 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
206 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
207 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
208 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
209 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
210 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
211 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
212 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
213 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
214 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
215 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
216 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
217 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
218 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
219 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
220 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
221 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
222 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
223 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
224 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
225 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
226 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
227 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
228 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
229 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
230 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
231 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
232 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
233 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
234 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
235 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
236 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
237 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
238 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
239 }
240
241 static int is_alu_reduction_inst(struct r600_bc_alu *alu)
242 {
243 return !alu->is_op3 && (
244 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
245 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
246 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
247 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
248 }
249
250 static int is_alu_mova_inst(struct r600_bc_alu *alu)
251 {
252 return !alu->is_op3 && (
253 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
254 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
255 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
256 }
257
258 /* alu instructions that can only execute on the vector unit */
259 static int is_alu_vec_unit_inst(struct r600_bc_alu *alu)
260 {
261 return is_alu_reduction_inst(alu) ||
262 is_alu_mova_inst(alu);
263 }
264
265 /* alu instructions that can only execute on the trans unit */
266 static int is_alu_trans_unit_inst(struct r600_bc_alu *alu)
267 {
268 if(!alu->is_op3)
269 return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
270 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
271 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
272 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
273 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
274 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
275 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
276 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
277 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
278 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
279 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
280 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
281 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
282 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
283 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
284 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
285 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
286 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
287 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
288 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
289 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
290 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
291 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
292 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
293 else
294 return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
295 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
296 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
297 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
298 }
299
300 /* alu instructions that can execute on any unit */
301 static int is_alu_any_unit_inst(struct r600_bc_alu *alu)
302 {
303 return !is_alu_vec_unit_inst(alu) &&
304 !is_alu_trans_unit_inst(alu);
305 }
306
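/*
 * An ALU instruction group co-issues up to five instructions: one per
 * vector unit (x, y, z, w) plus one on the trans unit.  assign_alu_units()
 * below sorts a group into that shape: assignment[0..3] hold the vector
 * slots, indexed by destination channel, and assignment[4] holds the trans
 * slot (a rough description of the assignment scheme implemented here).
 */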
307 static int assign_alu_units(struct r600_bc_alu *alu_first, struct r600_bc_alu *assignment[5])
308 {
309 struct r600_bc_alu *alu;
310 unsigned i, chan, trans;
311
312 for (i = 0; i < 5; i++)
313 assignment[i] = NULL;
314
315 for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
316 chan = alu->dst.chan;
317 if (is_alu_trans_unit_inst(alu))
318 trans = 1;
319 else if (is_alu_vec_unit_inst(alu))
320 trans = 0;
321 else if (assignment[chan])
322 trans = 1; // assume ALU_INST_PREFER_VECTOR
323 else
324 trans = 0;
325
326 if (trans) {
327 if (assignment[4]) {
328 assert(0); //ALU.Trans has already been allocated
329 return -1;
330 }
331 assignment[4] = alu;
332 } else {
333 if (assignment[chan]) {
334 assert(0); //ALU.chan has already been allocated
335 return -1;
336 }
337 assignment[chan] = alu;
338 }
339
340 if (alu->last)
341 break;
342 }
343 return 0;
344 }
345
346 struct alu_bank_swizzle {
347 int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
348 int hw_cfile_addr[4];
349 int hw_cfile_elem[4];
350 };
351
352 const unsigned cycle_for_bank_swizzle_vec[][3] = {
353 [SQ_ALU_VEC_012] = { 0, 1, 2 },
354 [SQ_ALU_VEC_021] = { 0, 2, 1 },
355 [SQ_ALU_VEC_120] = { 1, 2, 0 },
356 [SQ_ALU_VEC_102] = { 1, 0, 2 },
357 [SQ_ALU_VEC_201] = { 2, 0, 1 },
358 [SQ_ALU_VEC_210] = { 2, 1, 0 }
359 };
360
361 const unsigned cycle_for_bank_swizzle_scl[][3] = {
362 [SQ_ALU_SCL_210] = { 2, 1, 0 },
363 [SQ_ALU_SCL_122] = { 1, 2, 2 },
364 [SQ_ALU_SCL_212] = { 2, 1, 2 },
365 [SQ_ALU_SCL_221] = { 2, 2, 1 }
366 };
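/*
 * Example of how these tables are used: check_vector() looks up
 * cycle_for_bank_swizzle_vec[bank_swizzle][src] to find the cycle in which
 * each GPR operand is read.  With SQ_ALU_VEC_102 = { 1, 0, 2 }, src0 is read
 * in cycle 1, src1 in cycle 0 and src2 in cycle 2, so two co-issued
 * instructions reading different GPRs on the same channel can still be
 * scheduled if their swizzles put those reads in different cycles
 * (see reserve_gpr()).
 */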
367
368 static void init_bank_swizzle(struct alu_bank_swizzle *bs)
369 {
370 int i, cycle, component;
371 /* set up gpr use */
372 for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
373 for (component = 0; component < NUM_OF_COMPONENTS; component++)
374 bs->hw_gpr[cycle][component] = -1;
375 for (i = 0; i < 4; i++)
376 bs->hw_cfile_addr[i] = -1;
377 for (i = 0; i < 4; i++)
378 bs->hw_cfile_elem[i] = -1;
379 }
380
381 static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
382 {
383 if (bs->hw_gpr[cycle][chan] == -1)
384 bs->hw_gpr[cycle][chan] = sel;
385 else if (bs->hw_gpr[cycle][chan] != (int)sel) {
386 // Another scalar operation has already used GPR read port for channel
387 return -1;
388 }
389 return 0;
390 }
391
392 static int reserve_cfile(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
393 {
394 int res, resmatch = -1, resempty = -1;
395 for (res = 3; res >= 0; --res) {
396 if (bs->hw_cfile_addr[res] == -1)
397 resempty = res;
398 else if (bs->hw_cfile_addr[res] == sel &&
399 bs->hw_cfile_elem[res] == chan)
400 resmatch = res;
401 }
402 if (resmatch != -1)
403 return 0; // Read for this scalar element already reserved, nothing to do here.
404 else if (resempty != -1) {
405 bs->hw_cfile_addr[resempty] = sel;
406 bs->hw_cfile_elem[resempty] = chan;
407 } else {
408 // All cfile read ports are used, cannot reference vector element
409 return -1;
410 }
411 return 0;
412 }
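/*
 * Rough usage note: hw_cfile_addr/hw_cfile_elem model the four constant-file
 * read ports available to an instruction group.  A fifth distinct (sel, chan)
 * pair makes reserve_cfile() fail, so check_and_set_bank_swizzle() tries
 * another swizzle combination or gives up (in which case merge_inst_groups()
 * simply refuses to merge).
 */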
413
414 static int is_gpr(unsigned sel)
415 {
 416         return (sel <= 127);
417 }
418
419 /* CB constants start at 512, and get translated to a kcache index when ALU
420 * clauses are constructed. Note that we handle kcache constants the same way
 421  * as (the now gone) cfile constants; is that really required? */
422 static int is_cfile(unsigned sel)
423 {
424 return (sel > 255 && sel < 512) ||
425 (sel > 511 && sel < 4607) || // Kcache before translate
426 (sel > 127 && sel < 192); // Kcache after translate
427 }
428
429 static int is_const(int sel)
430 {
431 return is_cfile(sel) ||
432 (sel >= V_SQ_ALU_SRC_0 &&
433 sel <= V_SQ_ALU_SRC_LITERAL);
434 }
435
436 static int check_vector(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
437 {
438 int r, src, num_src, sel, elem, cycle;
439
440 num_src = r600_bc_get_num_operands(alu);
441 for (src = 0; src < num_src; src++) {
442 sel = alu->src[src].sel;
443 elem = alu->src[src].chan;
444 if (is_gpr(sel)) {
445 cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
446 if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
447 // Nothing to do; special-case optimization,
448 // second source uses first source’s reservation
449 continue;
450 else {
451 r = reserve_gpr(bs, sel, elem, cycle);
452 if (r)
453 return r;
454 }
455 } else if (is_cfile(sel)) {
456 r = reserve_cfile(bs, sel, elem);
457 if (r)
458 return r;
459 }
460 // No restrictions on PV, PS, literal or special constants
461 }
462 return 0;
463 }
464
465 static int check_scalar(struct r600_bc_alu *alu, struct alu_bank_swizzle *bs, int bank_swizzle)
466 {
467 int r, src, num_src, const_count, sel, elem, cycle;
468
469 num_src = r600_bc_get_num_operands(alu);
470 for (const_count = 0, src = 0; src < num_src; ++src) {
471 sel = alu->src[src].sel;
472 elem = alu->src[src].chan;
473 if (is_const(sel)) { // Any constant, including literal and inline constants
474 if (const_count >= 2)
475 // More than two references to a constant in
476 // transcendental operation.
477 return -1;
478 else
479 const_count++;
480 }
481 if (is_cfile(sel)) {
482 r = reserve_cfile(bs, sel, elem);
483 if (r)
484 return r;
485 }
486 }
487 for (src = 0; src < num_src; ++src) {
488 sel = alu->src[src].sel;
489 elem = alu->src[src].chan;
490 if (is_gpr(sel)) {
491 cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
492 if (cycle < const_count)
493 // Cycle for GPR load conflicts with
494 // constant load in transcendental operation.
495 return -1;
496 r = reserve_gpr(bs, sel, elem, cycle);
497 if (r)
498 return r;
499 }
500 // Constants already processed
501 // No restrictions on PV, PS
502 }
503 return 0;
504 }
505
506 static int check_and_set_bank_swizzle(struct r600_bc_alu *slots[5])
507 {
508 struct alu_bank_swizzle bs;
509 int bank_swizzle[5];
510 int i, r = 0, forced = 0;
511
512 for (i = 0; i < 5; i++)
513 if (slots[i] && slots[i]->bank_swizzle_force) {
514 slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
515 forced = 1;
516 }
517
518 if (forced)
519 return 0;
520
521 // just check every possible combination of bank swizzle
 522         // not very efficient, but works on the first try in most cases
523 for (i = 0; i < 4; i++)
524 bank_swizzle[i] = SQ_ALU_VEC_012;
525 bank_swizzle[4] = SQ_ALU_SCL_210;
526 while(bank_swizzle[4] <= SQ_ALU_SCL_221) {
527 init_bank_swizzle(&bs);
528 for (i = 0; i < 4; i++) {
529 if (slots[i]) {
530 r = check_vector(slots[i], &bs, bank_swizzle[i]);
531 if (r)
532 break;
533 }
534 }
535 if (!r && slots[4]) {
536 r = check_scalar(slots[4], &bs, bank_swizzle[4]);
537 }
538 if (!r) {
539 for (i = 0; i < 5; i++) {
540 if (slots[i])
541 slots[i]->bank_swizzle = bank_swizzle[i];
542 }
543 return 0;
544 }
545
546 for (i = 0; i < 5; i++) {
547 bank_swizzle[i]++;
548 if (bank_swizzle[i] <= SQ_ALU_VEC_210)
549 break;
550 else
551 bank_swizzle[i] = SQ_ALU_VEC_012;
552 }
553 }
554
555 // couldn't find a working swizzle
556 return -1;
557 }
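/*
 * Sketch of the search above: bank_swizzle[] is advanced like an odometer,
 * each vector slot cycling through the 6 SQ_ALU_VEC_* swizzles and the trans
 * slot through the 4 SQ_ALU_SCL_* swizzles, so at most 6*6*6*6*4 = 5184
 * combinations are tried before giving up.
 */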
558
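/*
 * Results of the previous instruction group can be read back through the
 * PV/PS bypass instead of a GPR read port.  Roughly: a value written by the
 * previous group's vector slot j is available as PV on that channel, and the
 * previous trans slot's result as PS; the loop below rewrites matching GPR
 * sources accordingly.
 */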
559 static int replace_gpr_with_pv_ps(struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
560 {
561 struct r600_bc_alu *prev[5];
562 int gpr[5], chan[5];
563 int i, j, r, src, num_src;
564
565 r = assign_alu_units(alu_prev, prev);
566 if (r)
567 return r;
568
569 for (i = 0; i < 5; ++i) {
570 if(prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
571 gpr[i] = prev[i]->dst.sel;
572 if (is_alu_reduction_inst(prev[i]))
573 chan[i] = 0;
574 else
575 chan[i] = prev[i]->dst.chan;
576 } else
577 gpr[i] = -1;
578 }
579
580 for (i = 0; i < 5; ++i) {
581 struct r600_bc_alu *alu = slots[i];
582 if(!alu)
583 continue;
584
585 num_src = r600_bc_get_num_operands(alu);
586 for (src = 0; src < num_src; ++src) {
587 if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
588 continue;
589
590 if (alu->src[src].sel == gpr[4] &&
591 alu->src[src].chan == chan[4]) {
592 alu->src[src].sel = V_SQ_ALU_SRC_PS;
593 alu->src[src].chan = 0;
594 continue;
595 }
596
597 for (j = 0; j < 4; ++j) {
598 if (alu->src[src].sel == gpr[j] &&
599 alu->src[src].chan == j) {
600 alu->src[src].sel = V_SQ_ALU_SRC_PV;
601 alu->src[src].chan = chan[j];
602 break;
603 }
604 }
605 }
606 }
607
608 return 0;
609 }
610
611 static int merge_inst_groups(struct r600_bc *bc, struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
612 {
613 struct r600_bc_alu *prev[5];
614 struct r600_bc_alu *result[5] = { NULL };
615 int i, j, r, src, num_src;
616 int num_once_inst = 0;
617
618 r = assign_alu_units(alu_prev, prev);
619 if (r)
620 return r;
621
622 for (i = 0; i < 5; ++i) {
 623                 // TODO: groups that carry literals are not handled yet, don't merge them
624 if (prev[i] && prev[i]->nliteral)
625 return 0;
626 if (slots[i] && slots[i]->nliteral)
627 return 0;
628
629
630 // let's check used slots
631 if (prev[i] && !slots[i]) {
632 result[i] = prev[i];
633 num_once_inst += is_alu_once_inst(prev[i]);
634 continue;
635 } else if (prev[i] && slots[i]) {
636 if (result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
 637                         // trans unit is still free, try to use it
638 if (is_alu_any_unit_inst(slots[i])) {
639 result[i] = prev[i];
640 result[4] = slots[i];
641 } else if (is_alu_any_unit_inst(prev[i])) {
642 result[i] = slots[i];
643 result[4] = prev[i];
644 } else
645 return 0;
646 } else
647 return 0;
648 } else if(!slots[i]) {
649 continue;
650 } else
651 result[i] = slots[i];
652
653 // let's check source gprs
654 struct r600_bc_alu *alu = slots[i];
655 num_once_inst += is_alu_once_inst(alu);
656
657 num_src = r600_bc_get_num_operands(alu);
658 for (src = 0; src < num_src; ++src) {
 659                         // constants don't matter
660 if (!is_gpr(alu->src[src].sel))
661 continue;
662
663 for (j = 0; j < 5; ++j) {
664 if (!prev[j] || !prev[j]->dst.write)
665 continue;
666
 667                         // if it's relative then we can't determine which gpr is really used
668 if (prev[j]->dst.chan == alu->src[src].chan &&
669 (prev[j]->dst.sel == alu->src[src].sel ||
670 prev[j]->dst.rel || alu->src[src].rel))
671 return 0;
672 }
673 }
674 }
675
676 /* more than one PRED_ or KILL_ ? */
677 if (num_once_inst > 1)
678 return 0;
679
 680         /* check if the result can still be swizzled */
681 r = check_and_set_bank_swizzle(result);
682 if (r)
683 return 0;
684
685 /* looks like everything worked out right, apply the changes */
686
687 /* sort instructions */
688 for (i = 0; i < 5; ++i) {
689 slots[i] = result[i];
690 if (result[i]) {
691 LIST_DEL(&result[i]->list);
692 result[i]->last = 0;
693 LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
694 }
695 }
696
697 /* determine new last instruction */
698 LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list)->last = 1;
699
700 /* determine new first instruction */
701 for (i = 0; i < 5; ++i) {
702 if (result[i]) {
703 bc->cf_last->curr_bs_head = result[i];
704 break;
705 }
706 }
707
708 bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
709 bc->cf_last->prev2_bs_head = NULL;
710
711 return 0;
712 }
713
714 /* This code handles kcache lines as single blocks of 32 constants. We could
715 * probably do slightly better by recognizing that we actually have two
716 * consecutive lines of 16 constants, but the resulting code would also be
717 * somewhat more complicated. */
718 static int r600_bc_alloc_kcache_lines(struct r600_bc *bc, struct r600_bc_alu *alu, int type)
719 {
720 struct r600_bc_kcache *kcache = bc->cf_last->kcache;
721 unsigned int required_lines;
722 unsigned int free_lines = 0;
723 unsigned int cache_line[3];
724 unsigned int count = 0;
725 unsigned int i, j;
726 int r;
727
728 /* Collect required cache lines. */
729 for (i = 0; i < 3; ++i) {
730 bool found = false;
731 unsigned int line;
732
733 if (alu->src[i].sel < 512)
734 continue;
735
736 line = ((alu->src[i].sel - 512) / 32) * 2;
737
738 for (j = 0; j < count; ++j) {
739 if (cache_line[j] == line) {
740 found = true;
741 break;
742 }
743 }
744
745 if (!found)
746 cache_line[count++] = line;
747 }
748
749 /* This should never actually happen. */
750 if (count >= 3) return -ENOMEM;
751
752 for (i = 0; i < 2; ++i) {
753 if (kcache[i].mode == V_SQ_CF_KCACHE_NOP) {
754 ++free_lines;
755 }
756 }
757
 758         /* Filter lines pulled in by previous instructions. Note that this is
 759          * only for the required_lines count; we can't remove these from the
760 * cache_line array since we may have to start a new ALU clause. */
761 for (i = 0, required_lines = count; i < count; ++i) {
762 for (j = 0; j < 2; ++j) {
763 if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
764 kcache[j].addr == cache_line[i]) {
765 --required_lines;
766 break;
767 }
768 }
769 }
770
771 /* Start a new ALU clause if needed. */
772 if (required_lines > free_lines) {
773 if ((r = r600_bc_add_cf(bc))) {
774 return r;
775 }
776 bc->cf_last->inst = (type << 3);
777 kcache = bc->cf_last->kcache;
778 }
779
780 /* Setup the kcache lines. */
781 for (i = 0; i < count; ++i) {
782 bool found = false;
783
784 for (j = 0; j < 2; ++j) {
785 if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
786 kcache[j].addr == cache_line[i]) {
787 found = true;
788 break;
789 }
790 }
791
792 if (found) continue;
793
794 for (j = 0; j < 2; ++j) {
795 if (kcache[j].mode == V_SQ_CF_KCACHE_NOP) {
796 kcache[j].bank = 0;
797 kcache[j].addr = cache_line[i];
798 kcache[j].mode = V_SQ_CF_KCACHE_LOCK_2;
799 break;
800 }
801 }
802 }
803
804 /* Alter the src operands to refer to the kcache. */
805 for (i = 0; i < 3; ++i) {
806 static const unsigned int base[] = {128, 160, 256, 288};
807 unsigned int line;
808
809 if (alu->src[i].sel < 512)
810 continue;
811
812 alu->src[i].sel -= 512;
813 line = (alu->src[i].sel / 32) * 2;
814
815 for (j = 0; j < 2; ++j) {
816 if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
817 kcache[j].addr == line) {
818 alu->src[i].sel &= 0x1f;
819 alu->src[i].sel += base[j];
820 break;
821 }
822 }
823 }
824
825 return 0;
826 }
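/*
 * Worked example for the function above (numbers chosen for illustration):
 * an ALU instruction reading CB constants 0 and 40 has sel 512 and 552.
 * That requires kcache lines ((0 / 32) * 2) = 0 and ((40 / 32) * 2) = 2;
 * with both kcache slots free they are locked as addr 0 (slot 0, base 128)
 * and addr 2 (slot 1, base 160), and the operands are rewritten to
 * (0 & 0x1f) + 128 = 128 and (40 & 0x1f) + 160 = 168.
 */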
827
828 int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
829 {
830 struct r600_bc_alu *nalu = r600_bc_alu();
831 struct r600_bc_alu *lalu;
832 int i, r;
833
834 if (nalu == NULL)
835 return -ENOMEM;
836 memcpy(nalu, alu, sizeof(struct r600_bc_alu));
837 nalu->nliteral = 0;
838
839 if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
840 /* check if we could add it anyway */
841 if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
842 type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
843 LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
844 if (lalu->predicate) {
845 bc->force_add_cf = 1;
846 break;
847 }
848 }
849 } else
850 bc->force_add_cf = 1;
851 }
852
 853         /* a cf can contain only alu, only vtx or only tex */
854 if (bc->cf_last == NULL || bc->force_add_cf) {
855 r = r600_bc_add_cf(bc);
856 if (r) {
857 free(nalu);
858 return r;
859 }
860 }
861 bc->cf_last->inst = (type << 3);
862
863 /* Setup the kcache for this ALU instruction. This will start a new
864 * ALU clause if needed. */
865 if ((r = r600_bc_alloc_kcache_lines(bc, nalu, type))) {
866 free(nalu);
867 return r;
868 }
869
870 if (!bc->cf_last->curr_bs_head) {
871 bc->cf_last->curr_bs_head = nalu;
872 }
 873         /* at most 128 slots; adding one alu can consume 5 slots + 4 literal
 874          * constants (2 slots) in the worst case */
875 if (nalu->last && (bc->cf_last->ndw >> 1) >= 120) {
876 bc->force_add_cf = 1;
877 }
 878         /* number of gprs == highest gpr index used by any alu + 1 */
879 for (i = 0; i < 3; i++) {
880 if (nalu->src[i].sel >= bc->ngpr && nalu->src[i].sel < 128) {
881 bc->ngpr = nalu->src[i].sel + 1;
882 }
 883                 /* compute how many literals are needed,
884 * either 2 or 4 literals
885 */
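                /* e.g. sel 253 is the ALU literal source; chan 0 or 1 selects
                 * the first literal pair -> (chan + 2) & 0x6 == 2 dwords,
                 * chan 2 or 3 selects the second pair -> 4 dwords. */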
886 if (nalu->src[i].sel == 253) {
887 if (((nalu->src[i].chan + 2) & 0x6) > nalu->nliteral) {
888 nalu->nliteral = (nalu->src[i].chan + 2) & 0x6;
889 }
890 }
891 }
892 if (!LIST_IS_EMPTY(&bc->cf_last->alu)) {
893 lalu = LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list);
894 if (!lalu->last && lalu->nliteral > nalu->nliteral) {
895 nalu->nliteral = lalu->nliteral;
896 }
897 }
898 if (nalu->dst.sel >= bc->ngpr) {
899 bc->ngpr = nalu->dst.sel + 1;
900 }
901 LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
 902         /* each alu uses 2 dwords */
903 bc->cf_last->ndw += 2;
904 bc->ndw += 2;
905
 906         /* process the current ALU instruction group for bank swizzle */
907 if (nalu->last) {
908 struct r600_bc_alu *slots[5];
909 r = assign_alu_units(bc->cf_last->curr_bs_head, slots);
910 if (r)
911 return r;
912
913 if (bc->cf_last->prev_bs_head) {
914 r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
915 if (r)
916 return r;
917 }
918
919 if (bc->cf_last->prev_bs_head) {
920 r = replace_gpr_with_pv_ps(slots, bc->cf_last->prev_bs_head);
921 if (r)
922 return r;
923 }
924
925 r = check_and_set_bank_swizzle(slots);
926 if (r)
927 return r;
928
929 bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
930 bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
931 bc->cf_last->curr_bs_head = NULL;
932 }
933 return 0;
934 }
935
936 int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
937 {
938 return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
939 }
940
941 int r600_bc_add_literal(struct r600_bc *bc, const u32 *value)
942 {
943 struct r600_bc_alu *alu;
944
945 if (bc->cf_last == NULL) {
946 return 0;
947 }
948 if (bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_TEX) {
949 return 0;
950 }
951 /* all same on EG */
952 if (bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_JUMP ||
953 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_ELSE ||
954 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL ||
955 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK ||
956 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE ||
957 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END ||
958 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
959 return 0;
960 }
961 /* same on EG */
962 if (((bc->cf_last->inst != (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)) &&
963 (bc->cf_last->inst != (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)) &&
964 (bc->cf_last->inst != (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)) &&
965 (bc->cf_last->inst != (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3))) ||
966 LIST_IS_EMPTY(&bc->cf_last->alu)) {
967 R600_ERR("last CF is not ALU (%p)\n", bc->cf_last);
968 return -EINVAL;
969 }
970 alu = LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list);
971 if (!alu->last || !alu->nliteral || alu->literal_added) {
972 return 0;
973 }
974 memcpy(alu->value, value, 4 * 4);
975 bc->cf_last->ndw += alu->nliteral;
976 bc->ndw += alu->nliteral;
977 alu->literal_added = 1;
978 return 0;
979 }
980
981 int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
982 {
983 struct r600_bc_vtx *nvtx = r600_bc_vtx();
984 int r;
985
986 if (nvtx == NULL)
987 return -ENOMEM;
988 memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));
989
 990         /* a cf can contain only alu, only vtx or only tex */
991 if (bc->cf_last == NULL ||
992 (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
993 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) ||
994 bc->force_add_cf) {
995 r = r600_bc_add_cf(bc);
996 if (r) {
997 free(nvtx);
998 return r;
999 }
1000 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
1001 }
1002 LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
 1003         /* each fetch uses 4 dwords */
1004 bc->cf_last->ndw += 4;
1005 bc->ndw += 4;
1006 if ((bc->cf_last->ndw / 4) > 7)
1007 bc->force_add_cf = 1;
1008 return 0;
1009 }
1010
1011 int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
1012 {
1013 struct r600_bc_tex *ntex = r600_bc_tex();
1014 int r;
1015
1016 if (ntex == NULL)
1017 return -ENOMEM;
1018 memcpy(ntex, tex, sizeof(struct r600_bc_tex));
1019
 1020         /* a cf can contain only alu, only vtx or only tex */
1021 if (bc->cf_last == NULL ||
1022 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
1023 bc->force_add_cf) {
1024 r = r600_bc_add_cf(bc);
1025 if (r) {
1026 free(ntex);
1027 return r;
1028 }
1029 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
1030 }
1031 if (ntex->src_gpr >= bc->ngpr) {
1032 bc->ngpr = ntex->src_gpr + 1;
1033 }
1034 if (ntex->dst_gpr >= bc->ngpr) {
1035 bc->ngpr = ntex->dst_gpr + 1;
1036 }
1037 LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
 1038         /* each texture fetch uses 4 dwords */
1039 bc->cf_last->ndw += 4;
1040 bc->ndw += 4;
1041 if ((bc->cf_last->ndw / 4) > 7)
1042 bc->force_add_cf = 1;
1043 return 0;
1044 }
1045
1046 int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
1047 {
1048 int r;
1049 r = r600_bc_add_cf(bc);
1050 if (r)
1051 return r;
1052
1053 bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
1054 bc->cf_last->inst = inst;
1055 return 0;
1056 }
1057
1058 /* common to all 3 families */
1059 static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
1060 {
1061 unsigned fetch_resource_start = 0;
1062
 1063         /* check if this is a fetch shader */
 1064         /* a fetch shader can also access vertex resources; the
1065 * first fetch shader resource is at 160
1066 */
1067 if (bc->type == -1) {
1068 switch (bc->chiprev) {
1069 /* r600 */
1070 case CHIPREV_R600:
1071 /* r700 */
1072 case CHIPREV_R700:
1073 fetch_resource_start = 160;
1074 break;
1075 /* evergreen */
1076 case CHIPREV_EVERGREEN:
1077 fetch_resource_start = 0;
1078 break;
1079 default:
1080 fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
1081 __FILE__, __func__, __LINE__, bc->chiprev);
1082 break;
1083 }
1084 }
1085 bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
1086 S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
1087 S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
1088 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
1089 bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
1090 S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
1091 S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
1092 S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
1093 S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
1094 S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
1095 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
1096 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
1097 S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
1098 S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
1099 bc->bytecode[id++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
1100 bc->bytecode[id++] = 0;
1101 return 0;
1102 }
1103
1104 /* common to all 3 families */
1105 static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
1106 {
1107 bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
1108 S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
1109 S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
1110 S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
1111 bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
1112 S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
1113 S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
1114 S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
1115 S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
1116 S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
1117 S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
1118 S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
1119 S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
1120 S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
1121 S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
1122 bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
1123 S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
1124 S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
1125 S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
1126 S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
1127 S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
1128 S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
1129 S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
1130 bc->bytecode[id++] = 0;
1131 return 0;
1132 }
1133
1134 /* r600 only, r700/eg bits in r700_asm.c */
1135 static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
1136 {
1137 unsigned i;
1138
1139 /* don't replace gpr by pv or ps for destination register */
1140 bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
1141 S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
1142 S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
1143 S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
1144 S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
1145 S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
1146 S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
1147 S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
1148 S_SQ_ALU_WORD0_LAST(alu->last);
1149
1150 if (alu->is_op3) {
1151 bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
1152 S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
1153 S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
1154 S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
1155 S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
1156 S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
1157 S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
1158 S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
1159 S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
1160 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
1161 } else {
1162 bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
1163 S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
1164 S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
1165 S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
1166 S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
1167 S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
1168 S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
1169 S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
1170 S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
1171 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
1172 S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
1173 S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
1174 }
1175 if (alu->last) {
1176 if (alu->nliteral && !alu->literal_added) {
1177 R600_ERR("Bug in ALU processing for instruction 0x%08x, literal not added correctly\n", alu->inst);
1178 }
1179 for (i = 0; i < alu->nliteral; i++) {
1180 bc->bytecode[id++] = alu->value[i];
1181 }
1182 }
1183 return 0;
1184 }
1185
1186 /* common for r600/r700 - eg in eg_asm.c */
1187 static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
1188 {
1189 unsigned id = cf->id;
1190
1191 switch (cf->inst) {
1192 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1193 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1194 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1195 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1196 bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
1197 S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
1198 S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
1199 S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache[1].bank);
1200
1201 bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
1202 S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
1203 S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
1204 S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
1205 S_SQ_CF_ALU_WORD1_BARRIER(1) |
1206 S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
1207 S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
1208 break;
1209 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1210 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1211 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1212 bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
1213 bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
1214 S_SQ_CF_WORD1_BARRIER(1) |
1215 S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1);
1216 break;
1217 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1218 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1219 bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
1220 S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
1221 S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
1222 S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
1223 bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
1224 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
1225 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
1226 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
1227 S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->output.barrier) |
1228 S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->output.inst) |
1229 S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf->output.end_of_program);
1230 break;
1231 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1232 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1233 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1234 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1235 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1236 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1237 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1238 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1239 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1240 bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
1241 bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
1242 S_SQ_CF_WORD1_BARRIER(1) |
1243 S_SQ_CF_WORD1_COND(cf->cond) |
1244 S_SQ_CF_WORD1_POP_COUNT(cf->pop_count);
1245
1246 break;
1247 default:
1248 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1249 return -EINVAL;
1250 }
1251 return 0;
1252 }
1253
1254 int r600_bc_build(struct r600_bc *bc)
1255 {
1256 struct r600_bc_cf *cf;
1257 struct r600_bc_alu *alu;
1258 struct r600_bc_vtx *vtx;
1259 struct r600_bc_tex *tex;
1260 unsigned addr;
1261 int r;
1262
1263 if (bc->callstack[0].max > 0)
1264 bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
1265 if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
1266 bc->nstack = 1;
1267 }
1268
 1269         /* first pass: compute the addr of each CF block */
 1270         /* addrs start after all the CF instructions */
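        /* Illustration: with three CF blocks (ids 0, 2, 4) the clause payload
         * begins at dword 6.  If the first block is a TEX clause it is pushed
         * to dword 8 by the 16-byte alignment below, and two fetches
         * (8 dwords) put the next block's payload at dword 16. */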
1271 addr = bc->cf_last->id + 2;
1272 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1273 switch (cf->inst) {
1274 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1275 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1276 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1277 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1278 break;
1279 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1280 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1281 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
 1282                         /* fetch nodes need to be 16-byte aligned */
1283 addr += 3;
1284 addr &= 0xFFFFFFFCUL;
1285 break;
1286 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1287 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1288 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1289 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1290 break;
1291 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1292 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1293 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1294 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1295 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1296 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1297 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1298 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1299 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1300 break;
1301 default:
1302 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1303 return -EINVAL;
1304 }
1305 cf->addr = addr;
1306 addr += cf->ndw;
1307 bc->ndw = cf->addr + cf->ndw;
1308 }
1309 free(bc->bytecode);
1310 bc->bytecode = calloc(1, bc->ndw * 4);
1311 if (bc->bytecode == NULL)
1312 return -ENOMEM;
1313 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1314 addr = cf->addr;
1315 if (bc->chiprev == CHIPREV_EVERGREEN)
1316 r = eg_bc_cf_build(bc, cf);
1317 else
1318 r = r600_bc_cf_build(bc, cf);
1319 if (r)
1320 return r;
1321 switch (cf->inst) {
1322 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1323 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1324 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1325 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1326 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1327 switch(bc->chiprev) {
1328 case CHIPREV_R600:
1329 r = r600_bc_alu_build(bc, alu, addr);
1330 break;
1331 case CHIPREV_R700:
1332 case CHIPREV_EVERGREEN: /* eg alu is same encoding as r700 */
1333 r = r700_bc_alu_build(bc, alu, addr);
1334 break;
1335 default:
1336 R600_ERR("unknown family %d\n", bc->family);
1337 return -EINVAL;
1338 }
1339 if (r)
1340 return r;
1341 addr += 2;
1342 if (alu->last) {
1343 addr += alu->nliteral;
1344 }
1345 }
1346 break;
1347 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1348 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1349 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
1350 r = r600_bc_vtx_build(bc, vtx, addr);
1351 if (r)
1352 return r;
1353 addr += 4;
1354 }
1355 break;
1356 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1357 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
1358 r = r600_bc_tex_build(bc, tex, addr);
1359 if (r)
1360 return r;
1361 addr += 4;
1362 }
1363 break;
1364 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1365 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1366 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1367 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1368 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1369 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1370 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1371 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1372 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1373 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1374 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1375 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1376 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1377 break;
1378 default:
1379 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1380 return -EINVAL;
1381 }
1382 }
1383 return 0;
1384 }
1385
1386 void r600_bc_clear(struct r600_bc *bc)
1387 {
1388 struct r600_bc_cf *cf = NULL, *next_cf;
1389
1390 free(bc->bytecode);
1391 bc->bytecode = NULL;
1392
1393 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
1394 struct r600_bc_alu *alu = NULL, *next_alu;
1395 struct r600_bc_tex *tex = NULL, *next_tex;
 1396                 struct r600_bc_vtx *vtx = NULL, *next_vtx;
1397
1398 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
1399 free(alu);
1400 }
1401
1402 LIST_INITHEAD(&cf->alu);
1403
1404 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
1405 free(tex);
1406 }
1407
1408 LIST_INITHEAD(&cf->tex);
1409
1410 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
1411 free(vtx);
1412 }
1413
1414 LIST_INITHEAD(&cf->vtx);
1415
1416 free(cf);
1417 }
1418
1419 LIST_INITHEAD(&cf->list);
1420 }
1421
1422 void r600_bc_dump(struct r600_bc *bc)
1423 {
1424 struct r600_bc_cf *cf = NULL;
1425 struct r600_bc_alu *alu = NULL;
1426 struct r600_bc_vtx *vtx = NULL;
1427 struct r600_bc_tex *tex = NULL;
1428
1429 unsigned i, id;
1430 char chip = '6';
1431
1432 switch (bc->chiprev) {
1433 case 1:
1434 chip = '7';
1435 break;
1436 case 2:
1437 chip = 'E';
1438 break;
1439 case 0:
1440 default:
1441 chip = '6';
1442 break;
1443 }
1444 fprintf(stderr, "bytecode %d dw -- %d gprs ---------------------\n", bc->ndw, bc->ngpr);
1445 fprintf(stderr, " %c\n", chip);
1446
1447 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1448 id = cf->id;
1449
1450 switch (cf->inst) {
1451 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1452 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1453 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1454 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1455 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
1456 fprintf(stderr, "ADDR:%d ", cf->addr);
1457 fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache[0].mode);
1458 fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache[0].bank);
1459 fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache[1].bank);
1460 id++;
1461 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
1462 fprintf(stderr, "INST:%d ", cf->inst);
1463 fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache[1].mode);
1464 fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache[0].addr);
1465 fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache[1].addr);
1466 fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
1467 break;
1468 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1469 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1470 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1471 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
1472 fprintf(stderr, "ADDR:%d\n", cf->addr);
1473 id++;
1474 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
1475 fprintf(stderr, "INST:%d ", cf->inst);
1476 fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
1477 break;
1478 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1479 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1480 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
1481 fprintf(stderr, "GPR:%X ", cf->output.gpr);
1482 fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
1483 fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
1484 fprintf(stderr, "TYPE:%X\n", cf->output.type);
1485 id++;
1486 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
1487 fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
1488 fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
1489 fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
1490 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
1492 fprintf(stderr, "BARRIER:%X ", cf->output.barrier);
1493 fprintf(stderr, "INST:%d ", cf->output.inst);
1494 fprintf(stderr, "EOP:%X\n", cf->output.end_of_program);
1495 break;
1496 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1497 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1498 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1499 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1500 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1501 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1502 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1503 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1504 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1505 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
1506 fprintf(stderr, "ADDR:%d\n", cf->cf_addr);
1507 id++;
1508 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
1509 fprintf(stderr, "INST:%d ", cf->inst);
1510 fprintf(stderr, "COND:%X ", cf->cond);
1511 fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
1512 break;
1513 }
1514
1515 id = cf->addr;
1516 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1517 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
1518 fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
1519 fprintf(stderr, "REL:%d ", alu->src[0].rel);
1520 fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
1521 fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
1522 fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
1523 fprintf(stderr, "REL:%d ", alu->src[1].rel);
1524 fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
1525 fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
 1526                         fprintf(stderr, "LAST:%d\n", alu->last);
1527 id++;
1528 fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
1529 fprintf(stderr, "INST:%d ", alu->inst);
1530 fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
1531 fprintf(stderr, "CHAN:%d ", alu->dst.chan);
1532 fprintf(stderr, "REL:%d ", alu->dst.rel);
1533 fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
1534 fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
1535 if (alu->is_op3) {
1536 fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
1537 fprintf(stderr, "REL:%d ", alu->src[2].rel);
1538 fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
1539 fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
1540 } else {
1541 fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
1542 fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
1543 fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
1544 fprintf(stderr, "OMOD:%d ", alu->omod);
1545 fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
1546 fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
1547 }
1548
1549 id++;
1550 if (alu->last) {
1551 for (i = 0; i < alu->nliteral; i++, id++) {
1552 float *f = (float*)(bc->bytecode + id);
1553 fprintf(stderr, "%04d %08X\t%f\n", id, bc->bytecode[id], *f);
1554 }
1555 }
1556 }
1557
1558 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
1559 //TODO
1560 }
1561
1562 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
1563 //TODO
1564 }
1565 }
1566
1567 fprintf(stderr, "--------------------------------------\n");
1568 }
1569
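/*
 * Emit the CF program of a fetch shader (rough sketch of the layout built
 * below): 8 dwords are reserved for CF instructions, so the first VTX clause
 * starts at dword 8; the ADDR fields are in 2-dword units, hence "8 >> 1".
 * The clause COUNT written here covers at most 8 fetches (32 dwords), so
 * with more than 8 elements a second clause starts at dword 40, and a final
 * RETURN ends the program.  r600_cf_vtx_tc() below is identical except that
 * it emits VTX_TC clauses.
 */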
1570 void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
1571 {
1572 struct r600_pipe_state *rstate;
1573 unsigned i = 0;
1574
1575 if (count > 8) {
1576 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
1577 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
1578 S_SQ_CF_WORD1_BARRIER(1) |
1579 S_SQ_CF_WORD1_COUNT(8 - 1);
1580 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
1581 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
1582 S_SQ_CF_WORD1_BARRIER(1) |
1583 S_SQ_CF_WORD1_COUNT(count - 8 - 1);
1584 } else {
1585 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
1586 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
1587 S_SQ_CF_WORD1_BARRIER(1) |
1588 S_SQ_CF_WORD1_COUNT(count - 1);
1589 }
1590 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
1591 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
1592 S_SQ_CF_WORD1_BARRIER(1);
1593
1594 rstate = &ve->rstate;
1595 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
1596 rstate->nregs = 0;
1597 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
1598 0x00000000, 0xFFFFFFFF, NULL);
1599 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
1600 0x00000000, 0xFFFFFFFF, NULL);
1601 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
1602 r600_bo_offset(ve->fetch_shader) >> 8,
1603 0xFFFFFFFF, ve->fetch_shader);
1604 }
1605
1606 void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
1607 {
1608 struct r600_pipe_state *rstate;
1609 unsigned i = 0;
1610
1611 if (count > 8) {
1612 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
1613 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
1614 S_SQ_CF_WORD1_BARRIER(1) |
1615 S_SQ_CF_WORD1_COUNT(8 - 1);
1616 bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
1617 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
1618 S_SQ_CF_WORD1_BARRIER(1) |
1619 S_SQ_CF_WORD1_COUNT((count - 8) - 1);
1620 } else {
1621 bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
1622 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
1623 S_SQ_CF_WORD1_BARRIER(1) |
1624 S_SQ_CF_WORD1_COUNT(count - 1);
1625 }
1626 bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
1627 bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
1628 S_SQ_CF_WORD1_BARRIER(1);
1629
1630 rstate = &ve->rstate;
1631 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
1632 rstate->nregs = 0;
1633 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
1634 0x00000000, 0xFFFFFFFF, NULL);
1635 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
1636 0x00000000, 0xFFFFFFFF, NULL);
1637 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
1638 r600_bo_offset(ve->fetch_shader) >> 8,
1639 0xFFFFFFFF, ve->fetch_shader);
1640 }
1641
1642 static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
1643 unsigned *num_format, unsigned *format_comp)
1644 {
1645 const struct util_format_description *desc;
1646 unsigned i;
1647
1648 *format = 0;
1649 *num_format = 0;
1650 *format_comp = 0;
1651
1652 desc = util_format_description(pformat);
1653 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
1654 goto out_unknown;
1655 }
1656
1657 /* Find the first non-VOID channel. */
1658 for (i = 0; i < 4; i++) {
1659 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
1660 break;
1661 }
1662 }
1663
1664 switch (desc->channel[i].type) {
1665 /* Half-floats, floats, doubles */
1666 case UTIL_FORMAT_TYPE_FLOAT:
1667 switch (desc->channel[i].size) {
1668 case 16:
1669 switch (desc->nr_channels) {
1670 case 1:
1671 *format = FMT_16_FLOAT;
1672 break;
1673 case 2:
1674 *format = FMT_16_16_FLOAT;
1675 break;
1676 case 3:
1677 *format = FMT_16_16_16_FLOAT;
1678 break;
1679 case 4:
1680 *format = FMT_16_16_16_16_FLOAT;
1681 break;
1682 }
1683 break;
1684 case 32:
1685 switch (desc->nr_channels) {
1686 case 1:
1687 *format = FMT_32_FLOAT;
1688 break;
1689 case 2:
1690 *format = FMT_32_32_FLOAT;
1691 break;
1692 case 3:
1693 *format = FMT_32_32_32_FLOAT;
1694 break;
1695 case 4:
1696 *format = FMT_32_32_32_32_FLOAT;
1697 break;
1698 }
1699 break;
1700 default:
1701 goto out_unknown;
1702 }
1703 break;
1704 /* Unsigned ints */
1705 case UTIL_FORMAT_TYPE_UNSIGNED:
1706 /* Signed ints */
1707 case UTIL_FORMAT_TYPE_SIGNED:
1708 switch (desc->channel[i].size) {
1709 case 8:
1710 switch (desc->nr_channels) {
1711 case 1:
1712 *format = FMT_8;
1713 break;
1714 case 2:
1715 *format = FMT_8_8;
1716 break;
1717 case 3:
1718 // *format = FMT_8_8_8; /* fails piglit draw-vertices test */
1719 // break;
1720 case 4:
1721 *format = FMT_8_8_8_8;
1722 break;
1723 }
1724 break;
1725 case 16:
1726 switch (desc->nr_channels) {
1727 case 1:
1728 *format = FMT_16;
1729 break;
1730 case 2:
1731 *format = FMT_16_16;
1732 break;
1733 case 3:
1734 // *format = FMT_16_16_16; /* fails piglit draw-vertices test */
1735 // break;
1736 case 4:
1737 *format = FMT_16_16_16_16;
1738 break;
1739 }
1740 break;
1741 case 32:
1742 switch (desc->nr_channels) {
1743 case 1:
1744 *format = FMT_32;
1745 break;
1746 case 2:
1747 *format = FMT_32_32;
1748 break;
1749 case 3:
1750 *format = FMT_32_32_32;
1751 break;
1752 case 4:
1753 *format = FMT_32_32_32_32;
1754 break;
1755 }
1756 break;
1757 default:
1758 goto out_unknown;
1759 }
1760 break;
1761 default:
1762 goto out_unknown;
1763 }
1764
1765 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
1766 *format_comp = 1;
1767 }
1768 if (desc->channel[i].normalized) {
1769 *num_format = 0;
1770 } else {
1771 *num_format = 2;
1772 }
1773 return;
1774 out_unknown:
1775 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
1776 }
1777
1778 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
1779 {
1780 unsigned ndw, i;
1781 u32 *bytecode;
1782 unsigned fetch_resource_start = 0, format, num_format, format_comp;
1783 struct pipe_vertex_element *elements = ve->elements;
1784 const struct util_format_description *desc;
1785
1786 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
1787 ndw = 8 + ve->count * 4;
1788 ve->fs_size = ndw * 4;
1789
1790 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
1791 ve->fetch_shader = r600_bo(rctx->radeon, ndw*4, 256, PIPE_BIND_VERTEX_BUFFER, 0);
1792 if (ve->fetch_shader == NULL) {
1793 return -ENOMEM;
1794 }
1795
1796 bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, 0, NULL);
1797 if (bytecode == NULL) {
1798 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
1799 return -ENOMEM;
1800 }
1801
1802 if (rctx->family >= CHIP_CEDAR) {
1803 eg_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
1804 } else {
1805 r600_cf_vtx(ve, &bytecode[0], (ndw - 8) / 4);
1806 fetch_resource_start = 160;
1807 }
1808
 1809         /* vertex element offsets need special handling: if an offset is bigger
 1810          * than what we can put in the fetch instruction then we need to alter
 1811          * the vertex resource offset. In that case, to simplify the code, we
 1812          * bind one resource per element. It's a worst case scenario.
1813 */
1814 for (i = 0; i < ve->count; i++) {
1815 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
1816 if (ve->vbuffer_offset[i]) {
1817 ve->vbuffer_need_offset = 1;
1818 }
1819 }
1820
1821 for (i = 0; i < ve->count; i++) {
1822 unsigned vbuffer_index;
1823 r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp);
1824 desc = util_format_description(ve->hw_format[i]);
1825 if (desc == NULL) {
1826 R600_ERR("unknown format %d\n", ve->hw_format[i]);
1827 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
1828 return -EINVAL;
1829 }
1830
1831 /* see above for vbuffer_need_offset explanation */
1832 vbuffer_index = elements[i].vertex_buffer_index;
1833 if (ve->vbuffer_need_offset) {
1834 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i + fetch_resource_start);
1835 } else {
1836 bytecode[8 + i * 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index + fetch_resource_start);
1837 }
1838 bytecode[8 + i * 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
1839 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
1840 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
1841 bytecode[8 + i * 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc->swizzle[0]) |
1842 S_SQ_VTX_WORD1_DST_SEL_Y(desc->swizzle[1]) |
1843 S_SQ_VTX_WORD1_DST_SEL_Z(desc->swizzle[2]) |
1844 S_SQ_VTX_WORD1_DST_SEL_W(desc->swizzle[3]) |
1845 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
1846 S_SQ_VTX_WORD1_DATA_FORMAT(format) |
1847 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format) |
1848 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp) |
1849 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
1850 S_SQ_VTX_WORD1_GPR_DST_GPR(i + 1);
1851 bytecode[8 + i * 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements[i].src_offset) |
1852 S_SQ_VTX_WORD2_MEGA_FETCH(1);
1853 bytecode[8 + i * 4 + 3] = 0;
1854 }
1855 r600_bo_unmap(rctx->radeon, ve->fetch_shader);
1856 return 0;
1857 }