gallium: split CAP_INSTANCE_DRAWING into INSTANCEID and INSTANCE_DIVISOR
[mesa.git] / src / gallium / drivers / r600 / r600_asm.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
29 #include "r600_sq.h"
30 #include "r600_opcodes.h"
31 #include "r600_asm.h"
32 #include "r600_formats.h"
33 #include "r600d.h"
34
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
37
/* Return how many source operands the given ALU instruction consumes.
 * OP3-encoded instructions always take three sources; for OP2 encodings
 * the count depends on the opcode, and the opcode values differ between
 * the R600/R700 and Evergreen instruction sets, so the chip revision
 * selects which table is consulted.  Unknown opcodes are reported via
 * R600_ERR and conservatively treated as taking three operands. */
static inline unsigned int r600_bc_get_num_operands(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	if(alu->is_op3)
		return 3;

	switch (bc->chiprev) {
	case CHIPREV_R600:
	case CHIPREV_R700:
		switch (alu->inst) {
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
			return 0;
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
			return 2;

		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
		case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
			return 1;
		default: R600_ERR(
			"Need instruction operand number for 0x%x.\n", alu->inst);
		}
		break;
	case CHIPREV_EVERGREEN:
		switch (alu->inst) {
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
			return 0;
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW:
			return 2;

		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
		case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
			return 1;
		default: R600_ERR(
			"Need instruction operand number for 0x%x.\n", alu->inst);
		}
		break;
	}

	/* Unrecognized opcode: conservative fallback. */
	return 3;
}
150
151 int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);
152
153 static struct r600_bc_cf *r600_bc_cf(void)
154 {
155 struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);
156
157 if (cf == NULL)
158 return NULL;
159 LIST_INITHEAD(&cf->list);
160 LIST_INITHEAD(&cf->alu);
161 LIST_INITHEAD(&cf->vtx);
162 LIST_INITHEAD(&cf->tex);
163 return cf;
164 }
165
166 static struct r600_bc_alu *r600_bc_alu(void)
167 {
168 struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);
169
170 if (alu == NULL)
171 return NULL;
172 LIST_INITHEAD(&alu->list);
173 return alu;
174 }
175
176 static struct r600_bc_vtx *r600_bc_vtx(void)
177 {
178 struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);
179
180 if (vtx == NULL)
181 return NULL;
182 LIST_INITHEAD(&vtx->list);
183 return vtx;
184 }
185
186 static struct r600_bc_tex *r600_bc_tex(void)
187 {
188 struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);
189
190 if (tex == NULL)
191 return NULL;
192 LIST_INITHEAD(&tex->list);
193 return tex;
194 }
195
196 int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
197 {
198 LIST_INITHEAD(&bc->cf);
199 bc->family = family;
200 switch (bc->family) {
201 case CHIP_R600:
202 case CHIP_RV610:
203 case CHIP_RV630:
204 case CHIP_RV670:
205 case CHIP_RV620:
206 case CHIP_RV635:
207 case CHIP_RS780:
208 case CHIP_RS880:
209 bc->chiprev = CHIPREV_R600;
210 break;
211 case CHIP_RV770:
212 case CHIP_RV730:
213 case CHIP_RV710:
214 case CHIP_RV740:
215 bc->chiprev = CHIPREV_R700;
216 break;
217 case CHIP_CEDAR:
218 case CHIP_REDWOOD:
219 case CHIP_JUNIPER:
220 case CHIP_CYPRESS:
221 case CHIP_HEMLOCK:
222 case CHIP_PALM:
223 case CHIP_BARTS:
224 case CHIP_TURKS:
225 case CHIP_CAICOS:
226 bc->chiprev = CHIPREV_EVERGREEN;
227 break;
228 default:
229 R600_ERR("unknown family %d\n", bc->family);
230 return -EINVAL;
231 }
232 return 0;
233 }
234
235 static int r600_bc_add_cf(struct r600_bc *bc)
236 {
237 struct r600_bc_cf *cf = r600_bc_cf();
238
239 if (cf == NULL)
240 return -ENOMEM;
241 LIST_ADDTAIL(&cf->list, &bc->cf);
242 if (bc->cf_last)
243 cf->id = bc->cf_last->id + 2;
244 bc->cf_last = cf;
245 bc->ncf++;
246 bc->ndw += 2;
247 bc->force_add_cf = 0;
248 return 0;
249 }
250
/* Add an export (output) CF instruction.  When the new output is
 * compatible with the previous export CF (same or EXPORT->EXPORT_DONE
 * instruction, same type/elem_size/swizzles, combined burst <= 16) and
 * its gpr/array_base range is adjacent to it (immediately before or
 * after), the two are merged into a single burst export instead of
 * emitting a new CF.  Returns 0 on success or a negative errno. */
int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
{
	int r;

	if (bc->cf_last && (bc->cf_last->inst == output->inst ||
		(bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) &&
		output->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE))) &&
		output->type == bc->cf_last->output.type &&
		output->elem_size == bc->cf_last->output.elem_size &&
		output->swizzle_x == bc->cf_last->output.swizzle_x &&
		output->swizzle_y == bc->cf_last->output.swizzle_y &&
		output->swizzle_z == bc->cf_last->output.swizzle_z &&
		output->swizzle_w == bc->cf_last->output.swizzle_w &&
		(output->burst_count + bc->cf_last->output.burst_count) <= 16) {

		/* new output lies directly before the previous burst:
		 * take over its gpr/array_base as the new burst start */
		if ((output->gpr + output->burst_count) == bc->cf_last->output.gpr &&
			(output->array_base + output->burst_count) == bc->cf_last->output.array_base) {

			bc->cf_last->output.end_of_program |= output->end_of_program;
			bc->cf_last->output.inst = output->inst;
			bc->cf_last->output.gpr = output->gpr;
			bc->cf_last->output.array_base = output->array_base;
			bc->cf_last->output.burst_count += output->burst_count;
			return 0;

		/* new output lies directly after the previous burst:
		 * just grow the burst count */
		} else if (output->gpr == (bc->cf_last->output.gpr + bc->cf_last->output.burst_count) &&
			output->array_base == (bc->cf_last->output.array_base + bc->cf_last->output.burst_count)) {

			bc->cf_last->output.end_of_program |= output->end_of_program;
			bc->cf_last->output.inst = output->inst;
			bc->cf_last->output.burst_count += output->burst_count;
			return 0;
		}
	}

	/* cannot merge: emit a fresh CF for this export */
	r = r600_bc_add_cf(bc);
	if (r)
		return r;
	bc->cf_last->inst = output->inst;
	memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
	return 0;
}
293
/* alu instructions that can only exist once per group
 * (KILL* and PRED_* OP2 instructions) */
static int is_alu_once_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	switch (bc->chiprev) {
	case CHIPREV_R600:
	case CHIPREV_R700:
		return !alu->is_op3 && (
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
			alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
	case CHIPREV_EVERGREEN:
	default:
		return !alu->is_op3 && (
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
			alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
	}
}
374
375 static int is_alu_reduction_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
376 {
377 switch (bc->chiprev) {
378 case CHIPREV_R600:
379 case CHIPREV_R700:
380 return !alu->is_op3 && (
381 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
382 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
383 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
384 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
385 case CHIPREV_EVERGREEN:
386 default:
387 return !alu->is_op3 && (
388 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
389 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
390 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
391 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
392 }
393 }
394
395 static int is_alu_cube_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
396 {
397 switch (bc->chiprev) {
398 case CHIPREV_R600:
399 case CHIPREV_R700:
400 return !alu->is_op3 &&
401 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
402 case CHIPREV_EVERGREEN:
403 default:
404 return !alu->is_op3 &&
405 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
406 }
407 }
408
409 static int is_alu_mova_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
410 {
411 switch (bc->chiprev) {
412 case CHIPREV_R600:
413 case CHIPREV_R700:
414 return !alu->is_op3 && (
415 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
416 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
417 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
418 case CHIPREV_EVERGREEN:
419 default:
420 return !alu->is_op3 && (
421 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
422 }
423 }
424
/* alu instructions that can only execute on the vector unit */
static int is_alu_vec_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	if (is_alu_reduction_inst(bc, alu))
		return 1;
	return is_alu_mova_inst(bc, alu);
}
431
/* alu instructions that can only execute on the trans (scalar) unit:
 * integer multiply/shift and the transcendental opcodes.  The opcode
 * sets differ per chip revision. */
static int is_alu_trans_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	switch (bc->chiprev) {
	case CHIPREV_R600:
	case CHIPREV_R700:
		if (!alu->is_op3)
			return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
				alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
		else
			return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
				alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
				alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
				alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
	case CHIPREV_EVERGREEN:
	default:
		if (!alu->is_op3)
			/* Note that FLT_TO_INT* instructions are vector instructions
			 * on Evergreen, despite what the documentation says. */
			return alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
				alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
		else
			return alu->inst == EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
	}
}
500
/* alu instructions that can execute on any unit */
static int is_alu_any_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	if (is_alu_vec_unit_inst(bc, alu))
		return 0;
	return !is_alu_trans_unit_inst(bc, alu);
}
507
/* Walk one instruction group (from alu_first up to and including the
 * instruction with ->last set) and assign each instruction to a hardware
 * slot: assignment[0..3] are the vector units x/y/z/w (indexed by the
 * destination channel) and assignment[4] is the trans unit.  Returns 0
 * on success, -1 (after an assert) if a slot is claimed twice. */
static int assign_alu_units(struct r600_bc *bc, struct r600_bc_alu *alu_first,
		struct r600_bc_alu *assignment[5])
{
	struct r600_bc_alu *alu;
	unsigned i, chan, trans;

	for (i = 0; i < 5; i++)
		assignment[i] = NULL;

	for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
		chan = alu->dst.chan;
		/* decide which unit this instruction must (or may) run on */
		if (is_alu_trans_unit_inst(bc, alu))
			trans = 1;
		else if (is_alu_vec_unit_inst(bc, alu))
			trans = 0;
		else if (assignment[chan])
			trans = 1; // assume ALU_INST_PREFER_VECTOR
		else
			trans = 0;

		if (trans) {
			if (assignment[4]) {
				assert(0); //ALU.Trans has already been allocated
				return -1;
			}
			assignment[4] = alu;
		} else {
			if (assignment[chan]) {
				assert(0); //ALU.chan has already been allocated
				return -1;
			}
			assignment[chan] = alu;
		}

		/* ->last marks the end of the instruction group */
		if (alu->last)
			break;
	}
	return 0;
}
547
/* Bookkeeping used while validating a bank-swizzle choice for one
 * instruction group: which GPR is read on each cycle/component read
 * port, and which constant-file address/element each cfile read port
 * serves.  -1 means the port is still free. */
struct alu_bank_swizzle {
	int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
	int hw_cfile_addr[4];
	int hw_cfile_elem[4];
};

/* For each vector-unit bank swizzle: the cycle on which source operand
 * 0/1/2 is fetched. */
static const unsigned cycle_for_bank_swizzle_vec[][3] = {
	[SQ_ALU_VEC_012] = { 0, 1, 2 },
	[SQ_ALU_VEC_021] = { 0, 2, 1 },
	[SQ_ALU_VEC_120] = { 1, 2, 0 },
	[SQ_ALU_VEC_102] = { 1, 0, 2 },
	[SQ_ALU_VEC_201] = { 2, 0, 1 },
	[SQ_ALU_VEC_210] = { 2, 1, 0 }
};

/* For each scalar (trans) unit bank swizzle: the cycle on which source
 * operand 0/1/2 is fetched. */
static const unsigned cycle_for_bank_swizzle_scl[][3] = {
	[SQ_ALU_SCL_210] = { 2, 1, 0 },
	[SQ_ALU_SCL_122] = { 1, 2, 2 },
	[SQ_ALU_SCL_212] = { 2, 1, 2 },
	[SQ_ALU_SCL_221] = { 2, 2, 1 }
};
569
570 static void init_bank_swizzle(struct alu_bank_swizzle *bs)
571 {
572 int i, cycle, component;
573 /* set up gpr use */
574 for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
575 for (component = 0; component < NUM_OF_COMPONENTS; component++)
576 bs->hw_gpr[cycle][component] = -1;
577 for (i = 0; i < 4; i++)
578 bs->hw_cfile_addr[i] = -1;
579 for (i = 0; i < 4; i++)
580 bs->hw_cfile_elem[i] = -1;
581 }
582
583 static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
584 {
585 if (bs->hw_gpr[cycle][chan] == -1)
586 bs->hw_gpr[cycle][chan] = sel;
587 else if (bs->hw_gpr[cycle][chan] != (int)sel) {
588 // Another scalar operation has already used GPR read port for channel
589 return -1;
590 }
591 return 0;
592 }
593
594 static int reserve_cfile(struct r600_bc *bc, struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
595 {
596 int res, num_res = 4;
597 if (bc->chiprev >= CHIPREV_R700) {
598 num_res = 2;
599 chan /= 2;
600 }
601 for (res = 0; res < num_res; ++res) {
602 if (bs->hw_cfile_addr[res] == -1) {
603 bs->hw_cfile_addr[res] = sel;
604 bs->hw_cfile_elem[res] = chan;
605 return 0;
606 } else if (bs->hw_cfile_addr[res] == sel &&
607 bs->hw_cfile_elem[res] == chan)
608 return 0; // Read for this scalar element already reserved, nothing to do here.
609 }
610 // All cfile read ports are used, cannot reference vector element
611 return -1;
612 }
613
/* GPR selectors occupy the range 0..127. */
static int is_gpr(unsigned sel)
{
	/* sel is unsigned, so the former "sel >= 0" lower-bound check was
	 * always true (tautological comparison); only the upper bound is
	 * meaningful. */
	return sel <= 127;
}
618
/* CB constants start at 512, and get translated to a kcache index when ALU
 * clauses are constructed. Note that we handle kcache constants the same way
 * as (the now gone) cfile constants, is that really required? */
static int is_cfile(unsigned sel)
{
	/* 256..4606: CB constants / kcache before translation,
	 * 128..191:  kcache window after translation */
	return (sel >= 256 && sel <= 4606) ||
	       (sel >= 128 && sel <= 191);
}
628
629 static int is_const(int sel)
630 {
631 return is_cfile(sel) ||
632 (sel >= V_SQ_ALU_SRC_0 &&
633 sel <= V_SQ_ALU_SRC_LITERAL);
634 }
635
636 static int check_vector(struct r600_bc *bc, struct r600_bc_alu *alu,
637 struct alu_bank_swizzle *bs, int bank_swizzle)
638 {
639 int r, src, num_src, sel, elem, cycle;
640
641 num_src = r600_bc_get_num_operands(bc, alu);
642 for (src = 0; src < num_src; src++) {
643 sel = alu->src[src].sel;
644 elem = alu->src[src].chan;
645 if (is_gpr(sel)) {
646 cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
647 if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
648 // Nothing to do; special-case optimization,
649 // second source uses first source’s reservation
650 continue;
651 else {
652 r = reserve_gpr(bs, sel, elem, cycle);
653 if (r)
654 return r;
655 }
656 } else if (is_cfile(sel)) {
657 r = reserve_cfile(bc, bs, sel, elem);
658 if (r)
659 return r;
660 }
661 // No restrictions on PV, PS, literal or special constants
662 }
663 return 0;
664 }
665
666 static int check_scalar(struct r600_bc *bc, struct r600_bc_alu *alu,
667 struct alu_bank_swizzle *bs, int bank_swizzle)
668 {
669 int r, src, num_src, const_count, sel, elem, cycle;
670
671 num_src = r600_bc_get_num_operands(bc, alu);
672 for (const_count = 0, src = 0; src < num_src; ++src) {
673 sel = alu->src[src].sel;
674 elem = alu->src[src].chan;
675 if (is_const(sel)) { // Any constant, including literal and inline constants
676 if (const_count >= 2)
677 // More than two references to a constant in
678 // transcendental operation.
679 return -1;
680 else
681 const_count++;
682 }
683 if (is_cfile(sel)) {
684 r = reserve_cfile(bc, bs, sel, elem);
685 if (r)
686 return r;
687 }
688 }
689 for (src = 0; src < num_src; ++src) {
690 sel = alu->src[src].sel;
691 elem = alu->src[src].chan;
692 if (is_gpr(sel)) {
693 cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
694 if (cycle < const_count)
695 // Cycle for GPR load conflicts with
696 // constant load in transcendental operation.
697 return -1;
698 r = reserve_gpr(bs, sel, elem, cycle);
699 if (r)
700 return r;
701 }
702 // Constants already processed
703 // No restrictions on PV, PS
704 }
705 return 0;
706 }
707
/* Find a combination of bank swizzles for all occupied slots of an
 * instruction group such that every GPR/cfile read fits the hardware
 * read ports.  If any slot has a forced swizzle, all slots are left
 * as-is.  Otherwise the combinations are enumerated odometer-style
 * until one validates.  Returns 0 on success, -1 if none works. */
static int check_and_set_bank_swizzle(struct r600_bc *bc,
		struct r600_bc_alu *slots[5])
{
	struct alu_bank_swizzle bs;
	int bank_swizzle[5];
	int i, r = 0, forced = 0;

	for (i = 0; i < 5; i++)
		if (slots[i] && slots[i]->bank_swizzle_force) {
			slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
			forced = 1;
		}

	if (forced)
		return 0;

	// just check every possible combination of bank swizzle
	// not very efficient, but works on the first try in most of the cases
	for (i = 0; i < 4; i++)
		bank_swizzle[i] = SQ_ALU_VEC_012;
	bank_swizzle[4] = SQ_ALU_SCL_210;
	while(bank_swizzle[4] <= SQ_ALU_SCL_221) {
		init_bank_swizzle(&bs);
		/* validate the four vector slots, then the trans slot */
		for (i = 0; i < 4; i++) {
			if (slots[i]) {
				r = check_vector(bc, slots[i], &bs, bank_swizzle[i]);
				if (r)
					break;
			}
		}
		if (!r && slots[4]) {
			r = check_scalar(bc, slots[4], &bs, bank_swizzle[4]);
		}
		if (!r) {
			/* found a valid combination: commit it */
			for (i = 0; i < 5; i++) {
				if (slots[i])
					slots[i]->bank_swizzle = bank_swizzle[i];
			}
			return 0;
		}

		/* advance to the next combination (odometer increment) */
		for (i = 0; i < 5; i++) {
			bank_swizzle[i]++;
			if (bank_swizzle[i] <= SQ_ALU_VEC_210)
				break;
			else
				bank_swizzle[i] = SQ_ALU_VEC_012;
		}
	}

	// couldn't find a working swizzle
	return -1;
}
761
/* Rewrite GPR sources in the current group that read results written by
 * the previous group so they use the PV (previous vector, per channel)
 * and PS (previous scalar) special selectors instead of the GPR, which
 * avoids read-port pressure.  Returns 0 on success or the error from
 * assign_alu_units(). */
static int replace_gpr_with_pv_ps(struct r600_bc *bc,
		struct r600_bc_alu *slots[5], struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	int gpr[5], chan[5];
	int i, j, r, src, num_src;

	r = assign_alu_units(bc, alu_prev, prev);
	if (r)
		return r;

	/* record which gpr/chan each previous slot wrote (gpr[i] == -1
	 * means the slot produced nothing usable: no write or relative) */
	for (i = 0; i < 5; ++i) {
		if(prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
			gpr[i] = prev[i]->dst.sel;
			/* cube writes more than PV.X */
			if (!is_alu_cube_inst(bc, prev[i]) && is_alu_reduction_inst(bc, prev[i]))
				chan[i] = 0;
			else
				chan[i] = prev[i]->dst.chan;
		} else
			gpr[i] = -1;
	}

	for (i = 0; i < 5; ++i) {
		struct r600_bc_alu *alu = slots[i];
		if(!alu)
			continue;

		num_src = r600_bc_get_num_operands(bc, alu);
		for (src = 0; src < num_src; ++src) {
			/* only absolute GPR reads can be replaced */
			if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
				continue;

			/* trans-slot result is reachable through PS */
			if (alu->src[src].sel == gpr[4] &&
				alu->src[src].chan == chan[4]) {
				alu->src[src].sel = V_SQ_ALU_SRC_PS;
				alu->src[src].chan = 0;
				continue;
			}

			/* vector-slot results are reachable through PV.chan */
			for (j = 0; j < 4; ++j) {
				if (alu->src[src].sel == gpr[j] &&
					alu->src[src].chan == j) {
					alu->src[src].sel = V_SQ_ALU_SRC_PV;
					alu->src[src].chan = chan[j];
					break;
				}
			}
		}
	}

	return 0;
}
815
816 void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg)
817 {
818 switch(value) {
819 case 0:
820 *sel = V_SQ_ALU_SRC_0;
821 break;
822 case 1:
823 *sel = V_SQ_ALU_SRC_1_INT;
824 break;
825 case -1:
826 *sel = V_SQ_ALU_SRC_M_1_INT;
827 break;
828 case 0x3F800000: // 1.0f
829 *sel = V_SQ_ALU_SRC_1;
830 break;
831 case 0x3F000000: // 0.5f
832 *sel = V_SQ_ALU_SRC_0_5;
833 break;
834 case 0xBF800000: // -1.0f
835 *sel = V_SQ_ALU_SRC_1;
836 *neg ^= 1;
837 break;
838 case 0xBF000000: // -0.5f
839 *sel = V_SQ_ALU_SRC_0_5;
840 *neg ^= 1;
841 break;
842 default:
843 *sel = V_SQ_ALU_SRC_LITERAL;
844 break;
845 }
846 }
847
848 /* compute how many literal are needed */
849 static int r600_bc_alu_nliterals(struct r600_bc *bc, struct r600_bc_alu *alu,
850 uint32_t literal[4], unsigned *nliteral)
851 {
852 unsigned num_src = r600_bc_get_num_operands(bc, alu);
853 unsigned i, j;
854
855 for (i = 0; i < num_src; ++i) {
856 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
857 uint32_t value = alu->src[i].value;
858 unsigned found = 0;
859 for (j = 0; j < *nliteral; ++j) {
860 if (literal[j] == value) {
861 found = 1;
862 break;
863 }
864 }
865 if (!found) {
866 if (*nliteral >= 4)
867 return -EINVAL;
868 literal[(*nliteral)++] = value;
869 }
870 }
871 }
872 return 0;
873 }
874
875 static void r600_bc_alu_adjust_literals(struct r600_bc *bc,
876 struct r600_bc_alu *alu,
877 uint32_t literal[4], unsigned nliteral)
878 {
879 unsigned num_src = r600_bc_get_num_operands(bc, alu);
880 unsigned i, j;
881
882 for (i = 0; i < num_src; ++i) {
883 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
884 uint32_t value = alu->src[i].value;
885 for (j = 0; j < nliteral; ++j) {
886 if (literal[j] == value) {
887 alu->src[i].chan = j;
888 break;
889 }
890 }
891 }
892 }
893 }
894
/* Try to merge the current instruction group (slots) with the previous
 * group (alu_prev) into one five-slot group.  Returns 0 both on a
 * successful merge (the merged group replaces both in the CF's alu list)
 * and when merging is impossible (caller keeps both groups); a negative
 * value is only returned on a real error from assign_alu_units. */
static int merge_inst_groups(struct r600_bc *bc, struct r600_bc_alu *slots[5],
		struct r600_bc_alu *alu_prev)
{
	struct r600_bc_alu *prev[5];
	struct r600_bc_alu *result[5] = { NULL };

	uint32_t literal[4], prev_literal[4];
	unsigned nliteral = 0, prev_nliteral = 0;

	int i, j, r, src, num_src;
	int num_once_inst = 0;
	int have_mova = 0, have_rel = 0;

	r = assign_alu_units(bc, alu_prev, prev);
	if (r)
		return r;

	for (i = 0; i < 5; ++i) {
		struct r600_bc_alu *alu;

		/* check number of literals */
		if (prev[i]) {
			/* literal/nliteral accumulate the union of both groups
			 * (enforces the 4-literal limit for the merged group);
			 * prev_literal/prev_nliteral count only the previous
			 * group so its dwords can be subtracted again below. */
			if (r600_bc_alu_nliterals(bc, prev[i], literal, &nliteral))
				return 0;
			if (r600_bc_alu_nliterals(bc, prev[i], prev_literal, &prev_nliteral))
				return 0;
			/* a MOVA cannot share a group with a relative access */
			if (is_alu_mova_inst(bc, prev[i])) {
				if (have_rel)
					return 0;
				have_mova = 1;
			}
			num_once_inst += is_alu_once_inst(bc, prev[i]);
		}
		if (slots[i] && r600_bc_alu_nliterals(bc, slots[i], literal, &nliteral))
			return 0;

		// let's check used slots
		if (prev[i] && !slots[i]) {
			result[i] = prev[i];
			continue;
		} else if (prev[i] && slots[i]) {
			if (result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
				// trans unit is still free try to use it
				if (is_alu_any_unit_inst(bc, slots[i])) {
					result[i] = prev[i];
					result[4] = slots[i];
				} else if (is_alu_any_unit_inst(bc, prev[i])) {
					result[i] = slots[i];
					result[4] = prev[i];
				} else
					return 0;
			} else
				return 0;
		} else if(!slots[i]) {
			continue;
		} else
			result[i] = slots[i];

		// let's check source gprs
		alu = slots[i];
		num_once_inst += is_alu_once_inst(bc, alu);

		num_src = r600_bc_get_num_operands(bc, alu);
		for (src = 0; src < num_src; ++src) {
			if (alu->src[src].rel) {
				if (have_mova)
					return 0;
				have_rel = 1;
			}

			// constant sources don't matter
			if (!is_gpr(alu->src[src].sel))
				continue;

			/* reject the merge if this instruction reads a GPR
			 * the previous group writes (read-after-write). */
			for (j = 0; j < 5; ++j) {
				if (!prev[j] || !prev[j]->dst.write)
					continue;

				// if it's relative then we can't determine which gpr is really used
				if (prev[j]->dst.chan == alu->src[src].chan &&
					(prev[j]->dst.sel == alu->src[src].sel ||
					prev[j]->dst.rel || alu->src[src].rel))
					return 0;
			}
		}
	}

	/* more than one PRED_ or KILL_ ? */
	if (num_once_inst > 1)
		return 0;

	/* check if the result can still be swizzled */
	r = check_and_set_bank_swizzle(bc, result);
	if (r)
		return 0;

	/* looks like everything worked out right, apply the changes */

	/* undo adding previous literals */
	bc->cf_last->ndw -= align(prev_nliteral, 2);

	/* sort instructions */
	for (i = 0; i < 5; ++i) {
		slots[i] = result[i];
		if (result[i]) {
			LIST_DEL(&result[i]->list);
			result[i]->last = 0;
			LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
		}
	}

	/* determine new last instruction */
	LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list)->last = 1;

	/* determine new first instruction */
	for (i = 0; i < 5; ++i) {
		if (result[i]) {
			bc->cf_last->curr_bs_head = result[i];
			break;
		}
	}

	/* the two groups became one, so shift the group history */
	bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
	bc->cf_last->prev2_bs_head = NULL;

	return 0;
}
1022
/* This code handles kcache lines as single blocks of 32 constants. We could
 * probably do slightly better by recognizing that we actually have two
 * consecutive lines of 16 constants, but the resulting code would also be
 * somewhat more complicated. */
/* Reserve the kcache lines required by the constant-buffer sources
 * (sel >= 512) of one ALU instruction in the current CF, starting a new
 * ALU clause of the given type when the two kcache slots are exhausted,
 * and rewrite those sources to kcache-relative selectors.
 * Returns 0 on success or a negative errno. */
static int r600_bc_alloc_kcache_lines(struct r600_bc *bc, struct r600_bc_alu *alu, int type)
{
	struct r600_bc_kcache *kcache = bc->cf_last->kcache;
	unsigned int required_lines;
	unsigned int free_lines = 0;
	unsigned int cache_line[3];
	unsigned int count = 0;
	unsigned int i, j;
	int r;

	/* Collect required cache lines. */
	for (i = 0; i < 3; ++i) {
		bool found = false;
		unsigned int line;

		/* sels below 512 are not constant-buffer accesses */
		if (alu->src[i].sel < 512)
			continue;

		/* each LOCK_2 line covers 32 constants, addressed in units of 16 */
		line = ((alu->src[i].sel - 512) / 32) * 2;

		for (j = 0; j < count; ++j) {
			if (cache_line[j] == line) {
				found = true;
				break;
			}
		}

		if (!found)
			cache_line[count++] = line;
	}

	/* This should never actually happen: 3 sources can need at most
	 * 3 distinct lines, but only 2 kcache slots exist per clause. */
	if (count >= 3) return -ENOMEM;

	for (i = 0; i < 2; ++i) {
		if (kcache[i].mode == V_SQ_CF_KCACHE_NOP) {
			++free_lines;
		}
	}

	/* Filter lines pulled in by previous instructions. Note that this is
	 * only for the required_lines count, we can't remove these from the
	 * cache_line array since we may have to start a new ALU clause. */
	for (i = 0, required_lines = count; i < count; ++i) {
		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == cache_line[i]) {
				--required_lines;
				break;
			}
		}
	}

	/* Start a new ALU clause if needed. */
	if (required_lines > free_lines) {
		if ((r = r600_bc_add_cf(bc))) {
			return r;
		}
		bc->cf_last->inst = (type << 3);
		kcache = bc->cf_last->kcache;
	}

	/* Setup the kcache lines. */
	for (i = 0; i < count; ++i) {
		bool found = false;

		/* skip lines already locked by an earlier instruction */
		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == cache_line[i]) {
				found = true;
				break;
			}
		}

		if (found) continue;

		/* lock the line into the first free slot */
		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_NOP) {
				kcache[j].bank = 0;
				kcache[j].addr = cache_line[i];
				kcache[j].mode = V_SQ_CF_KCACHE_LOCK_2;
				break;
			}
		}
	}

	/* Alter the src operands to refer to the kcache. */
	for (i = 0; i < 3; ++i) {
		/* base[] maps kcache slot j to its selector window */
		static const unsigned int base[] = {128, 160, 256, 288};
		unsigned int line;

		if (alu->src[i].sel < 512)
			continue;

		alu->src[i].sel -= 512;
		line = (alu->src[i].sel / 32) * 2;

		for (j = 0; j < 2; ++j) {
			if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
			    kcache[j].addr == line) {
				/* offset within the line + slot base */
				alu->src[i].sel &= 0x1f;
				alu->src[i].sel += base[j];
				break;
			}
		}
	}

	return 0;
}
1136
/* Append a copy of *alu to the bytecode as part of an ALU clause of the
 * given CF type, starting a new CF when required, allocating kcache
 * lines, tracking GPR usage, and — when this instruction closes a group
 * (alu->last) — running group merging, PV/PS replacement and bank
 * swizzle assignment.  Returns 0 on success or a negative errno. */
int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
{
	struct r600_bc_alu *nalu = r600_bc_alu();
	struct r600_bc_alu *lalu;
	int i, r;

	if (nalu == NULL)
		return -ENOMEM;
	memcpy(nalu, alu, sizeof(struct r600_bc_alu));

	if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
		/* check if we could add it anyway: a plain ALU clause can be
		 * turned into PUSH_BEFORE as long as no instruction in it
		 * already writes the predicate */
		if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
			type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
			LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
				if (lalu->predicate) {
					bc->force_add_cf = 1;
					break;
				}
			}
		} else
			bc->force_add_cf = 1;
	}

	/* cf can contains only alu or only vtx or only tex */
	if (bc->cf_last == NULL || bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(nalu);
			return r;
		}
	}
	bc->cf_last->inst = (type << 3);

	/* Setup the kcache for this ALU instruction. This will start a new
	 * ALU clause if needed. */
	if ((r = r600_bc_alloc_kcache_lines(bc, nalu, type))) {
		free(nalu);
		return r;
	}

	/* first instruction of the current bank-swizzle group */
	if (!bc->cf_last->curr_bs_head) {
		bc->cf_last->curr_bs_head = nalu;
	}
	/* number of gpr == the last gpr used in any alu */
	for (i = 0; i < 3; i++) {
		if (nalu->src[i].sel >= bc->ngpr && nalu->src[i].sel < 128) {
			bc->ngpr = nalu->src[i].sel + 1;
		}
		/* replace literals that match a hardware inline constant */
		if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
			r600_bc_special_constants(nalu->src[i].value,
				&nalu->src[i].sel, &nalu->src[i].neg);
	}
	if (nalu->dst.sel >= bc->ngpr) {
		bc->ngpr = nalu->dst.sel + 1;
	}
	LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
	/* each alu use 2 dwords */
	bc->cf_last->ndw += 2;
	bc->ndw += 2;

	/* process cur ALU instructions for bank swizzle */
	if (nalu->last) {
		uint32_t literal[4];
		unsigned nliteral;
		struct r600_bc_alu *slots[5];
		r = assign_alu_units(bc, bc->cf_last->curr_bs_head, slots);
		if (r)
			return r;

		if (bc->cf_last->prev_bs_head) {
			r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		if (bc->cf_last->prev_bs_head) {
			r = replace_gpr_with_pv_ps(bc, slots, bc->cf_last->prev_bs_head);
			if (r)
				return r;
		}

		r = check_and_set_bank_swizzle(bc, slots);
		if (r)
			return r;

		/* account for the literal dwords of this group (padded to 2) */
		for (i = 0, nliteral = 0; i < 5; i++) {
			if (slots[i]) {
				r = r600_bc_alu_nliterals(bc, slots[i], literal, &nliteral);
				if (r)
					return r;
			}
		}
		bc->cf_last->ndw += align(nliteral, 2);

		/* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
		 * worst case */
		if ((bc->cf_last->ndw >> 1) >= 120) {
			bc->force_add_cf = 1;
		}

		/* shift the bank-swizzle group history */
		bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
		bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
		bc->cf_last->curr_bs_head = NULL;
	}
	return 0;
}
1244
1245 int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
1246 {
1247 return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
1248 }
1249
1250 int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
1251 {
1252 struct r600_bc_vtx *nvtx = r600_bc_vtx();
1253 int r;
1254
1255 if (nvtx == NULL)
1256 return -ENOMEM;
1257 memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));
1258
1259 /* cf can contains only alu or only vtx or only tex */
1260 if (bc->cf_last == NULL ||
1261 (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
1262 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) ||
1263 bc->force_add_cf) {
1264 r = r600_bc_add_cf(bc);
1265 if (r) {
1266 free(nvtx);
1267 return r;
1268 }
1269 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
1270 }
1271 LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
1272 /* each fetch use 4 dwords */
1273 bc->cf_last->ndw += 4;
1274 bc->ndw += 4;
1275 if ((bc->cf_last->ndw / 4) > 7)
1276 bc->force_add_cf = 1;
1277 return 0;
1278 }
1279
/* Append a copy of *tex to the bytecode, opening a fresh TEX CF clause
 * when needed (including when the new fetch would read a GPR written by
 * an earlier fetch in the same clause).
 * Returns 0 on success or a negative errno. */
int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
{
	struct r600_bc_tex *ntex = r600_bc_tex();
	int r;

	if (ntex == NULL)
		return -ENOMEM;
	memcpy(ntex, tex, sizeof(struct r600_bc_tex));

	/* we can't fetch data and use it as texture lookup address in the same TEX clause */
	if (bc->cf_last != NULL &&
		bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_TEX) {
		struct r600_bc_tex *ttex;
		LIST_FOR_EACH_ENTRY(ttex, &bc->cf_last->tex, list) {
			if (ttex->dst_gpr == ntex->src_gpr) {
				bc->force_add_cf = 1;
				break;
			}
		}
	}

	/* cf can contains only alu or only vtx or only tex */
	if (bc->cf_last == NULL ||
		bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
		bc->force_add_cf) {
		r = r600_bc_add_cf(bc);
		if (r) {
			free(ntex);
			return r;
		}
		bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
	}
	/* grow the GPR count to cover the fetch's source and destination */
	if (ntex->src_gpr >= bc->ngpr) {
		bc->ngpr = ntex->src_gpr + 1;
	}
	if (ntex->dst_gpr >= bc->ngpr) {
		bc->ngpr = ntex->dst_gpr + 1;
	}
	LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
	/* each texture fetch use 4 dwords */
	bc->cf_last->ndw += 4;
	bc->ndw += 4;
	/* force a new clause once this one holds 8 fetches */
	if ((bc->cf_last->ndw / 4) > 7)
		bc->force_add_cf = 1;
	return 0;
}
1326
1327 int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
1328 {
1329 int r;
1330 r = r600_bc_add_cf(bc);
1331 if (r)
1332 return r;
1333
1334 bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
1335 bc->cf_last->inst = inst;
1336 return 0;
1337 }
1338
/* common to all 3 families */
/* Encode one vertex-fetch instruction into 4 dwords at bc->bytecode[id].
 * For fetch shaders (bc->type == -1) on r600/r700 the buffer id is
 * offset by 160, the start of the fetch-shader resource range. */
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
{
	unsigned fetch_resource_start = 0;

	/* check if we are fetch shader */
	/* fetch shader can also access vertex resource,
	 * first fetch shader resource is at 160
	 */
	if (bc->type == -1) {
		switch (bc->chiprev) {
		/* r600 */
		case CHIPREV_R600:
		/* r700 */
		case CHIPREV_R700:
			fetch_resource_start = 160;
			break;
		/* evergreen */
		case CHIPREV_EVERGREEN:
			fetch_resource_start = 0;
			break;
		default:
			fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
				__FILE__, __func__, __LINE__, bc->chiprev);
			break;
		}
	}
	/* VTX_WORD0: buffer/fetch type, source GPR and mega-fetch count */
	bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
				S_SQ_VTX_WORD0_FETCH_TYPE(vtx->fetch_type) |
				S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
				S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
				S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
	/* VTX_WORD1: destination swizzle, data format and destination GPR */
	bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
				S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
				S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
				S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
				S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
				S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
				S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
				S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
				S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
				S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
	/* VTX_WORD2: byte offset, mega-fetch always enabled */
	bc->bytecode[id++] = S_SQ_VTX_WORD2_OFFSET(vtx->offset) |
				S_SQ_VTX_WORD2_MEGA_FETCH(1);
	/* padding dword for 16-byte instruction alignment */
	bc->bytecode[id++] = 0;
	return 0;
}
1386
/* common to all 3 families */
/* Encode one texture-fetch instruction into 4 dwords at bc->bytecode[id]. */
static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
{
	/* TEX_WORD0: opcode, resource and source GPR */
	bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
				S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
				S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
				S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
	/* TEX_WORD1: destination GPR/swizzle, LOD bias, coordinate types */
	bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
				S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
				S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
				S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
				S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
				S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
				S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
				S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
				S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
				S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
				S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
	/* TEX_WORD2: texel offsets, sampler and source swizzle */
	bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
				S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
				S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
				S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
				S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
				S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
				S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
				S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
	/* padding dword for 16-byte instruction alignment */
	bc->bytecode[id++] = 0;
	return 0;
}
1416
/* r600 only, r700/eg bits in r700_asm.c */
/* Encode one ALU instruction into 2 dwords at bc->bytecode[id],
 * choosing the OP3 or OP2 layout for the second word. */
static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
{
	/* don't replace gpr by pv or ps for destination register */
	/* ALU_WORD0: src0/src1 operands and the group-terminator bit */
	bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
				S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
				S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
				S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
				S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
				S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
				S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
				S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
				S_SQ_ALU_WORD0_LAST(alu->last);

	if (alu->is_op3) {
		/* ALU_WORD1 OP3 layout: third source replaces abs/omod/write-mask */
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
					S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
					S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
					S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
					S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
	} else {
		/* ALU_WORD1 OP2 layout: abs flags, write mask, omod, predicate */
		bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
					S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
					S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
					S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
					S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
					S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
					S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
					S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
					S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
					S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
					S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
					S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
	}
	return 0;
}
1458
/* common for r600/r700 - eg in eg_asm.c */
/* Encode one CF (control-flow) instruction into 2 dwords at its
 * pre-assigned position cf->id.  Returns 0 or -EINVAL for an
 * unsupported CF opcode. */
static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
{
	unsigned id = cf->id;

	switch (cf->inst) {
	/* ALU clauses: address of the clause, kcache setup and dword count */
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
	case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
		bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
			S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
			S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache[1].bank);

		bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
			S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
			S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
					S_SQ_CF_ALU_WORD1_BARRIER(1) |
					S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
					S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
		break;
	/* fetch clauses: clause address and fetch count (4 dwords each) */
	case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
	case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
	case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
					S_SQ_CF_WORD1_BARRIER(1) |
					S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1);
		break;
	/* exports: source GPR, target array base/type and swizzle */
	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
	case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
			S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
		bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf->output.burst_count - 1) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->output.barrier) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->output.inst) |
			S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf->output.end_of_program);
		break;
	/* flow control: jump target, condition and stack pop count */
	case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
	case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
	case V_SQ_CF_WORD1_SQ_CF_INST_POP:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
	case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
	case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
	case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
		bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
		bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
					S_SQ_CF_WORD1_BARRIER(1) |
					S_SQ_CF_WORD1_COND(cf->cond) |
					S_SQ_CF_WORD1_POP_COUNT(cf->pop_count);

		break;
	default:
		R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
		return -EINVAL;
	}
	return 0;
}
1527
/* Final assembly: compute the stack depth, assign an address to every
 * CF block (pass 1), then allocate the bytecode buffer and emit the
 * binary encoding of every CF/ALU/VTX/TEX instruction (pass 2).
 * Returns 0 on success or a negative errno. */
int r600_bc_build(struct r600_bc *bc)
{
	struct r600_bc_cf *cf;
	struct r600_bc_alu *alu;
	struct r600_bc_vtx *vtx;
	struct r600_bc_tex *tex;
	uint32_t literal[4];
	unsigned nliteral;
	unsigned addr;
	int i, r;

	/* derive the hardware stack depth from the deepest call-stack use */
	if (bc->callstack[0].max > 0)
		bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
	if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
		bc->nstack = 1;
	}

	/* first pass compute addr of each CF block */
	/* addr start after all the CF instructions */
	addr = bc->cf_last->id + 2;
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		switch (cf->inst) {
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
			break;
		case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
			/* fetch node needs to be 16 bytes (4 dwords) aligned */
			addr += 3;
			addr &= 0xFFFFFFFCUL;
			break;
		case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
		case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
		case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
			break;
		case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
		case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
		case V_SQ_CF_WORD1_SQ_CF_INST_POP:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
		case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
		case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
			break;
		default:
			R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
			return -EINVAL;
		}
		cf->addr = addr;
		addr += cf->ndw;
		bc->ndw = cf->addr + cf->ndw;
	}
	/* allocate a fresh buffer sized for the final dword count */
	free(bc->bytecode);
	bc->bytecode = calloc(1, bc->ndw * 4);
	if (bc->bytecode == NULL)
		return -ENOMEM;
	/* second pass: emit every CF block and its clause contents */
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		addr = cf->addr;
		if (bc->chiprev == CHIPREV_EVERGREEN)
			r = eg_bc_cf_build(bc, cf);
		else
			r = r600_bc_cf_build(bc, cf);
		if (r)
			return r;
		switch (cf->inst) {
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
			nliteral = 0;
			memset(literal, 0, sizeof(literal));
			LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
				r = r600_bc_alu_nliterals(bc, alu, literal, &nliteral);
				if (r)
					return r;
				/* point literal sources at their slot in literal[] */
				r600_bc_alu_adjust_literals(bc, alu, literal, nliteral);
				switch(bc->chiprev) {
				case CHIPREV_R600:
					r = r600_bc_alu_build(bc, alu, addr);
					break;
				case CHIPREV_R700:
				case CHIPREV_EVERGREEN: /* eg alu is same encoding as r700 */
					r = r700_bc_alu_build(bc, alu, addr);
					break;
				default:
					R600_ERR("unknown family %d\n", bc->family);
					return -EINVAL;
				}
				if (r)
					return r;
				addr += 2;
				/* after the last instruction of a group, emit its
				 * literal dwords (padded to an even count) */
				if (alu->last) {
					for (i = 0; i < align(nliteral, 2); ++i) {
						bc->bytecode[addr++] = literal[i];
					}
					nliteral = 0;
					memset(literal, 0, sizeof(literal));
				}
			}
			break;
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
			LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
				r = r600_bc_vtx_build(bc, vtx, addr);
				if (r)
					return r;
				addr += 4;
			}
			break;
		case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
			LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
				r = r600_bc_tex_build(bc, tex, addr);
				if (r)
					return r;
				addr += 4;
			}
			break;
		case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
		case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
		case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
		case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
		case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
		case V_SQ_CF_WORD1_SQ_CF_INST_POP:
		case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
		case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
			break;
		default:
			R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
			return -EINVAL;
		}
	}
	return 0;
}
1671
1672 void r600_bc_clear(struct r600_bc *bc)
1673 {
1674 struct r600_bc_cf *cf = NULL, *next_cf;
1675
1676 free(bc->bytecode);
1677 bc->bytecode = NULL;
1678
1679 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
1680 struct r600_bc_alu *alu = NULL, *next_alu;
1681 struct r600_bc_tex *tex = NULL, *next_tex;
1682 struct r600_bc_tex *vtx = NULL, *next_vtx;
1683
1684 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
1685 free(alu);
1686 }
1687
1688 LIST_INITHEAD(&cf->alu);
1689
1690 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
1691 free(tex);
1692 }
1693
1694 LIST_INITHEAD(&cf->tex);
1695
1696 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
1697 free(vtx);
1698 }
1699
1700 LIST_INITHEAD(&cf->vtx);
1701
1702 free(cf);
1703 }
1704
1705 LIST_INITHEAD(&cf->list);
1706 }
1707
/* Debug helper: print a human-readable disassembly of the built
 * bytecode to stderr (CF blocks first, then each clause's ALU/TEX/VTX
 * instructions with their literal constants).  Must be called after
 * r600_bc_build() since it reads bc->bytecode by address. */
void r600_bc_dump(struct r600_bc *bc)
{
	struct r600_bc_cf *cf = NULL;
	struct r600_bc_alu *alu = NULL;
	struct r600_bc_vtx *vtx = NULL;
	struct r600_bc_tex *tex = NULL;

	unsigned i, id;
	uint32_t literal[4];
	unsigned nliteral;
	char chip = '6';

	/* map chiprev to the family digit/letter printed in the header */
	switch (bc->chiprev) {
	case 1:
		chip = '7';
		break;
	case 2:
		chip = 'E';
		break;
	case 0:
	default:
		chip = '6';
		break;
	}
	fprintf(stderr, "bytecode %d dw -- %d gprs ---------------------\n", bc->ndw, bc->ngpr);
	fprintf(stderr, "     %c\n", chip);

	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
		id = cf->id;

		/* print the two CF dwords according to the CF kind */
		switch (cf->inst) {
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
		case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
			fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
			fprintf(stderr, "ADDR:%d ", cf->addr);
			fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache[0].mode);
			fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache[0].bank);
			fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache[1].bank);
			id++;
			fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
			fprintf(stderr, "INST:%d ", cf->inst);
			fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache[1].mode);
			fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache[0].addr);
			fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache[1].addr);
			fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
			break;
		case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
		case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
			fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
			fprintf(stderr, "ADDR:%d\n", cf->addr);
			id++;
			fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
			fprintf(stderr, "INST:%d ", cf->inst);
			fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
			break;
		case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
		case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
			fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
			fprintf(stderr, "GPR:%X ", cf->output.gpr);
			fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
			fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
			fprintf(stderr, "TYPE:%X\n", cf->output.type);
			id++;
			fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
			fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
			fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
			fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
			fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
			fprintf(stderr, "BARRIER:%X ", cf->output.barrier);
			fprintf(stderr, "INST:%d ", cf->output.inst);
			fprintf(stderr, "BURST_COUNT:%d ", cf->output.burst_count);
			fprintf(stderr, "EOP:%X\n", cf->output.end_of_program);
			break;
		case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
		case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
		case V_SQ_CF_WORD1_SQ_CF_INST_POP:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
		case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
		case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
		case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
			fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
			fprintf(stderr, "ADDR:%d\n", cf->cf_addr);
			id++;
			fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
			fprintf(stderr, "INST:%d ", cf->inst);
			fprintf(stderr, "COND:%X ", cf->cond);
			fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
			break;
		}

		/* then dump the clause body starting at the CF's address */
		id = cf->addr;
		nliteral = 0;
		LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
			r600_bc_alu_nliterals(bc, alu, literal, &nliteral);

			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
			fprintf(stderr, "REL:%d ", alu->src[0].rel);
			fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
			fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
			fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
			fprintf(stderr, "REL:%d ", alu->src[1].rel);
			fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
			fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
			fprintf(stderr, "LAST:%d)\n", alu->last);
			id++;
			fprintf(stderr, "%04d %08X %c  ", id, bc->bytecode[id], alu->last ? '*' : ' ');
			fprintf(stderr, "INST:%d ", alu->inst);
			fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
			fprintf(stderr, "CHAN:%d ", alu->dst.chan);
			fprintf(stderr, "REL:%d ", alu->dst.rel);
			fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
			fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
			if (alu->is_op3) {
				fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
				fprintf(stderr, "REL:%d ", alu->src[2].rel);
				fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
				fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
			} else {
				fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
				fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
				fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
				fprintf(stderr, "OMOD:%d ", alu->omod);
				fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
				fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
			}

			id++;
			/* after a group's last instruction, print its literal
			 * dwords as floats and skip the alignment pad */
			if (alu->last) {
				for (i = 0; i < nliteral; i++, id++) {
					float *f = (float*)(bc->bytecode + id);
					fprintf(stderr, "%04d %08X\t%f\n", id, bc->bytecode[id], *f);
				}
				id += nliteral & 1;
				nliteral = 0;
			}
		}

		LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "INST:%d ", tex->inst);
			fprintf(stderr, "RESOURCE_ID:%d ", tex->resource_id);
			fprintf(stderr, "SRC(GPR:%d ", tex->src_gpr);
			fprintf(stderr, "REL:%d)\n", tex->src_rel);
			id++;
			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "DST(GPR:%d ", tex->dst_gpr);
			fprintf(stderr, "REL:%d ", tex->dst_rel);
			fprintf(stderr, "SEL_X:%d ", tex->dst_sel_x);
			fprintf(stderr, "SEL_Y:%d ", tex->dst_sel_y);
			fprintf(stderr, "SEL_Z:%d ", tex->dst_sel_z);
			fprintf(stderr, "SEL_W:%d) ", tex->dst_sel_w);
			fprintf(stderr, "LOD_BIAS:%d ", tex->lod_bias);
			fprintf(stderr, "COORD_TYPE_X:%d ", tex->coord_type_x);
			fprintf(stderr, "COORD_TYPE_Y:%d ", tex->coord_type_y);
			fprintf(stderr, "COORD_TYPE_Z:%d ", tex->coord_type_z);
			fprintf(stderr, "COORD_TYPE_W:%d\n", tex->coord_type_w);
			id++;
			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "OFFSET_X:%d ", tex->offset_x);
			fprintf(stderr, "OFFSET_Y:%d ", tex->offset_y);
			fprintf(stderr, "OFFSET_Z:%d ", tex->offset_z);
			fprintf(stderr, "SAMPLER_ID:%d ", tex->sampler_id);
			fprintf(stderr, "SRC(SEL_X:%d ", tex->src_sel_x);
			fprintf(stderr, "SEL_Y:%d ", tex->src_sel_y);
			fprintf(stderr, "SEL_Z:%d ", tex->src_sel_z);
			fprintf(stderr, "SEL_W:%d)\n", tex->src_sel_w);
			id++;
			/* fourth dword is the alignment pad */
			fprintf(stderr, "%04d %08X   \n", id, bc->bytecode[id]);
			id++;
		}

		LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "INST:%d ", vtx->inst);
			fprintf(stderr, "FETCH_TYPE:%d ", vtx->fetch_type);
			fprintf(stderr, "BUFFER_ID:%d\n", vtx->buffer_id);
			id++;
			/* This assumes that no semantic fetches exist */
			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "SRC(GPR:%d ", vtx->src_gpr);
			fprintf(stderr, "SEL_X:%d) ", vtx->src_sel_x);
			fprintf(stderr, "MEGA_FETCH_COUNT:%d ", vtx->mega_fetch_count);
			fprintf(stderr, "DST(GPR:%d ", vtx->dst_gpr);
			fprintf(stderr, "SEL_X:%d ", vtx->dst_sel_x);
			fprintf(stderr, "SEL_Y:%d ", vtx->dst_sel_y);
			fprintf(stderr, "SEL_Z:%d ", vtx->dst_sel_z);
			fprintf(stderr, "SEL_W:%d) ", vtx->dst_sel_w);
			fprintf(stderr, "USE_CONST_FIELDS:%d ", vtx->use_const_fields);
			fprintf(stderr, "FORMAT(DATA:%d ", vtx->data_format);
			fprintf(stderr, "NUM:%d ", vtx->num_format_all);
			fprintf(stderr, "COMP:%d ", vtx->format_comp_all);
			fprintf(stderr, "MODE:%d)\n", vtx->srf_mode_all);
			id++;
			fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
			fprintf(stderr, "OFFSET:%d\n", vtx->offset);
			//TODO
			id++;
			/* fourth dword is the alignment pad */
			fprintf(stderr, "%04d %08X   \n", id, bc->bytecode[id]);
			id++;
		}
	}

	fprintf(stderr, "--------------------------------------\n");
}
1918
1919 static void r600_cf_vtx(struct r600_vertex_element *ve)
1920 {
1921 struct r600_pipe_state *rstate;
1922
1923 rstate = &ve->rstate;
1924 rstate->id = R600_PIPE_STATE_FETCH_SHADER;
1925 rstate->nregs = 0;
1926 r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
1927 0x00000000, 0xFFFFFFFF, NULL);
1928 r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
1929 0x00000000, 0xFFFFFFFF, NULL);
1930 r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
1931 r600_bo_offset(ve->fetch_shader) >> 8,
1932 0xFFFFFFFF, ve->fetch_shader);
1933 }
1934
1935 static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
1936 unsigned *num_format, unsigned *format_comp)
1937 {
1938 const struct util_format_description *desc;
1939 unsigned i;
1940
1941 *format = 0;
1942 *num_format = 0;
1943 *format_comp = 0;
1944
1945 desc = util_format_description(pformat);
1946 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
1947 goto out_unknown;
1948 }
1949
1950 /* Find the first non-VOID channel. */
1951 for (i = 0; i < 4; i++) {
1952 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
1953 break;
1954 }
1955 }
1956
1957 switch (desc->channel[i].type) {
1958 /* Half-floats, floats, ints */
1959 case UTIL_FORMAT_TYPE_FLOAT:
1960 switch (desc->channel[i].size) {
1961 case 16:
1962 switch (desc->nr_channels) {
1963 case 1:
1964 *format = FMT_16_FLOAT;
1965 break;
1966 case 2:
1967 *format = FMT_16_16_FLOAT;
1968 break;
1969 case 3:
1970 case 4:
1971 *format = FMT_16_16_16_16_FLOAT;
1972 break;
1973 }
1974 break;
1975 case 32:
1976 switch (desc->nr_channels) {
1977 case 1:
1978 *format = FMT_32_FLOAT;
1979 break;
1980 case 2:
1981 *format = FMT_32_32_FLOAT;
1982 break;
1983 case 3:
1984 *format = FMT_32_32_32_FLOAT;
1985 break;
1986 case 4:
1987 *format = FMT_32_32_32_32_FLOAT;
1988 break;
1989 }
1990 break;
1991 default:
1992 goto out_unknown;
1993 }
1994 break;
1995 /* Unsigned ints */
1996 case UTIL_FORMAT_TYPE_UNSIGNED:
1997 /* Signed ints */
1998 case UTIL_FORMAT_TYPE_SIGNED:
1999 switch (desc->channel[i].size) {
2000 case 8:
2001 switch (desc->nr_channels) {
2002 case 1:
2003 *format = FMT_8;
2004 break;
2005 case 2:
2006 *format = FMT_8_8;
2007 break;
2008 case 3:
2009 case 4:
2010 *format = FMT_8_8_8_8;
2011 break;
2012 }
2013 break;
2014 case 16:
2015 switch (desc->nr_channels) {
2016 case 1:
2017 *format = FMT_16;
2018 break;
2019 case 2:
2020 *format = FMT_16_16;
2021 break;
2022 case 3:
2023 case 4:
2024 *format = FMT_16_16_16_16;
2025 break;
2026 }
2027 break;
2028 case 32:
2029 switch (desc->nr_channels) {
2030 case 1:
2031 *format = FMT_32;
2032 break;
2033 case 2:
2034 *format = FMT_32_32;
2035 break;
2036 case 3:
2037 *format = FMT_32_32_32;
2038 break;
2039 case 4:
2040 *format = FMT_32_32_32_32;
2041 break;
2042 }
2043 break;
2044 default:
2045 goto out_unknown;
2046 }
2047 break;
2048 default:
2049 goto out_unknown;
2050 }
2051
2052 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2053 *format_comp = 1;
2054 }
2055 if (desc->channel[i].normalized) {
2056 *num_format = 0;
2057 } else {
2058 *num_format = 2;
2059 }
2060 return;
2061 out_unknown:
2062 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
2063 }
2064
2065 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
2066 {
2067 static int dump_shaders = -1;
2068
2069 struct r600_bc bc;
2070 struct r600_bc_vtx vtx;
2071 struct pipe_vertex_element *elements = ve->elements;
2072 const struct util_format_description *desc;
2073 unsigned fetch_resource_start = rctx->family >= CHIP_CEDAR ? 0 : 160;
2074 unsigned format, num_format, format_comp;
2075 u32 *bytecode;
2076 int i, r;
2077
2078 /* vertex elements offset need special handling, if offset is bigger
2079 + * than what we can put in fetch instruction then we need to alterate
2080 * the vertex resource offset. In such case in order to simplify code
2081 * we will bound one resource per elements. It's a worst case scenario.
2082 */
2083 for (i = 0; i < ve->count; i++) {
2084 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
2085 if (ve->vbuffer_offset[i]) {
2086 ve->vbuffer_need_offset = 1;
2087 }
2088 }
2089
2090 memset(&bc, 0, sizeof(bc));
2091 r = r600_bc_init(&bc, r600_get_family(rctx->radeon));
2092 if (r)
2093 return r;
2094
2095 for (i = 0; i < ve->count; i++) {
2096 if (elements[i].instance_divisor > 1) {
2097 struct r600_bc_alu alu;
2098
2099 memset(&alu, 0, sizeof(alu));
2100 alu.inst = BC_INST(&bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT);
2101 alu.src[0].sel = 0;
2102 alu.src[0].chan = 3;
2103
2104 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2105 alu.src[1].value = (1l << 32) / elements[i].instance_divisor + 1;
2106
2107 alu.dst.sel = i + 1;
2108 alu.dst.chan = 3;
2109 alu.dst.write = 1;
2110 alu.last = 1;
2111
2112 if ((r = r600_bc_add_alu(&bc, &alu))) {
2113 r600_bc_clear(&bc);
2114 return r;
2115 }
2116 }
2117 }
2118
2119 for (i = 0; i < ve->count; i++) {
2120 unsigned vbuffer_index;
2121 r600_vertex_data_type(ve->elements[i].src_format, &format, &num_format, &format_comp);
2122 desc = util_format_description(ve->elements[i].src_format);
2123 if (desc == NULL) {
2124 r600_bc_clear(&bc);
2125 R600_ERR("unknown format %d\n", ve->elements[i].src_format);
2126 return -EINVAL;
2127 }
2128
2129 /* see above for vbuffer_need_offset explanation */
2130 vbuffer_index = elements[i].vertex_buffer_index;
2131 memset(&vtx, 0, sizeof(vtx));
2132 vtx.buffer_id = (ve->vbuffer_need_offset ? i : vbuffer_index) + fetch_resource_start;
2133 vtx.fetch_type = elements[i].instance_divisor ? 1 : 0;
2134 vtx.src_gpr = elements[i].instance_divisor > 1 ? i + 1 : 0;
2135 vtx.src_sel_x = elements[i].instance_divisor ? 3 : 0;
2136 vtx.mega_fetch_count = 0x1F;
2137 vtx.dst_gpr = i + 1;
2138 vtx.dst_sel_x = desc->swizzle[0];
2139 vtx.dst_sel_y = desc->swizzle[1];
2140 vtx.dst_sel_z = desc->swizzle[2];
2141 vtx.dst_sel_w = desc->swizzle[3];
2142 vtx.data_format = format;
2143 vtx.num_format_all = num_format;
2144 vtx.format_comp_all = format_comp;
2145 vtx.srf_mode_all = 1;
2146 vtx.offset = elements[i].src_offset;
2147
2148 if ((r = r600_bc_add_vtx(&bc, &vtx))) {
2149 r600_bc_clear(&bc);
2150 return r;
2151 }
2152 }
2153
2154 r600_bc_add_cfinst(&bc, BC_INST(&bc, V_SQ_CF_WORD1_SQ_CF_INST_RETURN));
2155
2156 if ((r = r600_bc_build(&bc))) {
2157 r600_bc_clear(&bc);
2158 return r;
2159 }
2160
2161 if (dump_shaders == -1)
2162 dump_shaders = debug_get_bool_option("R600_DUMP_SHADERS", FALSE);
2163
2164 if (dump_shaders) {
2165 fprintf(stderr, "--------------------------------------------------------------\n");
2166 r600_bc_dump(&bc);
2167 fprintf(stderr, "______________________________________________________________\n");
2168 }
2169
2170 ve->fs_size = bc.ndw*4;
2171
2172 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2173 ve->fetch_shader = r600_bo(rctx->radeon, ve->fs_size, 256, PIPE_BIND_VERTEX_BUFFER, 0);
2174 if (ve->fetch_shader == NULL) {
2175 r600_bc_clear(&bc);
2176 return -ENOMEM;
2177 }
2178
2179 bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, 0, NULL);
2180 if (bytecode == NULL) {
2181 r600_bc_clear(&bc);
2182 r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
2183 return -ENOMEM;
2184 }
2185
2186 memcpy(bytecode, bc.bytecode, ve->fs_size);
2187
2188 r600_bo_unmap(rctx->radeon, ve->fetch_shader);
2189 r600_bc_clear(&bc);
2190
2191 if (rctx->family >= CHIP_CEDAR)
2192 eg_cf_vtx(ve);
2193 else
2194 r600_cf_vtx(ve);
2195
2196 return 0;
2197 }