r600g: set max tex/vtx instructions count to 16 for cayman
[mesa.git] / src / gallium / drivers / r600 / r600_asm.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25 #include <byteswap.h>
26 #include "util/u_format.h"
27 #include "util/u_memory.h"
28 #include "pipe/p_shader_tokens.h"
29 #include "r600_pipe.h"
30 #include "r600_sq.h"
31 #include "r600_opcodes.h"
32 #include "r600_asm.h"
33 #include "r600_formats.h"
34 #include "r600d.h"
35
36 #define NUM_OF_CYCLES 3
37 #define NUM_OF_COMPONENTS 4
38
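/* Number of source operands an ALU instruction consumes: op3 instructions
 * always take three; otherwise it depends on the op2 opcode and chip family. */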
39 static inline unsigned int r600_bytecode_get_num_operands(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
40 {
41 if(alu->is_op3)
42 return 3;
43
44 switch (bc->chip_class) {
45 case R600:
46 case R700:
47 switch (alu->inst) {
48 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
49 return 0;
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT:
68 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT:
69 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT:
70 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
71 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
83 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
84 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT:
85 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
86 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
87 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
88 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT:
89 return 2;
90
91 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
92 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA:
93 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
94 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT:
95 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
96 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
97 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
98 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
99 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
100 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
101 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED:
102 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
103 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED:
104 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
105 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
106 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT:
107 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
108 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
109 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RNDNE:
110 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT:
111 return 1;
112 default: R600_ERR(
113 "Need instruction operand number for 0x%x.\n", alu->inst);
114 }
115 break;
116 case EVERGREEN:
117 case CAYMAN:
118 switch (alu->inst) {
119 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
120 return 0;
121 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
122 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT:
123 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT:
124 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT:
125 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT:
126 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
127 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
128 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
129 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
130 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
131 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT:
132 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT:
133 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT:
134 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT:
135 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
136 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
137 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT:
138 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT:
139 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT:
140 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT:
141 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
142 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT:
143 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
144 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT:
145 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
146 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT:
147 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT:
148 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
149 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT:
150 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT:
151 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
152 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
153 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
154 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
155 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT:
156 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
157 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
158 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
159 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY:
160 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW:
161 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT:
162 return 2;
163
164 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
165 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT:
166 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
167 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
168 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
169 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
170 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
171 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
172 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED:
173 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
174 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED:
175 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
176 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
177 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR:
178 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT:
179 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
180 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
181 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RNDNE:
182 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT:
183 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_LOAD_P0:
184 return 1;
185 default: R600_ERR(
186 "Need instruction operand number for 0x%x.\n", alu->inst);
187 }
188 break;
189 }
190
191 return 3;
192 }
193
194 int r700_bytecode_alu_build(struct r600_bytecode *bc, struct r600_bytecode_alu *alu, unsigned id);
195
196 static struct r600_bytecode_cf *r600_bytecode_cf(void)
197 {
198 struct r600_bytecode_cf *cf = CALLOC_STRUCT(r600_bytecode_cf);
199
200 if (cf == NULL)
201 return NULL;
202 LIST_INITHEAD(&cf->list);
203 LIST_INITHEAD(&cf->alu);
204 LIST_INITHEAD(&cf->vtx);
205 LIST_INITHEAD(&cf->tex);
206 return cf;
207 }
208
209 static struct r600_bytecode_alu *r600_bytecode_alu(void)
210 {
211 struct r600_bytecode_alu *alu = CALLOC_STRUCT(r600_bytecode_alu);
212
213 if (alu == NULL)
214 return NULL;
215 LIST_INITHEAD(&alu->list);
216 return alu;
217 }
218
219 static struct r600_bytecode_vtx *r600_bytecode_vtx(void)
220 {
221 struct r600_bytecode_vtx *vtx = CALLOC_STRUCT(r600_bytecode_vtx);
222
223 if (vtx == NULL)
224 return NULL;
225 LIST_INITHEAD(&vtx->list);
226 return vtx;
227 }
228
229 static struct r600_bytecode_tex *r600_bytecode_tex(void)
230 {
231 struct r600_bytecode_tex *tex = CALLOC_STRUCT(r600_bytecode_tex);
232
233 if (tex == NULL)
234 return NULL;
235 LIST_INITHEAD(&tex->list);
236 return tex;
237 }
238
239 void r600_bytecode_init(struct r600_bytecode *bc, enum chip_class chip_class)
240 {
241 LIST_INITHEAD(&bc->cf);
242 bc->chip_class = chip_class;
243 }
244
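/* Append a new CF (control flow) entry to the shader; every CF instruction
 * occupies two dwords, so CF ids advance in steps of two. */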
245 static int r600_bytecode_add_cf(struct r600_bytecode *bc)
246 {
247 struct r600_bytecode_cf *cf = r600_bytecode_cf();
248
249 if (cf == NULL)
250 return -ENOMEM;
251 LIST_ADDTAIL(&cf->list, &bc->cf);
252 if (bc->cf_last)
253 cf->id = bc->cf_last->id + 2;
254 bc->cf_last = cf;
255 bc->ncf++;
256 bc->ndw += 2;
257 bc->force_add_cf = 0;
258 bc->ar_loaded = 0;
259 return 0;
260 }
261
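/* Add an export; consecutive compatible exports (same instruction, type and
 * swizzles, contiguous gpr/array_base ranges) are merged into a single burst,
 * otherwise a new CF is started for this output. */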
262 int r600_bytecode_add_output(struct r600_bytecode *bc, const struct r600_bytecode_output *output)
263 {
264 int r;
265
266 if (bc->cf_last && (bc->cf_last->inst == output->inst ||
267 (bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) &&
268 output->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE))) &&
269 output->type == bc->cf_last->output.type &&
270 output->elem_size == bc->cf_last->output.elem_size &&
271 output->swizzle_x == bc->cf_last->output.swizzle_x &&
272 output->swizzle_y == bc->cf_last->output.swizzle_y &&
273 output->swizzle_z == bc->cf_last->output.swizzle_z &&
274 output->swizzle_w == bc->cf_last->output.swizzle_w &&
275 (output->burst_count + bc->cf_last->output.burst_count) <= 16) {
276
277 if ((output->gpr + output->burst_count) == bc->cf_last->output.gpr &&
278 (output->array_base + output->burst_count) == bc->cf_last->output.array_base) {
279
280 bc->cf_last->output.end_of_program |= output->end_of_program;
281 bc->cf_last->output.inst = output->inst;
282 bc->cf_last->output.gpr = output->gpr;
283 bc->cf_last->output.array_base = output->array_base;
284 bc->cf_last->output.burst_count += output->burst_count;
285 return 0;
286
287 } else if (output->gpr == (bc->cf_last->output.gpr + bc->cf_last->output.burst_count) &&
288 output->array_base == (bc->cf_last->output.array_base + bc->cf_last->output.burst_count)) {
289
290 bc->cf_last->output.end_of_program |= output->end_of_program;
291 bc->cf_last->output.inst = output->inst;
292 bc->cf_last->output.burst_count += output->burst_count;
293 return 0;
294 }
295 }
296
297 r = r600_bytecode_add_cf(bc);
298 if (r)
299 return r;
300 bc->cf_last->inst = output->inst;
301 memcpy(&bc->cf_last->output, output, sizeof(struct r600_bytecode_output));
302 return 0;
303 }
304
305 /* alu instructions that can only exist once per group */
306 static int is_alu_once_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
307 {
308 switch (bc->chip_class) {
309 case R600:
310 case R700:
311 return !alu->is_op3 && (
312 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
313 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
314 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
315 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
316 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
317 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
318 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
319 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
320 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
321 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
322 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
323 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
324 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
325 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
326 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
327 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
328 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
329 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
330 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
331 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
332 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
333 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
334 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
335 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
336 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
337 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
338 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
339 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
340 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
341 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
342 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
343 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
344 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
345 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
346 case EVERGREEN:
347 case CAYMAN:
348 default:
349 return !alu->is_op3 && (
350 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
351 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
352 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
353 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
354 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
355 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
356 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
357 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
358 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
359 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
360 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
361 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
362 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
363 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT ||
364 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE ||
365 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE ||
366 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV ||
367 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP ||
368 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR ||
369 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE ||
370 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH ||
371 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH ||
372 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH ||
373 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH ||
374 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT ||
375 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT ||
376 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT ||
377 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT ||
378 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT ||
379 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT ||
380 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT ||
381 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT ||
382 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT ||
383 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT);
384 }
385 }
386
387 static int is_alu_reduction_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
388 {
389 switch (bc->chip_class) {
390 case R600:
391 case R700:
392 return !alu->is_op3 && (
393 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
394 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
395 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
396 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
397 case EVERGREEN:
398 case CAYMAN:
399 default:
400 return !alu->is_op3 && (
401 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE ||
402 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4 ||
403 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE ||
404 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4);
405 }
406 }
407
408 static int is_alu_cube_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
409 {
410 switch (bc->chip_class) {
411 case R600:
412 case R700:
413 return !alu->is_op3 &&
414 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
415 case EVERGREEN:
416 case CAYMAN:
417 default:
418 return !alu->is_op3 &&
419 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
420 }
421 }
422
423 static int is_alu_mova_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
424 {
425 switch (bc->chip_class) {
426 case R600:
427 case R700:
428 return !alu->is_op3 && (
429 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA ||
430 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR ||
431 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
432 case EVERGREEN:
433 case CAYMAN:
434 default:
435 return !alu->is_op3 && (
436 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
437 }
438 }
439
440 /* alu instructions that can only execute on the vector unit */
441 static int is_alu_vec_unit_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
442 {
443 switch (bc->chip_class) {
444 case R600:
445 case R700:
446 return is_alu_reduction_inst(bc, alu) ||
447 is_alu_mova_inst(bc, alu);
448 case EVERGREEN:
449 case CAYMAN:
450 default:
451 return is_alu_reduction_inst(bc, alu) ||
452 is_alu_mova_inst(bc, alu) ||
453 (alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
454 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR ||
455 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY ||
456 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW);
457 }
458 }
459
460 /* alu instructions that can only execute on the trans unit */
461 static int is_alu_trans_unit_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
462 {
463 switch (bc->chip_class) {
464 case R600:
465 case R700:
466 if (!alu->is_op3)
467 return alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
468 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
469 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT ||
470 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
471 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
472 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
473 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
474 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
475 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
476 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
477 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
478 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
479 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
480 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
481 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
482 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
483 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
484 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
485 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
486 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
487 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
488 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
489 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
490 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
491 alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
492 else
493 return alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT ||
494 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2 ||
495 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2 ||
496 alu->inst == V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4;
497 case EVERGREEN:
498 case CAYMAN:
499 default:
500 if (!alu->is_op3)
501 /* Note that FLT_TO_INT_* instructions are vector-only instructions
502 * on Evergreen, despite what the documentation says. FLT_TO_INT
503 * can do both vector and scalar. */
504 return alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
505 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
506 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
507 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
508 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT ||
509 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT ||
510 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT ||
511 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT ||
512 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT ||
513 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT ||
514 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT ||
515 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS ||
516 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE ||
517 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED ||
518 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE ||
519 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED ||
520 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF ||
521 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE ||
522 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED ||
523 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF ||
524 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE ||
525 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN ||
526 alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE;
527 else
528 return alu->inst == EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT;
529 }
530 }
531
532 /* alu instructions that can execute on any unit */
533 static int is_alu_any_unit_inst(struct r600_bytecode *bc, struct r600_bytecode_alu *alu)
534 {
535 return !is_alu_vec_unit_inst(bc, alu) &&
536 !is_alu_trans_unit_inst(bc, alu);
537 }
538
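/* Distribute the instructions of one ALU group onto hardware slots: one per
 * channel (x/y/z/w) plus, on non-Cayman chips, the trans unit in slot 4. */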
539 static int assign_alu_units(struct r600_bytecode *bc, struct r600_bytecode_alu *alu_first,
540 struct r600_bytecode_alu *assignment[5])
541 {
542 struct r600_bytecode_alu *alu;
543 unsigned i, chan, trans;
544 int max_slots = bc->chip_class == CAYMAN ? 4 : 5;
545
546 for (i = 0; i < max_slots; i++)
547 assignment[i] = NULL;
548
549 for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bytecode_alu, alu->list.next, list)) {
550 chan = alu->dst.chan;
551 if (max_slots == 4)
552 trans = 0;
553 else if (is_alu_trans_unit_inst(bc, alu))
554 trans = 1;
555 else if (is_alu_vec_unit_inst(bc, alu))
556 trans = 0;
557 else if (assignment[chan])
558 trans = 1; /* Assume ALU_INST_PREFER_VECTOR. */
559 else
560 trans = 0;
561
562 if (trans) {
563 if (assignment[4]) {
564 assert(0); /* ALU.Trans has already been allocated. */
565 return -1;
566 }
567 assignment[4] = alu;
568 } else {
569 if (assignment[chan]) {
570 assert(0); /* ALU.chan has already been allocated. */
571 return -1;
572 }
573 assignment[chan] = alu;
574 }
575
576 if (alu->last)
577 break;
578 }
579 return 0;
580 }
581
582 struct alu_bank_swizzle {
583 int hw_gpr[NUM_OF_CYCLES][NUM_OF_COMPONENTS];
584 int hw_cfile_addr[4];
585 int hw_cfile_elem[4];
586 };
587
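/* For each bank swizzle mode, the GPR read cycle assigned to source operands
 * 0..2 (vector and scalar/trans variants). */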
588 static const unsigned cycle_for_bank_swizzle_vec[][3] = {
589 [SQ_ALU_VEC_012] = { 0, 1, 2 },
590 [SQ_ALU_VEC_021] = { 0, 2, 1 },
591 [SQ_ALU_VEC_120] = { 1, 2, 0 },
592 [SQ_ALU_VEC_102] = { 1, 0, 2 },
593 [SQ_ALU_VEC_201] = { 2, 0, 1 },
594 [SQ_ALU_VEC_210] = { 2, 1, 0 }
595 };
596
597 static const unsigned cycle_for_bank_swizzle_scl[][3] = {
598 [SQ_ALU_SCL_210] = { 2, 1, 0 },
599 [SQ_ALU_SCL_122] = { 1, 2, 2 },
600 [SQ_ALU_SCL_212] = { 2, 1, 2 },
601 [SQ_ALU_SCL_221] = { 2, 2, 1 }
602 };
603
604 static void init_bank_swizzle(struct alu_bank_swizzle *bs)
605 {
606 int i, cycle, component;
607 /* set up gpr use */
608 for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
609 for (component = 0; component < NUM_OF_COMPONENTS; component++)
610 bs->hw_gpr[cycle][component] = -1;
611 for (i = 0; i < 4; i++)
612 bs->hw_cfile_addr[i] = -1;
613 for (i = 0; i < 4; i++)
614 bs->hw_cfile_elem[i] = -1;
615 }
616
617 static int reserve_gpr(struct alu_bank_swizzle *bs, unsigned sel, unsigned chan, unsigned cycle)
618 {
619 if (bs->hw_gpr[cycle][chan] == -1)
620 bs->hw_gpr[cycle][chan] = sel;
621 else if (bs->hw_gpr[cycle][chan] != (int)sel) {
622 /* Another scalar operation has already used the GPR read port for the channel. */
623 return -1;
624 }
625 return 0;
626 }
627
628 static int reserve_cfile(struct r600_bytecode *bc, struct alu_bank_swizzle *bs, unsigned sel, unsigned chan)
629 {
630 int res, num_res = 4;
631 if (bc->chip_class >= R700) {
632 num_res = 2;
633 chan /= 2;
634 }
635 for (res = 0; res < num_res; ++res) {
636 if (bs->hw_cfile_addr[res] == -1) {
637 bs->hw_cfile_addr[res] = sel;
638 bs->hw_cfile_elem[res] = chan;
639 return 0;
640 } else if (bs->hw_cfile_addr[res] == sel &&
641 bs->hw_cfile_elem[res] == chan)
642 return 0; /* Read for this scalar element already reserved, nothing to do here. */
643 }
644 /* All cfile read ports are used, cannot reference vector element. */
645 return -1;
646 }
647
648 static int is_gpr(unsigned sel)
649 {
650 return (sel >= 0 && sel <= 127);
651 }
652
653 /* CB constants start at 512, and get translated to a kcache index when ALU
654 * clauses are constructed. Note that we handle kcache constants the same way
655  * as (the now gone) cfile constants; is that really required? */
656 static int is_cfile(unsigned sel)
657 {
658 return (sel > 255 && sel < 512) ||
659 (sel > 511 && sel < 4607) || /* Kcache before translation. */
660 (sel > 127 && sel < 192); /* Kcache after translation. */
661 }
662
663 static int is_const(int sel)
664 {
665 return is_cfile(sel) ||
666 (sel >= V_SQ_ALU_SRC_0 &&
667 sel <= V_SQ_ALU_SRC_LITERAL);
668 }
669
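/* Check that the GPR and constant-file reads of a vector-slot instruction fit
 * the read ports implied by the given bank swizzle. */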
670 static int check_vector(struct r600_bytecode *bc, struct r600_bytecode_alu *alu,
671 struct alu_bank_swizzle *bs, int bank_swizzle)
672 {
673 int r, src, num_src, sel, elem, cycle;
674
675 num_src = r600_bytecode_get_num_operands(bc, alu);
676 for (src = 0; src < num_src; src++) {
677 sel = alu->src[src].sel;
678 elem = alu->src[src].chan;
679 if (is_gpr(sel)) {
680 cycle = cycle_for_bank_swizzle_vec[bank_swizzle][src];
681 if (src == 1 && sel == alu->src[0].sel && elem == alu->src[0].chan)
682 /* Nothing to do; special-case optimization,
683  * second source uses first source's reservation. */
684 continue;
685 else {
686 r = reserve_gpr(bs, sel, elem, cycle);
687 if (r)
688 return r;
689 }
690 } else if (is_cfile(sel)) {
691 r = reserve_cfile(bc, bs, sel, elem);
692 if (r)
693 return r;
694 }
695 /* No restrictions on PV, PS, literal or special constants. */
696 }
697 return 0;
698 }
699
700 static int check_scalar(struct r600_bytecode *bc, struct r600_bytecode_alu *alu,
701 struct alu_bank_swizzle *bs, int bank_swizzle)
702 {
703 int r, src, num_src, const_count, sel, elem, cycle;
704
705 num_src = r600_bytecode_get_num_operands(bc, alu);
706 for (const_count = 0, src = 0; src < num_src; ++src) {
707 sel = alu->src[src].sel;
708 elem = alu->src[src].chan;
709 if (is_const(sel)) { /* Any constant, including literal and inline constants. */
710 if (const_count >= 2)
711 /* More than two references to a constant in
712 * transcendental operation. */
713 return -1;
714 else
715 const_count++;
716 }
717 if (is_cfile(sel)) {
718 r = reserve_cfile(bc, bs, sel, elem);
719 if (r)
720 return r;
721 }
722 }
723 for (src = 0; src < num_src; ++src) {
724 sel = alu->src[src].sel;
725 elem = alu->src[src].chan;
726 if (is_gpr(sel)) {
727 cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
728 if (cycle < const_count)
729 /* Cycle for GPR load conflicts with
730 * constant load in transcendental operation. */
731 return -1;
732 r = reserve_gpr(bs, sel, elem, cycle);
733 if (r)
734 return r;
735 }
736 /* PV PS restrictions */
737 if (const_count && (sel == 254 || sel == 255)) {
738 cycle = cycle_for_bank_swizzle_scl[bank_swizzle][src];
739 if (cycle < const_count)
740 return -1;
741 }
742 }
743 return 0;
744 }
745
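/* Search for a bank swizzle combination that satisfies the read-port
 * constraints of every instruction in the group, honouring any forced
 * swizzles. */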
746 static int check_and_set_bank_swizzle(struct r600_bytecode *bc,
747 struct r600_bytecode_alu *slots[5])
748 {
749 struct alu_bank_swizzle bs;
750 int bank_swizzle[5];
751 int i, r = 0, forced = 1;
752 boolean scalar_only = bc->chip_class == CAYMAN ? false : true;
753 int max_slots = bc->chip_class == CAYMAN ? 4 : 5;
754
755 for (i = 0; i < max_slots; i++) {
756 if (slots[i]) {
757 if (slots[i]->bank_swizzle_force) {
758 slots[i]->bank_swizzle = slots[i]->bank_swizzle_force;
759 } else {
760 forced = 0;
761 }
762 }
763
764 if (i < 4 && slots[i])
765 scalar_only = false;
766 }
767 if (forced)
768 return 0;
769
770 /* Just check every possible combination of bank swizzle.
771  * Not very efficient, but works on the first try in most cases. */
772 for (i = 0; i < 4; i++)
773 if (!slots[i] || !slots[i]->bank_swizzle_force)
774 bank_swizzle[i] = SQ_ALU_VEC_012;
775 else
776 bank_swizzle[i] = slots[i]->bank_swizzle;
777
778 bank_swizzle[4] = SQ_ALU_SCL_210;
779 while(bank_swizzle[4] <= SQ_ALU_SCL_221) {
780
781 if (max_slots == 4) {
782 for (i = 0; i < max_slots; i++) {
783 if (bank_swizzle[i] == SQ_ALU_VEC_210)
784 return -1;
785 }
786 }
787 init_bank_swizzle(&bs);
788 if (scalar_only == false) {
789 for (i = 0; i < 4; i++) {
790 if (slots[i]) {
791 r = check_vector(bc, slots[i], &bs, bank_swizzle[i]);
792 if (r)
793 break;
794 }
795 }
796 } else
797 r = 0;
798
799 if (!r && slots[4] && max_slots == 5) {
800 r = check_scalar(bc, slots[4], &bs, bank_swizzle[4]);
801 }
802 if (!r) {
803 for (i = 0; i < max_slots; i++) {
804 if (slots[i])
805 slots[i]->bank_swizzle = bank_swizzle[i];
806 }
807 return 0;
808 }
809
810 if (scalar_only) {
811 bank_swizzle[4]++;
812 } else {
813 for (i = 0; i < max_slots; i++) {
814 if (!slots[i] || !slots[i]->bank_swizzle_force) {
815 bank_swizzle[i]++;
816 if (bank_swizzle[i] <= SQ_ALU_VEC_210)
817 break;
818 else
819 bank_swizzle[i] = SQ_ALU_VEC_012;
820 }
821 }
822 }
823 }
824
825 /* Couldn't find a working swizzle. */
826 return -1;
827 }
828
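/* Rewrite sources that read results computed by the previous group so they
 * use the PV/PS forwarding registers instead of re-reading the GPR. */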
829 static int replace_gpr_with_pv_ps(struct r600_bytecode *bc,
830 struct r600_bytecode_alu *slots[5], struct r600_bytecode_alu *alu_prev)
831 {
832 struct r600_bytecode_alu *prev[5];
833 int gpr[5], chan[5];
834 int i, j, r, src, num_src;
835 int max_slots = bc->chip_class == CAYMAN ? 4 : 5;
836
837 r = assign_alu_units(bc, alu_prev, prev);
838 if (r)
839 return r;
840
841 for (i = 0; i < max_slots; ++i) {
842 if (prev[i] && (prev[i]->dst.write || prev[i]->is_op3) && !prev[i]->dst.rel) {
843 gpr[i] = prev[i]->dst.sel;
844 /* cube writes more than PV.X */
845 if (!is_alu_cube_inst(bc, prev[i]) && is_alu_reduction_inst(bc, prev[i]))
846 chan[i] = 0;
847 else
848 chan[i] = prev[i]->dst.chan;
849 } else
850 gpr[i] = -1;
851 }
852
853 for (i = 0; i < max_slots; ++i) {
854 struct r600_bytecode_alu *alu = slots[i];
855 if(!alu)
856 continue;
857
858 num_src = r600_bytecode_get_num_operands(bc, alu);
859 for (src = 0; src < num_src; ++src) {
860 if (!is_gpr(alu->src[src].sel) || alu->src[src].rel)
861 continue;
862
863 if (bc->chip_class < CAYMAN) {
864 if (alu->src[src].sel == gpr[4] &&
865 alu->src[src].chan == chan[4]) {
866 alu->src[src].sel = V_SQ_ALU_SRC_PS;
867 alu->src[src].chan = 0;
868 continue;
869 }
870 }
871
872 for (j = 0; j < 4; ++j) {
873 if (alu->src[src].sel == gpr[j] &&
874 alu->src[src].chan == j) {
875 alu->src[src].sel = V_SQ_ALU_SRC_PV;
876 alu->src[src].chan = chan[j];
877 break;
878 }
879 }
880 }
881 }
882
883 return 0;
884 }
885
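/* Map common literal values (0, ±1, ±0.5f, ±1.0f) to inline constant
 * selectors so they don't consume literal slots. */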
886 void r600_bytecode_special_constants(u32 value, unsigned *sel, unsigned *neg)
887 {
888 switch(value) {
889 case 0:
890 *sel = V_SQ_ALU_SRC_0;
891 break;
892 case 1:
893 *sel = V_SQ_ALU_SRC_1_INT;
894 break;
895 case -1:
896 *sel = V_SQ_ALU_SRC_M_1_INT;
897 break;
898 case 0x3F800000: /* 1.0f */
899 *sel = V_SQ_ALU_SRC_1;
900 break;
901 case 0x3F000000: /* 0.5f */
902 *sel = V_SQ_ALU_SRC_0_5;
903 break;
904 case 0xBF800000: /* -1.0f */
905 *sel = V_SQ_ALU_SRC_1;
906 *neg ^= 1;
907 break;
908 case 0xBF000000: /* -0.5f */
909 *sel = V_SQ_ALU_SRC_0_5;
910 *neg ^= 1;
911 break;
912 default:
913 *sel = V_SQ_ALU_SRC_LITERAL;
914 break;
915 }
916 }
917
918 /* compute how many literals are needed */
919 static int r600_bytecode_alu_nliterals(struct r600_bytecode *bc, struct r600_bytecode_alu *alu,
920 uint32_t literal[4], unsigned *nliteral)
921 {
922 unsigned num_src = r600_bytecode_get_num_operands(bc, alu);
923 unsigned i, j;
924
925 for (i = 0; i < num_src; ++i) {
926 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
927 uint32_t value = alu->src[i].value;
928 unsigned found = 0;
929 for (j = 0; j < *nliteral; ++j) {
930 if (literal[j] == value) {
931 found = 1;
932 break;
933 }
934 }
935 if (!found) {
936 if (*nliteral >= 4)
937 return -EINVAL;
938 literal[(*nliteral)++] = value;
939 }
940 }
941 }
942 return 0;
943 }
944
945 static void r600_bytecode_alu_adjust_literals(struct r600_bytecode *bc,
946 struct r600_bytecode_alu *alu,
947 uint32_t literal[4], unsigned nliteral)
948 {
949 unsigned num_src = r600_bytecode_get_num_operands(bc, alu);
950 unsigned i, j;
951
952 for (i = 0; i < num_src; ++i) {
953 if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
954 uint32_t value = alu->src[i].value;
955 for (j = 0; j < nliteral; ++j) {
956 if (literal[j] == value) {
957 alu->src[i].chan = j;
958 break;
959 }
960 }
961 }
962 }
963 }
964
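/* Try to merge the current instruction group with the previous one; bail out
 * (return 0 without merging) if slots collide, too many literals or once-only
 * instructions would result, or a register dependency exists between them. */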
965 static int merge_inst_groups(struct r600_bytecode *bc, struct r600_bytecode_alu *slots[5],
966 struct r600_bytecode_alu *alu_prev)
967 {
968 struct r600_bytecode_alu *prev[5];
969 struct r600_bytecode_alu *result[5] = { NULL };
970
971 uint32_t literal[4], prev_literal[4];
972 unsigned nliteral = 0, prev_nliteral = 0;
973
974 int i, j, r, src, num_src;
975 int num_once_inst = 0;
976 int have_mova = 0, have_rel = 0;
977 int max_slots = bc->chip_class == CAYMAN ? 4 : 5;
978
979 r = assign_alu_units(bc, alu_prev, prev);
980 if (r)
981 return r;
982
983 for (i = 0; i < max_slots; ++i) {
984 struct r600_bytecode_alu *alu;
985
986 /* check number of literals */
987 if (prev[i]) {
988 if (r600_bytecode_alu_nliterals(bc, prev[i], literal, &nliteral))
989 return 0;
990 if (r600_bytecode_alu_nliterals(bc, prev[i], prev_literal, &prev_nliteral))
991 return 0;
992 if (is_alu_mova_inst(bc, prev[i])) {
993 if (have_rel)
994 return 0;
995 have_mova = 1;
996 }
997 num_once_inst += is_alu_once_inst(bc, prev[i]);
998 }
999 if (slots[i] && r600_bytecode_alu_nliterals(bc, slots[i], literal, &nliteral))
1000 return 0;
1001
1002 /* Let's check used slots. */
1003 if (prev[i] && !slots[i]) {
1004 result[i] = prev[i];
1005 continue;
1006 } else if (prev[i] && slots[i]) {
1007 if (max_slots == 5 && result[4] == NULL && prev[4] == NULL && slots[4] == NULL) {
1008 /* Trans unit is still free; try to use it. */
1009 if (is_alu_any_unit_inst(bc, slots[i])) {
1010 result[i] = prev[i];
1011 result[4] = slots[i];
1012 } else if (is_alu_any_unit_inst(bc, prev[i])) {
1013 result[i] = slots[i];
1014 result[4] = prev[i];
1015 } else
1016 return 0;
1017 } else
1018 return 0;
1019 } else if(!slots[i]) {
1020 continue;
1021 } else
1022 result[i] = slots[i];
1023
1024 alu = slots[i];
1025 num_once_inst += is_alu_once_inst(bc, alu);
1026
1027 /* Let's check dst gpr. */
1028 if (alu->dst.rel) {
1029 if (have_mova)
1030 return 0;
1031 have_rel = 1;
1032 }
1033
1034 /* Let's check source gprs */
1035 num_src = r600_bytecode_get_num_operands(bc, alu);
1036 for (src = 0; src < num_src; ++src) {
1037 if (alu->src[src].rel) {
1038 if (have_mova)
1039 return 0;
1040 have_rel = 1;
1041 }
1042
1043 /* Constants don't matter. */
1044 if (!is_gpr(alu->src[src].sel))
1045 continue;
1046
1047 for (j = 0; j < max_slots; ++j) {
1048 if (!prev[j] || !prev[j]->dst.write)
1049 continue;
1050
1051 /* If it's relative then we can't determine which gpr is really used. */
1052 if (prev[j]->dst.chan == alu->src[src].chan &&
1053 (prev[j]->dst.sel == alu->src[src].sel ||
1054 prev[j]->dst.rel || alu->src[src].rel))
1055 return 0;
1056 }
1057 }
1058 }
1059
1060 /* more than one PRED_ or KILL_ ? */
1061 if (num_once_inst > 1)
1062 return 0;
1063
1064 /* check if the result can still be swizzled */
1065 r = check_and_set_bank_swizzle(bc, result);
1066 if (r)
1067 return 0;
1068
1069 /* looks like everything worked out right, apply the changes */
1070
1071 /* undo adding previous literals */
1072 bc->cf_last->ndw -= align(prev_nliteral, 2);
1073
1074 /* sort instructions */
1075 for (i = 0; i < max_slots; ++i) {
1076 slots[i] = result[i];
1077 if (result[i]) {
1078 LIST_DEL(&result[i]->list);
1079 result[i]->last = 0;
1080 LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
1081 }
1082 }
1083
1084 /* determine new last instruction */
1085 LIST_ENTRY(struct r600_bytecode_alu, bc->cf_last->alu.prev, list)->last = 1;
1086
1087 /* determine new first instruction */
1088 for (i = 0; i < max_slots; ++i) {
1089 if (result[i]) {
1090 bc->cf_last->curr_bs_head = result[i];
1091 break;
1092 }
1093 }
1094
1095 bc->cf_last->prev_bs_head = bc->cf_last->prev2_bs_head;
1096 bc->cf_last->prev2_bs_head = NULL;
1097
1098 return 0;
1099 }
1100
1101 /* This code handles kcache lines as single blocks of 32 constants. We could
1102 * probably do slightly better by recognizing that we actually have two
1103 * consecutive lines of 16 constants, but the resulting code would also be
1104 * somewhat more complicated. */
1105 static int r600_bytecode_alloc_kcache_lines(struct r600_bytecode *bc, struct r600_bytecode_alu *alu, int type)
1106 {
1107 struct r600_bytecode_kcache *kcache = bc->cf_last->kcache;
1108 unsigned int required_lines;
1109 unsigned int free_lines = 0;
1110 unsigned int cache_line[3];
1111 unsigned int count = 0;
1112 unsigned int i, j;
1113 int r;
1114
1115 /* Collect required cache lines. */
1116 for (i = 0; i < 3; ++i) {
1117 boolean found = false;
1118 unsigned int line;
1119
1120 if (alu->src[i].sel < 512)
1121 continue;
1122
1123 line = ((alu->src[i].sel - 512) / 32) * 2;
1124
1125 for (j = 0; j < count; ++j) {
1126 if (cache_line[j] == line) {
1127 found = true;
1128 break;
1129 }
1130 }
1131
1132 if (!found)
1133 cache_line[count++] = line;
1134 }
1135
1136 /* This should never actually happen. */
1137 if (count >= 3) return -ENOMEM;
1138
1139 for (i = 0; i < 2; ++i) {
1140 if (kcache[i].mode == V_SQ_CF_KCACHE_NOP) {
1141 ++free_lines;
1142 }
1143 }
1144
1145 /* Filter lines pulled in by previous instructions. Note that this is
1146 * only for the required_lines count, we can't remove these from the
1147 * cache_line array since we may have to start a new ALU clause. */
1148 for (i = 0, required_lines = count; i < count; ++i) {
1149 for (j = 0; j < 2; ++j) {
1150 if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
1151 kcache[j].addr == cache_line[i]) {
1152 --required_lines;
1153 break;
1154 }
1155 }
1156 }
1157
1158 /* Start a new ALU clause if needed. */
1159 if (required_lines > free_lines) {
1160 if ((r = r600_bytecode_add_cf(bc))) {
1161 return r;
1162 }
1163 bc->cf_last->inst = (type << 3);
1164 kcache = bc->cf_last->kcache;
1165 }
1166
1167 /* Setup the kcache lines. */
1168 for (i = 0; i < count; ++i) {
1169 boolean found = false;
1170
1171 for (j = 0; j < 2; ++j) {
1172 if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
1173 kcache[j].addr == cache_line[i]) {
1174 found = true;
1175 break;
1176 }
1177 }
1178
1179 if (found) continue;
1180
1181 for (j = 0; j < 2; ++j) {
1182 if (kcache[j].mode == V_SQ_CF_KCACHE_NOP) {
1183 kcache[j].bank = 0;
1184 kcache[j].addr = cache_line[i];
1185 kcache[j].mode = V_SQ_CF_KCACHE_LOCK_2;
1186 break;
1187 }
1188 }
1189 }
1190
1191 /* Alter the src operands to refer to the kcache. */
1192 for (i = 0; i < 3; ++i) {
1193 static const unsigned int base[] = {128, 160, 256, 288};
1194 unsigned int line;
1195
1196 if (alu->src[i].sel < 512)
1197 continue;
1198
1199 alu->src[i].sel -= 512;
1200 line = (alu->src[i].sel / 32) * 2;
1201
1202 for (j = 0; j < 2; ++j) {
1203 if (kcache[j].mode == V_SQ_CF_KCACHE_LOCK_2 &&
1204 kcache[j].addr == line) {
1205 alu->src[i].sel &= 0x1f;
1206 alu->src[i].sel += base[j];
1207 break;
1208 }
1209 }
1210 }
1211
1212 return 0;
1213 }
1214
1215 /* load AR register from gpr (bc->ar_reg) with MOVA_INT */
1216 static int load_ar(struct r600_bytecode *bc)
1217 {
1218 struct r600_bytecode_alu alu;
1219 int r;
1220
1221 if (bc->ar_loaded)
1222 return 0;
1223
1224 /* hack to avoid making MOVA the last instruction in the clause */
1225 if ((bc->cf_last->ndw>>1) >= 110)
1226 bc->force_add_cf = 1;
1227
1228 memset(&alu, 0, sizeof(alu));
1229 alu.inst = BC_INST(bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT);
1230 alu.src[0].sel = bc->ar_reg;
1231 alu.last = 1;
1232 r = r600_bytecode_add_alu(bc, &alu);
1233 if (r)
1234 return r;
1235
1236 bc->cf_last->r6xx_uses_waterfall = 1;
1237 bc->ar_loaded = 1;
1238 return 0;
1239 }
1240
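/* Add an ALU instruction to the current clause, starting a new CF, loading AR
 * and allocating kcache lines as needed; when the instruction ends a group,
 * run the merge, PV/PS replacement and bank-swizzle passes. */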
1241 int r600_bytecode_add_alu_type(struct r600_bytecode *bc, const struct r600_bytecode_alu *alu, int type)
1242 {
1243 struct r600_bytecode_alu *nalu = r600_bytecode_alu();
1244 struct r600_bytecode_alu *lalu;
1245 int i, r;
1246
1247 if (nalu == NULL)
1248 return -ENOMEM;
1249 memcpy(nalu, alu, sizeof(struct r600_bytecode_alu));
1250
1251 if (bc->cf_last != NULL && bc->cf_last->inst != (type << 3)) {
1252 /* check if we could add it anyway */
1253 if (bc->cf_last->inst == (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3) &&
1254 type == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE) {
1255 LIST_FOR_EACH_ENTRY(lalu, &bc->cf_last->alu, list) {
1256 if (lalu->predicate) {
1257 bc->force_add_cf = 1;
1258 break;
1259 }
1260 }
1261 } else
1262 bc->force_add_cf = 1;
1263 }
1264
1265 /* a cf can contain only alu, only vtx, or only tex instructions */
1266 if (bc->cf_last == NULL || bc->force_add_cf) {
1267 r = r600_bytecode_add_cf(bc);
1268 if (r) {
1269 free(nalu);
1270 return r;
1271 }
1272 }
1273 bc->cf_last->inst = (type << 3);
1274
1275 /* Check AR usage and load it if required */
1276 for (i = 0; i < 3; i++)
1277 if (nalu->src[i].rel && !bc->ar_loaded)
1278 load_ar(bc);
1279
1280 if (nalu->dst.rel && !bc->ar_loaded)
1281 load_ar(bc);
1282
1283 /* Setup the kcache for this ALU instruction. This will start a new
1284 * ALU clause if needed. */
1285 if ((r = r600_bytecode_alloc_kcache_lines(bc, nalu, type))) {
1286 free(nalu);
1287 return r;
1288 }
1289
1290 if (!bc->cf_last->curr_bs_head) {
1291 bc->cf_last->curr_bs_head = nalu;
1292 }
1293 /* number of gprs == highest gpr used in any alu, plus one */
1294 for (i = 0; i < 3; i++) {
1295 if (nalu->src[i].sel >= bc->ngpr && nalu->src[i].sel < 128) {
1296 bc->ngpr = nalu->src[i].sel + 1;
1297 }
1298 if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
1299 r600_bytecode_special_constants(nalu->src[i].value,
1300 &nalu->src[i].sel, &nalu->src[i].neg);
1301 }
1302 if (nalu->dst.sel >= bc->ngpr) {
1303 bc->ngpr = nalu->dst.sel + 1;
1304 }
1305 LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
1306 /* each alu uses 2 dwords */
1307 bc->cf_last->ndw += 2;
1308 bc->ndw += 2;
1309
1310 /* process the current ALU instruction group for bank swizzle */
1311 if (nalu->last) {
1312 uint32_t literal[4];
1313 unsigned nliteral;
1314 struct r600_bytecode_alu *slots[5];
1315 int max_slots = bc->chip_class == CAYMAN ? 4 : 5;
1316 r = assign_alu_units(bc, bc->cf_last->curr_bs_head, slots);
1317 if (r)
1318 return r;
1319
1320 if (bc->cf_last->prev_bs_head) {
1321 r = merge_inst_groups(bc, slots, bc->cf_last->prev_bs_head);
1322 if (r)
1323 return r;
1324 }
1325
1326 if (bc->cf_last->prev_bs_head) {
1327 r = replace_gpr_with_pv_ps(bc, slots, bc->cf_last->prev_bs_head);
1328 if (r)
1329 return r;
1330 }
1331
1332 r = check_and_set_bank_swizzle(bc, slots);
1333 if (r)
1334 return r;
1335
1336 for (i = 0, nliteral = 0; i < max_slots; i++) {
1337 if (slots[i]) {
1338 r = r600_bytecode_alu_nliterals(bc, slots[i], literal, &nliteral);
1339 if (r)
1340 return r;
1341 }
1342 }
1343 bc->cf_last->ndw += align(nliteral, 2);
1344
1345 /* at most 128 slots; one add_alu call can add 5 slots + 4 constants (2 slots)
1346  * in the worst case */
1347 if ((bc->cf_last->ndw >> 1) >= 120) {
1348 bc->force_add_cf = 1;
1349 }
1350
1351 bc->cf_last->prev2_bs_head = bc->cf_last->prev_bs_head;
1352 bc->cf_last->prev_bs_head = bc->cf_last->curr_bs_head;
1353 bc->cf_last->curr_bs_head = NULL;
1354 }
1355 return 0;
1356 }
1357
1358 int r600_bytecode_add_alu(struct r600_bytecode *bc, const struct r600_bytecode_alu *alu)
1359 {
1360 return r600_bytecode_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
1361 }
1362
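/* Maximum number of fetch instructions allowed in one TEX/VTX clause:
 * 8 on R600, 16 on later chip families. */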
1363 static unsigned r600_bytecode_num_tex_and_vtx_instructions(const struct r600_bytecode *bc)
1364 {
1365 switch (bc->chip_class) {
1366 case R600:
1367 return 8;
1368
1369 case R700:
1370 case EVERGREEN:
1371 case CAYMAN:
1372 return 16;
1373
1374 default:
1375 R600_ERR("Unknown chip class %d.\n", bc->chip_class);
1376 return 8;
1377 }
1378 }
1379
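/* Note: despite its name this returns TRUE when the last CF block is NOT a
 * vtx/tc fetch clause, i.e. when a new clause must be started. */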
1380 static inline boolean last_inst_was_vtx_fetch(struct r600_bytecode *bc)
1381 {
1382 if (bc->chip_class == CAYMAN) {
1383 if (bc->cf_last->inst != CM_V_SQ_CF_WORD1_SQ_CF_INST_TC)
1384 return TRUE;
1385 } else {
1386 if (bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
1387 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC)
1388 return TRUE;
1389 }
1390 return FALSE;
1391 }
1392
1393 int r600_bytecode_add_vtx(struct r600_bytecode *bc, const struct r600_bytecode_vtx *vtx)
1394 {
1395 struct r600_bytecode_vtx *nvtx = r600_bytecode_vtx();
1396 int r;
1397
1398 if (nvtx == NULL)
1399 return -ENOMEM;
1400 memcpy(nvtx, vtx, sizeof(struct r600_bytecode_vtx));
1401
1402 /* a cf can contain only alu, only vtx, or only tex instructions */
1403 if (bc->cf_last == NULL ||
1404 last_inst_was_vtx_fetch(bc) ||
1405 bc->force_add_cf) {
1406 r = r600_bytecode_add_cf(bc);
1407 if (r) {
1408 free(nvtx);
1409 return r;
1410 }
1411 if (bc->chip_class == CAYMAN)
1412 bc->cf_last->inst = CM_V_SQ_CF_WORD1_SQ_CF_INST_TC;
1413 else
1414 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
1415 }
1416 LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
1417 /* each fetch uses 4 dwords */
1418 bc->cf_last->ndw += 4;
1419 bc->ndw += 4;
1420 if ((bc->cf_last->ndw / 4) >= r600_bytecode_num_tex_and_vtx_instructions(bc))
1421 bc->force_add_cf = 1;
1422 return 0;
1423 }
1424
1425 int r600_bytecode_add_tex(struct r600_bytecode *bc, const struct r600_bytecode_tex *tex)
1426 {
1427 struct r600_bytecode_tex *ntex = r600_bytecode_tex();
1428 int r;
1429
1430 if (ntex == NULL)
1431 return -ENOMEM;
1432 memcpy(ntex, tex, sizeof(struct r600_bytecode_tex));
1433
1434 /* we can't fetch data and use it as a texture lookup address in the same TEX clause */
1435 if (bc->cf_last != NULL &&
1436 bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_TEX) {
1437 struct r600_bytecode_tex *ttex;
1438 LIST_FOR_EACH_ENTRY(ttex, &bc->cf_last->tex, list) {
1439 if (ttex->dst_gpr == ntex->src_gpr) {
1440 bc->force_add_cf = 1;
1441 break;
1442 }
1443 }
1444 /* slight hack to make gradients always go into the same cf */
1445 if (ntex->inst == SQ_TEX_INST_SET_GRADIENTS_H)
1446 bc->force_add_cf = 1;
1447 }
1448
1449 /* a cf can contain only alu, only vtx, or only tex instructions */
1450 if (bc->cf_last == NULL ||
1451 bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
1452 bc->force_add_cf) {
1453 r = r600_bytecode_add_cf(bc);
1454 if (r) {
1455 free(ntex);
1456 return r;
1457 }
1458 bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
1459 }
1460 if (ntex->src_gpr >= bc->ngpr) {
1461 bc->ngpr = ntex->src_gpr + 1;
1462 }
1463 if (ntex->dst_gpr >= bc->ngpr) {
1464 bc->ngpr = ntex->dst_gpr + 1;
1465 }
1466 LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
1467 /* each texture fetch uses 4 dwords */
1468 bc->cf_last->ndw += 4;
1469 bc->ndw += 4;
1470 if ((bc->cf_last->ndw / 4) >= r600_bytecode_num_tex_and_vtx_instructions(bc))
1471 bc->force_add_cf = 1;
1472 return 0;
1473 }
1474
1475 int r600_bytecode_add_cfinst(struct r600_bytecode *bc, int inst)
1476 {
1477 int r;
1478 r = r600_bytecode_add_cf(bc);
1479 if (r)
1480 return r;
1481
1482 bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
1483 bc->cf_last->inst = inst;
1484 return 0;
1485 }
1486
1487 int cm_bytecode_add_cf_end(struct r600_bytecode *bc)
1488 {
1489 return r600_bytecode_add_cfinst(bc, CM_V_SQ_CF_WORD1_SQ_CF_INST_END);
1490 }
1491
1492 /* common to all 3 families */
1493 static int r600_bytecode_vtx_build(struct r600_bytecode *bc, struct r600_bytecode_vtx *vtx, unsigned id)
1494 {
1495 bc->bytecode[id] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id) |
1496 S_SQ_VTX_WORD0_FETCH_TYPE(vtx->fetch_type) |
1497 S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
1498 S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x);
1499 if (bc->chip_class < CAYMAN)
1500 bc->bytecode[id] |= S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
1501 id++;
1502 bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
1503 S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
1504 S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
1505 S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
1506 S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx->use_const_fields) |
1507 S_SQ_VTX_WORD1_DATA_FORMAT(vtx->data_format) |
1508 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx->num_format_all) |
1509 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx->format_comp_all) |
1510 S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx->srf_mode_all) |
1511 S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
1512 bc->bytecode[id] = S_SQ_VTX_WORD2_OFFSET(vtx->offset)|
1513 S_SQ_VTX_WORD2_ENDIAN_SWAP(vtx->endian);
1514 if (bc->chip_class < CAYMAN)
1515 bc->bytecode[id] |= S_SQ_VTX_WORD2_MEGA_FETCH(1);
1516 id++;
1517 bc->bytecode[id++] = 0;
1518 return 0;
1519 }
1520
1521 /* common to all 3 families */
1522 static int r600_bytecode_tex_build(struct r600_bytecode *bc, struct r600_bytecode_tex *tex, unsigned id)
1523 {
1524 bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
1525 S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
1526 S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
1527 S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
1528 bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
1529 S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
1530 S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
1531 S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
1532 S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
1533 S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
1534 S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
1535 S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
1536 S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
1537 S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
1538 S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
1539 bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
1540 S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
1541 S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
1542 S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
1543 S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
1544 S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
1545 S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
1546 S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
1547 bc->bytecode[id++] = 0;
1548 return 0;
1549 }
1550
1551 /* r600 only, r700/eg bits in r700_asm.c */
1552 static int r600_bytecode_alu_build(struct r600_bytecode *bc, struct r600_bytecode_alu *alu, unsigned id)
1553 {
1554 /* don't replace gpr by pv or ps for destination register */
1555 bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
1556 S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
1557 S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
1558 S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
1559 S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
1560 S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
1561 S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
1562 S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
1563 S_SQ_ALU_WORD0_LAST(alu->last);
1564
1565 if (alu->is_op3) {
1566 bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
1567 S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
1568 S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
1569 S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
1570 S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
1571 S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
1572 S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
1573 S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
1574 S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
1575 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
1576 } else {
1577 bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
1578 S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
1579 S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
1580 S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
1581 S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
1582 S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
1583 S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
1584 S_SQ_ALU_WORD1_OP2_OMOD(alu->omod) |
1585 S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
1586 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
1587 S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
1588 S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
1589 }
1590 return 0;
1591 }
1592
1593 static void r600_bytecode_cf_vtx_build(uint32_t *bytecode, const struct r600_bytecode_cf *cf)
1594 {
1595 *bytecode++ = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
1596 *bytecode++ = S_SQ_CF_WORD1_CF_INST(cf->inst) |
1597 S_SQ_CF_WORD1_BARRIER(1) |
1598 S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1);
1599 }
1600
1601 /* common for r600/r700 - eg in eg_asm.c */
1602 static int r600_bytecode_cf_build(struct r600_bytecode *bc, struct r600_bytecode_cf *cf)
1603 {
1604 unsigned id = cf->id;
1605
1606 switch (cf->inst) {
1607 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1608 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1609 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1610 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1611 bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
1612 S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
1613 S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
1614 S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf->kcache[1].bank);
1615
1616 bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
1617 S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
1618 S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
1619 S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
1620 S_SQ_CF_ALU_WORD1_BARRIER(1) |
1621 S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chip_class == R600 ? cf->r6xx_uses_waterfall : 0) |
1622 S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
1623 break;
1624 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1625 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1626 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1627 if (bc->chip_class == R700)
1628 r700_bytecode_cf_vtx_build(&bc->bytecode[id], cf);
1629 else
1630 r600_bytecode_cf_vtx_build(&bc->bytecode[id], cf);
1631 break;
1632 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1633 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1634 bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
1635 S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
1636 S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
1637 S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
1638 bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf->output.burst_count - 1) |
1639 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
1640 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
1641 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
1642 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
1643 S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->output.barrier) |
1644 S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->output.inst) |
1645 S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf->output.end_of_program);
1646 break;
1647 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1648 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1649 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1650 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1651 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1652 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1653 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1654 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1655 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1656 bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
1657 bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
1658 S_SQ_CF_WORD1_BARRIER(1) |
1659 S_SQ_CF_WORD1_COND(cf->cond) |
1660 S_SQ_CF_WORD1_POP_COUNT(cf->pop_count);
1661
1662 break;
1663 default:
1664 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1665 return -EINVAL;
1666 }
1667 return 0;
1668 }
1669
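/* Final assembly of the whole program.  A first pass lays out the address
 * of every CF block (fetch clauses are padded to a 16-byte boundary), then
 * the bytecode buffer is allocated and a second pass encodes the CF words
 * together with their ALU, vertex and texture instructions and literals. */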
1670 int r600_bytecode_build(struct r600_bytecode *bc)
1671 {
1672 struct r600_bytecode_cf *cf;
1673 struct r600_bytecode_alu *alu;
1674 struct r600_bytecode_vtx *vtx;
1675 struct r600_bytecode_tex *tex;
1676 uint32_t literal[4];
1677 unsigned nliteral;
1678 unsigned addr;
1679 int i, r;
1680
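	/* Size the control-flow stack from the deepest callstack level recorded
	 * while emitting: one stack entry per four levels, rounded up, plus two
	 * spare entries; vertex shaders always get at least one entry. */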
1681 if (bc->callstack[0].max > 0)
1682 bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
1683 if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
1684 bc->nstack = 1;
1685 }
1686
1687 	/* first pass: compute the address of each CF block */
1688 	/* addresses start after all the CF instructions */
1689 addr = bc->cf_last->id + 2;
1690 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1691 switch (cf->inst) {
1692 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1693 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1694 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1695 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1696 break;
1697 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1698 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1699 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1700 			/* fetch nodes need to be 16-byte aligned */
1701 addr += 3;
1702 addr &= 0xFFFFFFFCUL;
1703 break;
1704 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1705 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1706 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1707 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1708 break;
1709 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1710 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1711 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1712 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1713 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1714 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1715 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1716 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1717 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1718 case CM_V_SQ_CF_WORD1_SQ_CF_INST_END:
1719 break;
1720 default:
1721 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1722 return -EINVAL;
1723 }
1724 cf->addr = addr;
1725 addr += cf->ndw;
1726 bc->ndw = cf->addr + cf->ndw;
1727 }
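	/* second pass: allocate the final buffer (ndw 32-bit words) and encode
	 * every CF block together with its instructions */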
1728 free(bc->bytecode);
1729 bc->bytecode = calloc(1, bc->ndw * 4);
1730 if (bc->bytecode == NULL)
1731 return -ENOMEM;
1732 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1733 addr = cf->addr;
1734 if (bc->chip_class >= EVERGREEN)
1735 r = eg_bytecode_cf_build(bc, cf);
1736 else
1737 r = r600_bytecode_cf_build(bc, cf);
1738 if (r)
1739 return r;
1740 switch (cf->inst) {
1741 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1742 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1743 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1744 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1745 nliteral = 0;
1746 memset(literal, 0, sizeof(literal));
1747 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1748 r = r600_bytecode_alu_nliterals(bc, alu, literal, &nliteral);
1749 if (r)
1750 return r;
1751 r600_bytecode_alu_adjust_literals(bc, alu, literal, nliteral);
1752 switch(bc->chip_class) {
1753 case R600:
1754 r = r600_bytecode_alu_build(bc, alu, addr);
1755 break;
1756 case R700:
1757 			case EVERGREEN: /* eg alu uses the same encoding as r700 */
1758 			case CAYMAN: /* cayman alu uses the same encoding as r700 */
1759 r = r700_bytecode_alu_build(bc, alu, addr);
1760 break;
1761 default:
1762 R600_ERR("unknown chip class %d.\n", bc->chip_class);
1763 return -EINVAL;
1764 }
1765 if (r)
1766 return r;
1767 addr += 2;
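				/* each ALU instruction is two dwords; the literal constants
				 * used by a group follow its last instruction, padded to an
				 * even number of dwords */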
1768 if (alu->last) {
1769 for (i = 0; i < align(nliteral, 2); ++i) {
1770 bc->bytecode[addr++] = literal[i];
1771 }
1772 nliteral = 0;
1773 memset(literal, 0, sizeof(literal));
1774 }
1775 }
1776 break;
1777 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1778 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1779 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
1780 r = r600_bytecode_vtx_build(bc, vtx, addr);
1781 if (r)
1782 return r;
1783 addr += 4;
1784 }
1785 break;
1786 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
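			/* on cayman vertex fetches are issued through TEX clauses, so
			 * encode any vtx instructions queued on this CF here as well */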
1787 if (bc->chip_class == CAYMAN) {
1788 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
1789 r = r600_bytecode_vtx_build(bc, vtx, addr);
1790 if (r)
1791 return r;
1792 addr += 4;
1793 }
1794 }
1795 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
1796 r = r600_bytecode_tex_build(bc, tex, addr);
1797 if (r)
1798 return r;
1799 addr += 4;
1800 }
1801 break;
1802 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1803 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1804 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1805 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1806 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1807 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1808 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1809 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1810 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1811 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1812 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1813 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1814 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1815 case CM_V_SQ_CF_WORD1_SQ_CF_INST_END:
1816 break;
1817 default:
1818 R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
1819 return -EINVAL;
1820 }
1821 }
1822 return 0;
1823 }
1824
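/* Release everything owned by the bytecode: the encoded buffer and the
 * per-CF lists of ALU, texture and vertex fetch instructions. */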
1825 void r600_bytecode_clear(struct r600_bytecode *bc)
1826 {
1827 struct r600_bytecode_cf *cf = NULL, *next_cf;
1828
1829 free(bc->bytecode);
1830 bc->bytecode = NULL;
1831
1832 LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
1833 struct r600_bytecode_alu *alu = NULL, *next_alu;
1834 struct r600_bytecode_tex *tex = NULL, *next_tex;
1835 		struct r600_bytecode_vtx *vtx = NULL, *next_vtx;
1836
1837 LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
1838 free(alu);
1839 }
1840
1841 LIST_INITHEAD(&cf->alu);
1842
1843 LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
1844 free(tex);
1845 }
1846
1847 LIST_INITHEAD(&cf->tex);
1848
1849 LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
1850 free(vtx);
1851 }
1852
1853 LIST_INITHEAD(&cf->vtx);
1854
1855 free(cf);
1856 }
1857
1858 LIST_INITHEAD(&cf->list);
1859 }
1860
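/* Dump the encoded program to stderr for debugging: a chip tag (6/7/E/C),
 * then each CF block's two control words followed by the ALU, texture and
 * vertex fetch words that belong to it, including ALU literal constants. */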
1861 void r600_bytecode_dump(struct r600_bytecode *bc)
1862 {
1863 struct r600_bytecode_cf *cf = NULL;
1864 struct r600_bytecode_alu *alu = NULL;
1865 struct r600_bytecode_vtx *vtx = NULL;
1866 struct r600_bytecode_tex *tex = NULL;
1867
1868 unsigned i, id;
1869 uint32_t literal[4];
1870 unsigned nliteral;
1871 char chip = '6';
1872
1873 switch (bc->chip_class) {
1874 case R700:
1875 chip = '7';
1876 break;
1877 case EVERGREEN:
1878 chip = 'E';
1879 break;
1880 case CAYMAN:
1881 chip = 'C';
1882 break;
1883 case R600:
1884 default:
1885 chip = '6';
1886 break;
1887 }
1888 fprintf(stderr, "bytecode %d dw -- %d gprs ---------------------\n", bc->ndw, bc->ngpr);
1889 fprintf(stderr, " %c\n", chip);
1890
1891 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
1892 id = cf->id;
1893
1894 switch (cf->inst) {
1895 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
1896 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
1897 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
1898 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
1899 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
1900 fprintf(stderr, "ADDR:%d ", cf->addr);
1901 fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache[0].mode);
1902 fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache[0].bank);
1903 fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache[1].bank);
1904 id++;
1905 fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
1906 fprintf(stderr, "INST:0x%x ", cf->inst);
1907 fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache[1].mode);
1908 fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache[0].addr);
1909 fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache[1].addr);
1910 fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
1911 break;
1912 case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
1913 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
1914 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
1915 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
1916 fprintf(stderr, "ADDR:%d\n", cf->addr);
1917 id++;
1918 fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
1919 fprintf(stderr, "INST:0x%x ", cf->inst);
1920 fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
1921 break;
1922 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1923 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1924 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
1925 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
1926 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
1927 fprintf(stderr, "GPR:%X ", cf->output.gpr);
1928 fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
1929 fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
1930 fprintf(stderr, "TYPE:%X\n", cf->output.type);
1931 id++;
1932 fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
1933 fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
1934 fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
1935 fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
1936 fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
1937 fprintf(stderr, "BARRIER:%X ", cf->output.barrier);
1938 fprintf(stderr, "INST:0x%x ", cf->output.inst);
1939 fprintf(stderr, "BURST_COUNT:%d ", cf->output.burst_count);
1940 fprintf(stderr, "EOP:%X\n", cf->output.end_of_program);
1941 break;
1942 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
1943 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
1944 case V_SQ_CF_WORD1_SQ_CF_INST_POP:
1945 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
1946 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
1947 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
1948 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
1949 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
1950 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
1951 case CM_V_SQ_CF_WORD1_SQ_CF_INST_END:
1952 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
1953 fprintf(stderr, "ADDR:%d\n", cf->cf_addr);
1954 id++;
1955 fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
1956 fprintf(stderr, "INST:0x%x ", cf->inst);
1957 fprintf(stderr, "COND:%X ", cf->cond);
1958 fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
1959 break;
1960 }
1961
1962 id = cf->addr;
1963 nliteral = 0;
1964 LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
1965 r600_bytecode_alu_nliterals(bc, alu, literal, &nliteral);
1966
1967 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
1968 fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
1969 fprintf(stderr, "REL:%d ", alu->src[0].rel);
1970 fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
1971 fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
1972 fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
1973 fprintf(stderr, "REL:%d ", alu->src[1].rel);
1974 fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
1975 fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
1976 			fprintf(stderr, "LAST:%d\n", alu->last);
1977 id++;
1978 fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
1979 fprintf(stderr, "INST:0x%x ", alu->inst);
1980 fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
1981 fprintf(stderr, "CHAN:%d ", alu->dst.chan);
1982 fprintf(stderr, "REL:%d ", alu->dst.rel);
1983 fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
1984 fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
1985 if (alu->is_op3) {
1986 fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
1987 fprintf(stderr, "REL:%d ", alu->src[2].rel);
1988 fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
1989 fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
1990 } else {
1991 fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
1992 fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
1993 fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
1994 fprintf(stderr, "OMOD:%d ", alu->omod);
1995 fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
1996 fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
1997 }
1998
1999 id++;
2000 if (alu->last) {
2001 for (i = 0; i < nliteral; i++, id++) {
2002 float *f = (float*)(bc->bytecode + id);
2003 fprintf(stderr, "%04d %08X\t%f\n", id, bc->bytecode[id], *f);
2004 }
2005 id += nliteral & 1;
2006 nliteral = 0;
2007 }
2008 }
2009
2010 LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
2011 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2012 fprintf(stderr, "INST:0x%x ", tex->inst);
2013 fprintf(stderr, "RESOURCE_ID:%d ", tex->resource_id);
2014 fprintf(stderr, "SRC(GPR:%d ", tex->src_gpr);
2015 fprintf(stderr, "REL:%d)\n", tex->src_rel);
2016 id++;
2017 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2018 fprintf(stderr, "DST(GPR:%d ", tex->dst_gpr);
2019 fprintf(stderr, "REL:%d ", tex->dst_rel);
2020 fprintf(stderr, "SEL_X:%d ", tex->dst_sel_x);
2021 fprintf(stderr, "SEL_Y:%d ", tex->dst_sel_y);
2022 fprintf(stderr, "SEL_Z:%d ", tex->dst_sel_z);
2023 fprintf(stderr, "SEL_W:%d) ", tex->dst_sel_w);
2024 fprintf(stderr, "LOD_BIAS:%d ", tex->lod_bias);
2025 fprintf(stderr, "COORD_TYPE_X:%d ", tex->coord_type_x);
2026 fprintf(stderr, "COORD_TYPE_Y:%d ", tex->coord_type_y);
2027 fprintf(stderr, "COORD_TYPE_Z:%d ", tex->coord_type_z);
2028 fprintf(stderr, "COORD_TYPE_W:%d\n", tex->coord_type_w);
2029 id++;
2030 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2031 fprintf(stderr, "OFFSET_X:%d ", tex->offset_x);
2032 fprintf(stderr, "OFFSET_Y:%d ", tex->offset_y);
2033 fprintf(stderr, "OFFSET_Z:%d ", tex->offset_z);
2034 fprintf(stderr, "SAMPLER_ID:%d ", tex->sampler_id);
2035 fprintf(stderr, "SRC(SEL_X:%d ", tex->src_sel_x);
2036 fprintf(stderr, "SEL_Y:%d ", tex->src_sel_y);
2037 fprintf(stderr, "SEL_Z:%d ", tex->src_sel_z);
2038 fprintf(stderr, "SEL_W:%d)\n", tex->src_sel_w);
2039 id++;
2040 fprintf(stderr, "%04d %08X \n", id, bc->bytecode[id]);
2041 id++;
2042 }
2043
2044 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
2045 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2046 fprintf(stderr, "INST:%d ", vtx->inst);
2047 fprintf(stderr, "FETCH_TYPE:%d ", vtx->fetch_type);
2048 fprintf(stderr, "BUFFER_ID:%d\n", vtx->buffer_id);
2049 id++;
2050 /* This assumes that no semantic fetches exist */
2051 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2052 fprintf(stderr, "SRC(GPR:%d ", vtx->src_gpr);
2053 fprintf(stderr, "SEL_X:%d) ", vtx->src_sel_x);
2054 if (bc->chip_class < CAYMAN)
2055 fprintf(stderr, "MEGA_FETCH_COUNT:%d ", vtx->mega_fetch_count);
2056 else
2057 fprintf(stderr, "SEL_Y:%d) ", 0);
2058 fprintf(stderr, "DST(GPR:%d ", vtx->dst_gpr);
2059 fprintf(stderr, "SEL_X:%d ", vtx->dst_sel_x);
2060 fprintf(stderr, "SEL_Y:%d ", vtx->dst_sel_y);
2061 fprintf(stderr, "SEL_Z:%d ", vtx->dst_sel_z);
2062 fprintf(stderr, "SEL_W:%d) ", vtx->dst_sel_w);
2063 fprintf(stderr, "USE_CONST_FIELDS:%d ", vtx->use_const_fields);
2064 fprintf(stderr, "FORMAT(DATA:%d ", vtx->data_format);
2065 fprintf(stderr, "NUM:%d ", vtx->num_format_all);
2066 fprintf(stderr, "COMP:%d ", vtx->format_comp_all);
2067 fprintf(stderr, "MODE:%d)\n", vtx->srf_mode_all);
2068 id++;
2069 fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
2070 fprintf(stderr, "ENDIAN:%d ", vtx->endian);
2071 fprintf(stderr, "OFFSET:%d\n", vtx->offset);
2072 /* TODO */
2073 id++;
2074 fprintf(stderr, "%04d %08X \n", id, bc->bytecode[id]);
2075 id++;
2076 }
2077 }
2078
2079 fprintf(stderr, "--------------------------------------\n");
2080 }
2081
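/* Translate a gallium vertex format into vertex-fetch fields: *format is
 * the FMT_* data format, *num_format picks NORM (0), INT (1) or SCALED (2),
 * *format_comp marks signed components and *endian the swap needed on
 * big-endian hosts.  Only plain layouts are handled; three-component 8 and
 * 16 bit formats are widened, e.g. R16G16B16_FLOAT becomes FMT_16_16_16_16_FLOAT. */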
2082 static void r600_vertex_data_type(enum pipe_format pformat,
2083 unsigned *format,
2084 unsigned *num_format, unsigned *format_comp, unsigned *endian)
2085 {
2086 const struct util_format_description *desc;
2087 unsigned i;
2088
2089 *format = 0;
2090 *num_format = 0;
2091 *format_comp = 0;
2092 *endian = ENDIAN_NONE;
2093
2094 desc = util_format_description(pformat);
2095 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
2096 goto out_unknown;
2097 }
2098
2099 /* Find the first non-VOID channel. */
2100 for (i = 0; i < 4; i++) {
2101 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
2102 break;
2103 }
2104 }
2105
2106 *endian = r600_endian_swap(desc->channel[i].size);
2107
2108 switch (desc->channel[i].type) {
2109 /* Half-floats, floats, ints */
2110 case UTIL_FORMAT_TYPE_FLOAT:
2111 switch (desc->channel[i].size) {
2112 case 16:
2113 switch (desc->nr_channels) {
2114 case 1:
2115 *format = FMT_16_FLOAT;
2116 break;
2117 case 2:
2118 *format = FMT_16_16_FLOAT;
2119 break;
2120 case 3:
2121 case 4:
2122 *format = FMT_16_16_16_16_FLOAT;
2123 break;
2124 }
2125 break;
2126 case 32:
2127 switch (desc->nr_channels) {
2128 case 1:
2129 *format = FMT_32_FLOAT;
2130 break;
2131 case 2:
2132 *format = FMT_32_32_FLOAT;
2133 break;
2134 case 3:
2135 *format = FMT_32_32_32_FLOAT;
2136 break;
2137 case 4:
2138 *format = FMT_32_32_32_32_FLOAT;
2139 break;
2140 }
2141 break;
2142 default:
2143 goto out_unknown;
2144 }
2145 break;
2146 /* Unsigned ints */
2147 case UTIL_FORMAT_TYPE_UNSIGNED:
2148 /* Signed ints */
2149 case UTIL_FORMAT_TYPE_SIGNED:
2150 switch (desc->channel[i].size) {
2151 case 8:
2152 switch (desc->nr_channels) {
2153 case 1:
2154 *format = FMT_8;
2155 break;
2156 case 2:
2157 *format = FMT_8_8;
2158 break;
2159 case 3:
2160 case 4:
2161 *format = FMT_8_8_8_8;
2162 break;
2163 }
2164 break;
2165 case 10:
2166 if (desc->nr_channels != 4)
2167 goto out_unknown;
2168
2169 *format = FMT_2_10_10_10;
2170 break;
2171 case 16:
2172 switch (desc->nr_channels) {
2173 case 1:
2174 *format = FMT_16;
2175 break;
2176 case 2:
2177 *format = FMT_16_16;
2178 break;
2179 case 3:
2180 case 4:
2181 *format = FMT_16_16_16_16;
2182 break;
2183 }
2184 break;
2185 case 32:
2186 switch (desc->nr_channels) {
2187 case 1:
2188 *format = FMT_32;
2189 break;
2190 case 2:
2191 *format = FMT_32_32;
2192 break;
2193 case 3:
2194 *format = FMT_32_32_32;
2195 break;
2196 case 4:
2197 *format = FMT_32_32_32_32;
2198 break;
2199 }
2200 break;
2201 default:
2202 goto out_unknown;
2203 }
2204 break;
2205 default:
2206 goto out_unknown;
2207 }
2208
2209 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2210 *format_comp = 1;
2211 }
2212
2213 *num_format = 0;
2214 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED ||
2215 desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2216 if (!desc->channel[i].normalized) {
2217 if (desc->channel[i].pure_integer)
2218 *num_format = 1;
2219 else
2220 *num_format = 2;
2221 }
2222 }
2223 return;
2224 out_unknown:
2225 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
2226 }
2227
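/* Build and upload the fetch shader for a vertex element state: one vertex
 * fetch per element, preceded by a MULHI_UINT per element that has an
 * instance divisor greater than one, and terminated by RETURN.  The main
 * vertex shader then runs it through CALL_FS. */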
2228 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve)
2229 {
2230 static int dump_shaders = -1;
2231
2232 struct r600_bytecode bc;
2233 struct r600_bytecode_vtx vtx;
2234 struct pipe_vertex_element *elements = ve->elements;
2235 const struct util_format_description *desc;
2236 unsigned fetch_resource_start = rctx->chip_class >= EVERGREEN ? 0 : 160;
2237 unsigned format, num_format, format_comp, endian;
2238 u32 *bytecode;
2239 int i, r;
2240
2241 	/* Vertex element offsets need special handling.  If an offset is
2242 	 * bigger than what the fetch instruction's offset field can hold
2243 	 * (C_SQ_VTX_WORD2_OFFSET masks the bits that do not fit), the excess
2244 	 * has to go into the vertex resource offset instead.  To keep the
2245 	 * code simple we then bind one resource per element; worst case. */
2246 for (i = 0; i < ve->count; i++) {
2247 ve->vbuffer_offset[i] = C_SQ_VTX_WORD2_OFFSET & elements[i].src_offset;
2248 if (ve->vbuffer_offset[i]) {
2249 ve->vbuffer_need_offset = 1;
2250 }
2251 }
2252
2253 memset(&bc, 0, sizeof(bc));
2254 r600_bytecode_init(&bc, rctx->chip_class);
2255
2256 for (i = 0; i < ve->count; i++) {
2257 if (elements[i].instance_divisor > 1) {
2258 struct r600_bytecode_alu alu;
2259
2260 memset(&alu, 0, sizeof(alu));
2261 alu.inst = BC_INST(&bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT);
2262 alu.src[0].sel = 0;
2263 alu.src[0].chan = 3;
2264
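			/* division by the instance divisor is done with a reciprocal
			 * multiply: instance_id / d ~= MULHI_UINT(instance_id, 2^32 / d + 1),
			 * exact for the instance counts seen in practice.  For d == 4 the
			 * literal is 0x40000001, and MULHI_UINT(7, 0x40000001) == 1 == 7 / 4. */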
2265 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2266 alu.src[1].value = (1ll << 32) / elements[i].instance_divisor + 1;
2267
2268 alu.dst.sel = i + 1;
2269 alu.dst.chan = 3;
2270 alu.dst.write = 1;
2271 alu.last = 1;
2272
2273 if ((r = r600_bytecode_add_alu(&bc, &alu))) {
2274 r600_bytecode_clear(&bc);
2275 return r;
2276 }
2277 }
2278 }
2279
2280 for (i = 0; i < ve->count; i++) {
2281 unsigned vbuffer_index;
2282 r600_vertex_data_type(ve->elements[i].src_format,
2283 &format, &num_format, &format_comp, &endian);
2284 desc = util_format_description(ve->elements[i].src_format);
2285 if (desc == NULL) {
2286 r600_bytecode_clear(&bc);
2287 R600_ERR("unknown format %d\n", ve->elements[i].src_format);
2288 return -EINVAL;
2289 }
2290
2291 /* see above for vbuffer_need_offset explanation */
2292 vbuffer_index = elements[i].vertex_buffer_index;
2293 memset(&vtx, 0, sizeof(vtx));
2294 vtx.buffer_id = (ve->vbuffer_need_offset ? i : vbuffer_index) + fetch_resource_start;
2295 vtx.fetch_type = elements[i].instance_divisor ? 1 : 0;
2296 vtx.src_gpr = elements[i].instance_divisor > 1 ? i + 1 : 0;
2297 vtx.src_sel_x = elements[i].instance_divisor ? 3 : 0;
2298 vtx.mega_fetch_count = 0x1F;
2299 vtx.dst_gpr = i + 1;
2300 vtx.dst_sel_x = desc->swizzle[0];
2301 vtx.dst_sel_y = desc->swizzle[1];
2302 vtx.dst_sel_z = desc->swizzle[2];
2303 vtx.dst_sel_w = desc->swizzle[3];
2304 vtx.data_format = format;
2305 vtx.num_format_all = num_format;
2306 vtx.format_comp_all = format_comp;
2307 vtx.srf_mode_all = 1;
2308 vtx.offset = elements[i].src_offset;
2309 vtx.endian = endian;
2310
2311 if ((r = r600_bytecode_add_vtx(&bc, &vtx))) {
2312 r600_bytecode_clear(&bc);
2313 return r;
2314 }
2315 }
2316
2317 r600_bytecode_add_cfinst(&bc, BC_INST(&bc, V_SQ_CF_WORD1_SQ_CF_INST_RETURN));
2318
2319 if ((r = r600_bytecode_build(&bc))) {
2320 r600_bytecode_clear(&bc);
2321 return r;
2322 }
2323
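	/* dump the generated fetch shader when R600_DUMP_SHADERS is set;
	 * the environment variable is read only once */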
2324 if (dump_shaders == -1)
2325 dump_shaders = debug_get_bool_option("R600_DUMP_SHADERS", FALSE);
2326
2327 if (dump_shaders) {
2328 fprintf(stderr, "--------------------------------------------------------------\n");
2329 r600_bytecode_dump(&bc);
2330 fprintf(stderr, "______________________________________________________________\n");
2331 }
2332
2333 ve->fs_size = bc.ndw*4;
2334
2335 ve->fetch_shader = (struct r600_resource*)
2336 pipe_buffer_create(rctx->context.screen,
2337 PIPE_BIND_CUSTOM,
2338 PIPE_USAGE_IMMUTABLE, ve->fs_size);
2339 if (ve->fetch_shader == NULL) {
2340 r600_bytecode_clear(&bc);
2341 return -ENOMEM;
2342 }
2343
2344 bytecode = rctx->ws->buffer_map(ve->fetch_shader->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
2345 if (bytecode == NULL) {
2346 r600_bytecode_clear(&bc);
2347 pipe_resource_reference((struct pipe_resource**)&ve->fetch_shader, NULL);
2348 return -ENOMEM;
2349 }
2350
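	/* the shader image is stored little-endian, so swap each dword when
	 * running on a big-endian host */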
2351 if (R600_BIG_ENDIAN) {
2352 for (i = 0; i < ve->fs_size / 4; ++i) {
2353 bytecode[i] = bswap_32(bc.bytecode[i]);
2354 }
2355 } else {
2356 memcpy(bytecode, bc.bytecode, ve->fs_size);
2357 }
2358
2359 rctx->ws->buffer_unmap(ve->fetch_shader->buf);
2360 r600_bytecode_clear(&bc);
2361
2362 if (rctx->chip_class >= EVERGREEN)
2363 evergreen_fetch_shader(&rctx->context, ve);
2364 else
2365 r600_fetch_shader(&rctx->context, ve);
2366
2367 return 0;
2368 }