2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
30 #include "r600_opcodes.h"
32 #include "r600_formats.h"
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
38 static inline unsigned int r600_bc_get_num_operands(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
43 switch (bc
->chiprev
) {
47 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
49 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
68 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
69 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
:
83 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
84 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
:
85 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
86 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
87 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
:
88 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
89 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
92 "Need instruction operand number for 0x%x.\n", alu
->inst
);
95 case CHIPREV_EVERGREEN
:
97 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
99 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
100 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
:
101 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
102 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
103 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
104 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
105 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
106 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
:
107 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
108 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
109 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
110 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
111 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
112 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
113 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
114 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
115 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
116 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
117 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
118 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
119 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
120 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY
:
121 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW
:
124 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
125 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
:
126 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
127 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
128 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
129 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
130 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
131 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
132 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
:
133 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
134 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
:
135 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
136 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
137 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR
:
138 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
:
139 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
140 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
143 "Need instruction operand number for 0x%x.\n", alu
->inst
);
151 int r700_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
);
153 static struct r600_bc_cf
*r600_bc_cf(void)
155 struct r600_bc_cf
*cf
= CALLOC_STRUCT(r600_bc_cf
);
159 LIST_INITHEAD(&cf
->list
);
160 LIST_INITHEAD(&cf
->alu
);
161 LIST_INITHEAD(&cf
->vtx
);
162 LIST_INITHEAD(&cf
->tex
);
166 static struct r600_bc_alu
*r600_bc_alu(void)
168 struct r600_bc_alu
*alu
= CALLOC_STRUCT(r600_bc_alu
);
172 LIST_INITHEAD(&alu
->list
);
176 static struct r600_bc_vtx
*r600_bc_vtx(void)
178 struct r600_bc_vtx
*vtx
= CALLOC_STRUCT(r600_bc_vtx
);
182 LIST_INITHEAD(&vtx
->list
);
186 static struct r600_bc_tex
*r600_bc_tex(void)
188 struct r600_bc_tex
*tex
= CALLOC_STRUCT(r600_bc_tex
);
192 LIST_INITHEAD(&tex
->list
);
196 int r600_bc_init(struct r600_bc
*bc
, enum radeon_family family
)
198 LIST_INITHEAD(&bc
->cf
);
200 switch (bc
->family
) {
209 bc
->chiprev
= CHIPREV_R600
;
215 bc
->chiprev
= CHIPREV_R700
;
226 bc
->chiprev
= CHIPREV_EVERGREEN
;
229 R600_ERR("unknown family %d\n", bc
->family
);
235 static int r600_bc_add_cf(struct r600_bc
*bc
)
237 struct r600_bc_cf
*cf
= r600_bc_cf();
241 LIST_ADDTAIL(&cf
->list
, &bc
->cf
);
243 cf
->id
= bc
->cf_last
->id
+ 2;
247 bc
->force_add_cf
= 0;
251 int r600_bc_add_output(struct r600_bc
*bc
, const struct r600_bc_output
*output
)
255 if (bc
->cf_last
&& (bc
->cf_last
->inst
== output
->inst
||
256 (bc
->cf_last
->inst
== BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
) &&
257 output
->inst
== BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
))) &&
258 output
->type
== bc
->cf_last
->output
.type
&&
259 output
->elem_size
== bc
->cf_last
->output
.elem_size
&&
260 output
->swizzle_x
== bc
->cf_last
->output
.swizzle_x
&&
261 output
->swizzle_y
== bc
->cf_last
->output
.swizzle_y
&&
262 output
->swizzle_z
== bc
->cf_last
->output
.swizzle_z
&&
263 output
->swizzle_w
== bc
->cf_last
->output
.swizzle_w
&&
264 (output
->burst_count
+ bc
->cf_last
->output
.burst_count
) <= 16) {
266 if ((output
->gpr
+ output
->burst_count
) == bc
->cf_last
->output
.gpr
&&
267 (output
->array_base
+ output
->burst_count
) == bc
->cf_last
->output
.array_base
) {
269 bc
->cf_last
->output
.end_of_program
|= output
->end_of_program
;
270 bc
->cf_last
->output
.inst
= output
->inst
;
271 bc
->cf_last
->output
.gpr
= output
->gpr
;
272 bc
->cf_last
->output
.array_base
= output
->array_base
;
273 bc
->cf_last
->output
.burst_count
+= output
->burst_count
;
276 } else if (output
->gpr
== (bc
->cf_last
->output
.gpr
+ bc
->cf_last
->output
.burst_count
) &&
277 output
->array_base
== (bc
->cf_last
->output
.array_base
+ bc
->cf_last
->output
.burst_count
)) {
279 bc
->cf_last
->output
.end_of_program
|= output
->end_of_program
;
280 bc
->cf_last
->output
.inst
= output
->inst
;
281 bc
->cf_last
->output
.burst_count
+= output
->burst_count
;
286 r
= r600_bc_add_cf(bc
);
289 bc
->cf_last
->inst
= output
->inst
;
290 memcpy(&bc
->cf_last
->output
, output
, sizeof(struct r600_bc_output
));
294 /* alu instructions that can only exist once per group */
295 static int is_alu_once_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
297 switch (bc
->chiprev
) {
300 return !alu
->is_op3
&& (
301 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
302 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
303 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
304 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
305 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
306 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
307 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
308 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
309 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
310 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
||
311 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
312 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
313 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
314 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
315 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
316 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
317 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
318 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
319 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
320 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
321 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
322 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
323 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
324 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
325 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
326 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
327 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
328 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
329 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
330 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
331 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
332 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
333 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
334 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
335 case CHIPREV_EVERGREEN
:
337 return !alu
->is_op3
&& (
338 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
339 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
340 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
341 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
342 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
343 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
344 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
345 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
346 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
347 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
||
348 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
349 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
350 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
351 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
352 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
353 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
354 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
355 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
356 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
357 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
358 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
359 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
360 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
361 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
362 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
363 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
364 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
365 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
366 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
367 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
368 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
369 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
370 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
371 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
375 static int is_alu_reduction_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
377 switch (bc
->chiprev
) {
380 return !alu
->is_op3
&& (
381 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
382 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
383 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
384 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
385 case CHIPREV_EVERGREEN
:
387 return !alu
->is_op3
&& (
388 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
389 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
390 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
391 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
395 static int is_alu_cube_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
397 switch (bc
->chiprev
) {
400 return !alu
->is_op3
&&
401 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
;
402 case CHIPREV_EVERGREEN
:
404 return !alu
->is_op3
&&
405 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
;
409 static int is_alu_mova_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
411 switch (bc
->chiprev
) {
414 return !alu
->is_op3
&& (
415 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
||
416 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
||
417 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
418 case CHIPREV_EVERGREEN
:
420 return !alu
->is_op3
&& (
421 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
/* alu instructions that can only execute on the vector units (x/y/z/w),
 * never on the trans unit: all reductions and MOVA variants. */
static int is_alu_vec_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	if (is_alu_reduction_inst(bc, alu))
		return 1;
	return is_alu_mova_inst(bc, alu);
}
432 /* alu instructions that can only execute on the trans unit */
433 static int is_alu_trans_unit_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
435 switch (bc
->chiprev
) {
439 return alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
440 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
||
441 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
442 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
443 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
444 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
445 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
446 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
447 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
448 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
449 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
450 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
451 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
452 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
453 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
454 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
455 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
456 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
457 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
458 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
459 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
460 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
461 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
462 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
464 return alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
||
465 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2
||
466 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2
||
467 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4
;
468 case CHIPREV_EVERGREEN
:
471 /* Note that FLT_TO_INT* instructions are vector instructions
472 * on Evergreen, despite what the documentation says. */
473 return alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
474 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
475 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
476 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
477 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
478 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
479 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
480 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
481 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
482 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
483 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
484 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
485 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
486 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
487 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
488 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
489 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
490 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
491 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
492 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
493 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
494 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
495 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
497 return alu
->inst
== EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
;
/* alu instructions that can execute on any unit: neither restricted to the
 * vector units nor to the trans unit. */
static int is_alu_any_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	return !(is_alu_vec_unit_inst(bc, alu) ||
		 is_alu_trans_unit_inst(bc, alu));
}
508 static int assign_alu_units(struct r600_bc
*bc
, struct r600_bc_alu
*alu_first
,
509 struct r600_bc_alu
*assignment
[5])
511 struct r600_bc_alu
*alu
;
512 unsigned i
, chan
, trans
;
514 for (i
= 0; i
< 5; i
++)
515 assignment
[i
] = NULL
;
517 for (alu
= alu_first
; alu
; alu
= LIST_ENTRY(struct r600_bc_alu
, alu
->list
.next
, list
)) {
518 chan
= alu
->dst
.chan
;
519 if (is_alu_trans_unit_inst(bc
, alu
))
521 else if (is_alu_vec_unit_inst(bc
, alu
))
523 else if (assignment
[chan
])
524 trans
= 1; // assume ALU_INST_PREFER_VECTOR
530 assert(0); //ALU.Trans has already been allocated
535 if (assignment
[chan
]) {
536 assert(0); //ALU.chan has already been allocated
539 assignment
[chan
] = alu
;
548 struct alu_bank_swizzle
{
549 int hw_gpr
[NUM_OF_CYCLES
][NUM_OF_COMPONENTS
];
550 int hw_cfile_addr
[4];
551 int hw_cfile_elem
[4];
554 static const unsigned cycle_for_bank_swizzle_vec
[][3] = {
555 [SQ_ALU_VEC_012
] = { 0, 1, 2 },
556 [SQ_ALU_VEC_021
] = { 0, 2, 1 },
557 [SQ_ALU_VEC_120
] = { 1, 2, 0 },
558 [SQ_ALU_VEC_102
] = { 1, 0, 2 },
559 [SQ_ALU_VEC_201
] = { 2, 0, 1 },
560 [SQ_ALU_VEC_210
] = { 2, 1, 0 }
563 static const unsigned cycle_for_bank_swizzle_scl
[][3] = {
564 [SQ_ALU_SCL_210
] = { 2, 1, 0 },
565 [SQ_ALU_SCL_122
] = { 1, 2, 2 },
566 [SQ_ALU_SCL_212
] = { 2, 1, 2 },
567 [SQ_ALU_SCL_221
] = { 2, 2, 1 }
570 static void init_bank_swizzle(struct alu_bank_swizzle
*bs
)
572 int i
, cycle
, component
;
574 for (cycle
= 0; cycle
< NUM_OF_CYCLES
; cycle
++)
575 for (component
= 0; component
< NUM_OF_COMPONENTS
; component
++)
576 bs
->hw_gpr
[cycle
][component
] = -1;
577 for (i
= 0; i
< 4; i
++)
578 bs
->hw_cfile_addr
[i
] = -1;
579 for (i
= 0; i
< 4; i
++)
580 bs
->hw_cfile_elem
[i
] = -1;
583 static int reserve_gpr(struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
, unsigned cycle
)
585 if (bs
->hw_gpr
[cycle
][chan
] == -1)
586 bs
->hw_gpr
[cycle
][chan
] = sel
;
587 else if (bs
->hw_gpr
[cycle
][chan
] != (int)sel
) {
588 // Another scalar operation has already used GPR read port for channel
594 static int reserve_cfile(struct r600_bc
*bc
, struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
)
596 int res
, num_res
= 4;
597 if (bc
->chiprev
>= CHIPREV_R700
) {
601 for (res
= 0; res
< num_res
; ++res
) {
602 if (bs
->hw_cfile_addr
[res
] == -1) {
603 bs
->hw_cfile_addr
[res
] = sel
;
604 bs
->hw_cfile_elem
[res
] = chan
;
606 } else if (bs
->hw_cfile_addr
[res
] == sel
&&
607 bs
->hw_cfile_elem
[res
] == chan
)
608 return 0; // Read for this scalar element already reserved, nothing to do here.
610 // All cfile read ports are used, cannot reference vector element
/* True when sel addresses a general purpose register (GPR 0..127).
 * Note: sel is unsigned, so the lower bound needs no explicit check
 * (the old "sel >= 0" comparison was a tautology). */
static int is_gpr(unsigned sel)
{
	return sel <= 127;
}
/* CB constants start at 512, and get translated to a kcache index when ALU
 * clauses are constructed. Note that we handle kcache constants the same way
 * as (the now gone) cfile constants, is that really required? */
static int is_cfile(unsigned sel)
{
	if (sel > 255 && sel < 512)
		return 1;
	if (sel > 511 && sel < 4607)	/* Kcache before translate */
		return 1;
	return sel > 127 && sel < 192;	/* Kcache after translate */
}
629 static int is_const(int sel
)
631 return is_cfile(sel
) ||
632 (sel
>= V_SQ_ALU_SRC_0
&&
633 sel
<= V_SQ_ALU_SRC_LITERAL
);
636 static int check_vector(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
637 struct alu_bank_swizzle
*bs
, int bank_swizzle
)
639 int r
, src
, num_src
, sel
, elem
, cycle
;
641 num_src
= r600_bc_get_num_operands(bc
, alu
);
642 for (src
= 0; src
< num_src
; src
++) {
643 sel
= alu
->src
[src
].sel
;
644 elem
= alu
->src
[src
].chan
;
646 cycle
= cycle_for_bank_swizzle_vec
[bank_swizzle
][src
];
647 if (src
== 1 && sel
== alu
->src
[0].sel
&& elem
== alu
->src
[0].chan
)
648 // Nothing to do; special-case optimization,
649 // second source uses first source’s reservation
652 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
656 } else if (is_cfile(sel
)) {
657 r
= reserve_cfile(bc
, bs
, sel
, elem
);
661 // No restrictions on PV, PS, literal or special constants
666 static int check_scalar(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
667 struct alu_bank_swizzle
*bs
, int bank_swizzle
)
669 int r
, src
, num_src
, const_count
, sel
, elem
, cycle
;
671 num_src
= r600_bc_get_num_operands(bc
, alu
);
672 for (const_count
= 0, src
= 0; src
< num_src
; ++src
) {
673 sel
= alu
->src
[src
].sel
;
674 elem
= alu
->src
[src
].chan
;
675 if (is_const(sel
)) { // Any constant, including literal and inline constants
676 if (const_count
>= 2)
677 // More than two references to a constant in
678 // transcendental operation.
684 r
= reserve_cfile(bc
, bs
, sel
, elem
);
689 for (src
= 0; src
< num_src
; ++src
) {
690 sel
= alu
->src
[src
].sel
;
691 elem
= alu
->src
[src
].chan
;
693 cycle
= cycle_for_bank_swizzle_scl
[bank_swizzle
][src
];
694 if (cycle
< const_count
)
695 // Cycle for GPR load conflicts with
696 // constant load in transcendental operation.
698 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
702 // Constants already processed
703 // No restrictions on PV, PS
708 static int check_and_set_bank_swizzle(struct r600_bc
*bc
,
709 struct r600_bc_alu
*slots
[5])
711 struct alu_bank_swizzle bs
;
713 int i
, r
= 0, forced
= 0;
715 for (i
= 0; i
< 5; i
++)
716 if (slots
[i
] && slots
[i
]->bank_swizzle_force
) {
717 slots
[i
]->bank_swizzle
= slots
[i
]->bank_swizzle_force
;
724 // just check every possible combination of bank swizzle
725 // not very efficient, but works on the first try in most of the cases
726 for (i
= 0; i
< 4; i
++)
727 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
728 bank_swizzle
[4] = SQ_ALU_SCL_210
;
729 while(bank_swizzle
[4] <= SQ_ALU_SCL_221
) {
730 init_bank_swizzle(&bs
);
731 for (i
= 0; i
< 4; i
++) {
733 r
= check_vector(bc
, slots
[i
], &bs
, bank_swizzle
[i
]);
738 if (!r
&& slots
[4]) {
739 r
= check_scalar(bc
, slots
[4], &bs
, bank_swizzle
[4]);
742 for (i
= 0; i
< 5; i
++) {
744 slots
[i
]->bank_swizzle
= bank_swizzle
[i
];
749 for (i
= 0; i
< 5; i
++) {
751 if (bank_swizzle
[i
] <= SQ_ALU_VEC_210
)
754 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
758 // couldn't find a working swizzle
762 static int replace_gpr_with_pv_ps(struct r600_bc
*bc
,
763 struct r600_bc_alu
*slots
[5], struct r600_bc_alu
*alu_prev
)
765 struct r600_bc_alu
*prev
[5];
767 int i
, j
, r
, src
, num_src
;
769 r
= assign_alu_units(bc
, alu_prev
, prev
);
773 for (i
= 0; i
< 5; ++i
) {
774 if(prev
[i
] && prev
[i
]->dst
.write
&& !prev
[i
]->dst
.rel
) {
775 gpr
[i
] = prev
[i
]->dst
.sel
;
776 /* cube writes more than PV.X */
777 if (!is_alu_cube_inst(bc
, prev
[i
]) && is_alu_reduction_inst(bc
, prev
[i
]))
780 chan
[i
] = prev
[i
]->dst
.chan
;
785 for (i
= 0; i
< 5; ++i
) {
786 struct r600_bc_alu
*alu
= slots
[i
];
790 num_src
= r600_bc_get_num_operands(bc
, alu
);
791 for (src
= 0; src
< num_src
; ++src
) {
792 if (!is_gpr(alu
->src
[src
].sel
) || alu
->src
[src
].rel
)
795 if (alu
->src
[src
].sel
== gpr
[4] &&
796 alu
->src
[src
].chan
== chan
[4]) {
797 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PS
;
798 alu
->src
[src
].chan
= 0;
802 for (j
= 0; j
< 4; ++j
) {
803 if (alu
->src
[src
].sel
== gpr
[j
] &&
804 alu
->src
[src
].chan
== j
) {
805 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PV
;
806 alu
->src
[src
].chan
= chan
[j
];
816 void r600_bc_special_constants(u32 value
, unsigned *sel
, unsigned *neg
)
820 *sel
= V_SQ_ALU_SRC_0
;
823 *sel
= V_SQ_ALU_SRC_1_INT
;
826 *sel
= V_SQ_ALU_SRC_M_1_INT
;
828 case 0x3F800000: // 1.0f
829 *sel
= V_SQ_ALU_SRC_1
;
831 case 0x3F000000: // 0.5f
832 *sel
= V_SQ_ALU_SRC_0_5
;
834 case 0xBF800000: // -1.0f
835 *sel
= V_SQ_ALU_SRC_1
;
838 case 0xBF000000: // -0.5f
839 *sel
= V_SQ_ALU_SRC_0_5
;
843 *sel
= V_SQ_ALU_SRC_LITERAL
;
848 /* compute how many literals are needed */
849 static int r600_bc_alu_nliterals(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
850 uint32_t literal
[4], unsigned *nliteral
)
852 unsigned num_src
= r600_bc_get_num_operands(bc
, alu
);
855 for (i
= 0; i
< num_src
; ++i
) {
856 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
857 uint32_t value
= alu
->src
[i
].value
;
859 for (j
= 0; j
< *nliteral
; ++j
) {
860 if (literal
[j
] == value
) {
868 literal
[(*nliteral
)++] = value
;
875 static void r600_bc_alu_adjust_literals(struct r600_bc
*bc
,
876 struct r600_bc_alu
*alu
,
877 uint32_t literal
[4], unsigned nliteral
)
879 unsigned num_src
= r600_bc_get_num_operands(bc
, alu
);
882 for (i
= 0; i
< num_src
; ++i
) {
883 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
884 uint32_t value
= alu
->src
[i
].value
;
885 for (j
= 0; j
< nliteral
; ++j
) {
886 if (literal
[j
] == value
) {
887 alu
->src
[i
].chan
= j
;
895 static int merge_inst_groups(struct r600_bc
*bc
, struct r600_bc_alu
*slots
[5],
896 struct r600_bc_alu
*alu_prev
)
898 struct r600_bc_alu
*prev
[5];
899 struct r600_bc_alu
*result
[5] = { NULL
};
901 uint32_t literal
[4], prev_literal
[4];
902 unsigned nliteral
= 0, prev_nliteral
= 0;
904 int i
, j
, r
, src
, num_src
;
905 int num_once_inst
= 0;
906 int have_mova
= 0, have_rel
= 0;
908 r
= assign_alu_units(bc
, alu_prev
, prev
);
912 for (i
= 0; i
< 5; ++i
) {
913 struct r600_bc_alu
*alu
;
915 /* check number of literals */
917 if (r600_bc_alu_nliterals(bc
, prev
[i
], literal
, &nliteral
))
919 if (r600_bc_alu_nliterals(bc
, prev
[i
], prev_literal
, &prev_nliteral
))
921 if (is_alu_mova_inst(bc
, prev
[i
])) {
926 num_once_inst
+= is_alu_once_inst(bc
, prev
[i
]);
928 if (slots
[i
] && r600_bc_alu_nliterals(bc
, slots
[i
], literal
, &nliteral
))
931 // let's check used slots
932 if (prev
[i
] && !slots
[i
]) {
935 } else if (prev
[i
] && slots
[i
]) {
936 if (result
[4] == NULL
&& prev
[4] == NULL
&& slots
[4] == NULL
) {
937 // trans unit is still free try to use it
938 if (is_alu_any_unit_inst(bc
, slots
[i
])) {
940 result
[4] = slots
[i
];
941 } else if (is_alu_any_unit_inst(bc
, prev
[i
])) {
942 result
[i
] = slots
[i
];
948 } else if(!slots
[i
]) {
951 result
[i
] = slots
[i
];
953 // let's check source gprs
955 num_once_inst
+= is_alu_once_inst(bc
, alu
);
957 num_src
= r600_bc_get_num_operands(bc
, alu
);
958 for (src
= 0; src
< num_src
; ++src
) {
959 if (alu
->src
[src
].rel
) {
965 // constants doesn't matter
966 if (!is_gpr(alu
->src
[src
].sel
))
969 for (j
= 0; j
< 5; ++j
) {
970 if (!prev
[j
] || !prev
[j
]->dst
.write
)
973 // if it's relative then we can't determin which gpr is really used
974 if (prev
[j
]->dst
.chan
== alu
->src
[src
].chan
&&
975 (prev
[j
]->dst
.sel
== alu
->src
[src
].sel
||
976 prev
[j
]->dst
.rel
|| alu
->src
[src
].rel
))
982 /* more than one PRED_ or KILL_ ? */
983 if (num_once_inst
> 1)
986 /* check if the result can still be swizzled */
987 r
= check_and_set_bank_swizzle(bc
, result
);
991 /* looks like everything worked out right, apply the changes */
993 /* undo adding previus literals */
994 bc
->cf_last
->ndw
-= align(prev_nliteral
, 2);
996 /* sort instructions */
997 for (i
= 0; i
< 5; ++i
) {
998 slots
[i
] = result
[i
];
1000 LIST_DEL(&result
[i
]->list
);
1001 result
[i
]->last
= 0;
1002 LIST_ADDTAIL(&result
[i
]->list
, &bc
->cf_last
->alu
);
1006 /* determine new last instruction */
1007 LIST_ENTRY(struct r600_bc_alu
, bc
->cf_last
->alu
.prev
, list
)->last
= 1;
1009 /* determine new first instruction */
1010 for (i
= 0; i
< 5; ++i
) {
1012 bc
->cf_last
->curr_bs_head
= result
[i
];
1017 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->prev2_bs_head
;
1018 bc
->cf_last
->prev2_bs_head
= NULL
;
1023 /* This code handles kcache lines as single blocks of 32 constants. We could
1024 * probably do slightly better by recognizing that we actually have two
1025 * consecutive lines of 16 constants, but the resulting code would also be
1026 * somewhat more complicated. */
1027 static int r600_bc_alloc_kcache_lines(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, int type
)
1029 struct r600_bc_kcache
*kcache
= bc
->cf_last
->kcache
;
1030 unsigned int required_lines
;
1031 unsigned int free_lines
= 0;
1032 unsigned int cache_line
[3];
1033 unsigned int count
= 0;
1037 /* Collect required cache lines. */
1038 for (i
= 0; i
< 3; ++i
) {
1042 if (alu
->src
[i
].sel
< 512)
1045 line
= ((alu
->src
[i
].sel
- 512) / 32) * 2;
1047 for (j
= 0; j
< count
; ++j
) {
1048 if (cache_line
[j
] == line
) {
1055 cache_line
[count
++] = line
;
1058 /* This should never actually happen. */
1059 if (count
>= 3) return -ENOMEM
;
1061 for (i
= 0; i
< 2; ++i
) {
1062 if (kcache
[i
].mode
== V_SQ_CF_KCACHE_NOP
) {
1067 /* Filter lines pulled in by previous intructions. Note that this is
1068 * only for the required_lines count, we can't remove these from the
1069 * cache_line array since we may have to start a new ALU clause. */
1070 for (i
= 0, required_lines
= count
; i
< count
; ++i
) {
1071 for (j
= 0; j
< 2; ++j
) {
1072 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1073 kcache
[j
].addr
== cache_line
[i
]) {
1080 /* Start a new ALU clause if needed. */
1081 if (required_lines
> free_lines
) {
1082 if ((r
= r600_bc_add_cf(bc
))) {
1085 bc
->cf_last
->inst
= (type
<< 3);
1086 kcache
= bc
->cf_last
->kcache
;
1089 /* Setup the kcache lines. */
1090 for (i
= 0; i
< count
; ++i
) {
1093 for (j
= 0; j
< 2; ++j
) {
1094 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1095 kcache
[j
].addr
== cache_line
[i
]) {
1101 if (found
) continue;
1103 for (j
= 0; j
< 2; ++j
) {
1104 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_NOP
) {
1106 kcache
[j
].addr
= cache_line
[i
];
1107 kcache
[j
].mode
= V_SQ_CF_KCACHE_LOCK_2
;
1113 /* Alter the src operands to refer to the kcache. */
1114 for (i
= 0; i
< 3; ++i
) {
1115 static const unsigned int base
[] = {128, 160, 256, 288};
1118 if (alu
->src
[i
].sel
< 512)
1121 alu
->src
[i
].sel
-= 512;
1122 line
= (alu
->src
[i
].sel
/ 32) * 2;
1124 for (j
= 0; j
< 2; ++j
) {
1125 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1126 kcache
[j
].addr
== line
) {
1127 alu
->src
[i
].sel
&= 0x1f;
1128 alu
->src
[i
].sel
+= base
[j
];
1137 int r600_bc_add_alu_type(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
, int type
)
1139 struct r600_bc_alu
*nalu
= r600_bc_alu();
1140 struct r600_bc_alu
*lalu
;
1145 memcpy(nalu
, alu
, sizeof(struct r600_bc_alu
));
1147 if (bc
->cf_last
!= NULL
&& bc
->cf_last
->inst
!= (type
<< 3)) {
1148 /* check if we could add it anyway */
1149 if (bc
->cf_last
->inst
== (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3) &&
1150 type
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
) {
1151 LIST_FOR_EACH_ENTRY(lalu
, &bc
->cf_last
->alu
, list
) {
1152 if (lalu
->predicate
) {
1153 bc
->force_add_cf
= 1;
1158 bc
->force_add_cf
= 1;
1161 /* cf can contains only alu or only vtx or only tex */
1162 if (bc
->cf_last
== NULL
|| bc
->force_add_cf
) {
1163 r
= r600_bc_add_cf(bc
);
1169 bc
->cf_last
->inst
= (type
<< 3);
1171 /* Setup the kcache for this ALU instruction. This will start a new
1172 * ALU clause if needed. */
1173 if ((r
= r600_bc_alloc_kcache_lines(bc
, nalu
, type
))) {
1178 if (!bc
->cf_last
->curr_bs_head
) {
1179 bc
->cf_last
->curr_bs_head
= nalu
;
1181 /* number of gpr == the last gpr used in any alu */
1182 for (i
= 0; i
< 3; i
++) {
1183 if (nalu
->src
[i
].sel
>= bc
->ngpr
&& nalu
->src
[i
].sel
< 128) {
1184 bc
->ngpr
= nalu
->src
[i
].sel
+ 1;
1186 if (nalu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
)
1187 r600_bc_special_constants(nalu
->src
[i
].value
,
1188 &nalu
->src
[i
].sel
, &nalu
->src
[i
].neg
);
1190 if (nalu
->dst
.sel
>= bc
->ngpr
) {
1191 bc
->ngpr
= nalu
->dst
.sel
+ 1;
1193 LIST_ADDTAIL(&nalu
->list
, &bc
->cf_last
->alu
);
1194 /* each alu use 2 dwords */
1195 bc
->cf_last
->ndw
+= 2;
1198 /* process cur ALU instructions for bank swizzle */
1200 uint32_t literal
[4];
1202 struct r600_bc_alu
*slots
[5];
1203 r
= assign_alu_units(bc
, bc
->cf_last
->curr_bs_head
, slots
);
1207 if (bc
->cf_last
->prev_bs_head
) {
1208 r
= merge_inst_groups(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1213 if (bc
->cf_last
->prev_bs_head
) {
1214 r
= replace_gpr_with_pv_ps(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1219 r
= check_and_set_bank_swizzle(bc
, slots
);
1223 for (i
= 0, nliteral
= 0; i
< 5; i
++) {
1225 r
= r600_bc_alu_nliterals(bc
, slots
[i
], literal
, &nliteral
);
1230 bc
->cf_last
->ndw
+= align(nliteral
, 2);
1232 /* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
1234 if ((bc
->cf_last
->ndw
>> 1) >= 120) {
1235 bc
->force_add_cf
= 1;
1238 bc
->cf_last
->prev2_bs_head
= bc
->cf_last
->prev_bs_head
;
1239 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->curr_bs_head
;
1240 bc
->cf_last
->curr_bs_head
= NULL
;
1245 int r600_bc_add_alu(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
)
1247 return r600_bc_add_alu_type(bc
, alu
, BC_INST(bc
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
));
1250 int r600_bc_add_vtx(struct r600_bc
*bc
, const struct r600_bc_vtx
*vtx
)
1252 struct r600_bc_vtx
*nvtx
= r600_bc_vtx();
1257 memcpy(nvtx
, vtx
, sizeof(struct r600_bc_vtx
));
1259 /* cf can contains only alu or only vtx or only tex */
1260 if (bc
->cf_last
== NULL
||
1261 (bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX
&&
1262 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) ||
1264 r
= r600_bc_add_cf(bc
);
1269 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_VTX
;
1271 LIST_ADDTAIL(&nvtx
->list
, &bc
->cf_last
->vtx
);
1272 /* each fetch use 4 dwords */
1273 bc
->cf_last
->ndw
+= 4;
1275 if ((bc
->cf_last
->ndw
/ 4) > 7)
1276 bc
->force_add_cf
= 1;
1280 int r600_bc_add_tex(struct r600_bc
*bc
, const struct r600_bc_tex
*tex
)
1282 struct r600_bc_tex
*ntex
= r600_bc_tex();
1287 memcpy(ntex
, tex
, sizeof(struct r600_bc_tex
));
1289 /* we can't fetch data und use it as texture lookup address in the same TEX clause */
1290 if (bc
->cf_last
!= NULL
&&
1291 bc
->cf_last
->inst
== V_SQ_CF_WORD1_SQ_CF_INST_TEX
) {
1292 struct r600_bc_tex
*ttex
;
1293 LIST_FOR_EACH_ENTRY(ttex
, &bc
->cf_last
->tex
, list
) {
1294 if (ttex
->dst_gpr
== ntex
->src_gpr
) {
1295 bc
->force_add_cf
= 1;
1301 /* cf can contains only alu or only vtx or only tex */
1302 if (bc
->cf_last
== NULL
||
1303 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_TEX
||
1305 r
= r600_bc_add_cf(bc
);
1310 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_TEX
;
1312 if (ntex
->src_gpr
>= bc
->ngpr
) {
1313 bc
->ngpr
= ntex
->src_gpr
+ 1;
1315 if (ntex
->dst_gpr
>= bc
->ngpr
) {
1316 bc
->ngpr
= ntex
->dst_gpr
+ 1;
1318 LIST_ADDTAIL(&ntex
->list
, &bc
->cf_last
->tex
);
1319 /* each texture fetch use 4 dwords */
1320 bc
->cf_last
->ndw
+= 4;
1322 if ((bc
->cf_last
->ndw
/ 4) > 7)
1323 bc
->force_add_cf
= 1;
1327 int r600_bc_add_cfinst(struct r600_bc
*bc
, int inst
)
1330 r
= r600_bc_add_cf(bc
);
1334 bc
->cf_last
->cond
= V_SQ_CF_COND_ACTIVE
;
1335 bc
->cf_last
->inst
= inst
;
1339 /* common to all 3 families */
1340 static int r600_bc_vtx_build(struct r600_bc
*bc
, struct r600_bc_vtx
*vtx
, unsigned id
)
1342 unsigned fetch_resource_start
= 0;
1344 /* check if we are fetch shader */
1345 /* fetch shader can also access vertex resource,
1346 * first fetch shader resource is at 160
1348 if (bc
->type
== -1) {
1349 switch (bc
->chiprev
) {
1354 fetch_resource_start
= 160;
1357 case CHIPREV_EVERGREEN
:
1358 fetch_resource_start
= 0;
1361 fprintf(stderr
, "%s:%s:%d unknown chiprev %d\n",
1362 __FILE__
, __func__
, __LINE__
, bc
->chiprev
);
1366 bc
->bytecode
[id
++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx
->buffer_id
+ fetch_resource_start
) |
1367 S_SQ_VTX_WORD0_FETCH_TYPE(vtx
->fetch_type
) |
1368 S_SQ_VTX_WORD0_SRC_GPR(vtx
->src_gpr
) |
1369 S_SQ_VTX_WORD0_SRC_SEL_X(vtx
->src_sel_x
) |
1370 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx
->mega_fetch_count
);
1371 bc
->bytecode
[id
++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx
->dst_sel_x
) |
1372 S_SQ_VTX_WORD1_DST_SEL_Y(vtx
->dst_sel_y
) |
1373 S_SQ_VTX_WORD1_DST_SEL_Z(vtx
->dst_sel_z
) |
1374 S_SQ_VTX_WORD1_DST_SEL_W(vtx
->dst_sel_w
) |
1375 S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx
->use_const_fields
) |
1376 S_SQ_VTX_WORD1_DATA_FORMAT(vtx
->data_format
) |
1377 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx
->num_format_all
) |
1378 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx
->format_comp_all
) |
1379 S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx
->srf_mode_all
) |
1380 S_SQ_VTX_WORD1_GPR_DST_GPR(vtx
->dst_gpr
);
1381 bc
->bytecode
[id
++] = S_SQ_VTX_WORD2_OFFSET(vtx
->offset
) |
1382 S_SQ_VTX_WORD2_MEGA_FETCH(1);
1383 bc
->bytecode
[id
++] = 0;
1387 /* common to all 3 families */
1388 static int r600_bc_tex_build(struct r600_bc
*bc
, struct r600_bc_tex
*tex
, unsigned id
)
1390 bc
->bytecode
[id
++] = S_SQ_TEX_WORD0_TEX_INST(tex
->inst
) |
1391 S_SQ_TEX_WORD0_RESOURCE_ID(tex
->resource_id
) |
1392 S_SQ_TEX_WORD0_SRC_GPR(tex
->src_gpr
) |
1393 S_SQ_TEX_WORD0_SRC_REL(tex
->src_rel
);
1394 bc
->bytecode
[id
++] = S_SQ_TEX_WORD1_DST_GPR(tex
->dst_gpr
) |
1395 S_SQ_TEX_WORD1_DST_REL(tex
->dst_rel
) |
1396 S_SQ_TEX_WORD1_DST_SEL_X(tex
->dst_sel_x
) |
1397 S_SQ_TEX_WORD1_DST_SEL_Y(tex
->dst_sel_y
) |
1398 S_SQ_TEX_WORD1_DST_SEL_Z(tex
->dst_sel_z
) |
1399 S_SQ_TEX_WORD1_DST_SEL_W(tex
->dst_sel_w
) |
1400 S_SQ_TEX_WORD1_LOD_BIAS(tex
->lod_bias
) |
1401 S_SQ_TEX_WORD1_COORD_TYPE_X(tex
->coord_type_x
) |
1402 S_SQ_TEX_WORD1_COORD_TYPE_Y(tex
->coord_type_y
) |
1403 S_SQ_TEX_WORD1_COORD_TYPE_Z(tex
->coord_type_z
) |
1404 S_SQ_TEX_WORD1_COORD_TYPE_W(tex
->coord_type_w
);
1405 bc
->bytecode
[id
++] = S_SQ_TEX_WORD2_OFFSET_X(tex
->offset_x
) |
1406 S_SQ_TEX_WORD2_OFFSET_Y(tex
->offset_y
) |
1407 S_SQ_TEX_WORD2_OFFSET_Z(tex
->offset_z
) |
1408 S_SQ_TEX_WORD2_SAMPLER_ID(tex
->sampler_id
) |
1409 S_SQ_TEX_WORD2_SRC_SEL_X(tex
->src_sel_x
) |
1410 S_SQ_TEX_WORD2_SRC_SEL_Y(tex
->src_sel_y
) |
1411 S_SQ_TEX_WORD2_SRC_SEL_Z(tex
->src_sel_z
) |
1412 S_SQ_TEX_WORD2_SRC_SEL_W(tex
->src_sel_w
);
1413 bc
->bytecode
[id
++] = 0;
1417 /* r600 only, r700/eg bits in r700_asm.c */
1418 static int r600_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
)
1420 /* don't replace gpr by pv or ps for destination register */
1421 bc
->bytecode
[id
++] = S_SQ_ALU_WORD0_SRC0_SEL(alu
->src
[0].sel
) |
1422 S_SQ_ALU_WORD0_SRC0_REL(alu
->src
[0].rel
) |
1423 S_SQ_ALU_WORD0_SRC0_CHAN(alu
->src
[0].chan
) |
1424 S_SQ_ALU_WORD0_SRC0_NEG(alu
->src
[0].neg
) |
1425 S_SQ_ALU_WORD0_SRC1_SEL(alu
->src
[1].sel
) |
1426 S_SQ_ALU_WORD0_SRC1_REL(alu
->src
[1].rel
) |
1427 S_SQ_ALU_WORD0_SRC1_CHAN(alu
->src
[1].chan
) |
1428 S_SQ_ALU_WORD0_SRC1_NEG(alu
->src
[1].neg
) |
1429 S_SQ_ALU_WORD0_LAST(alu
->last
);
1432 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1433 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1434 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1435 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1436 S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu
->src
[2].sel
) |
1437 S_SQ_ALU_WORD1_OP3_SRC2_REL(alu
->src
[2].rel
) |
1438 S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu
->src
[2].chan
) |
1439 S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu
->src
[2].neg
) |
1440 S_SQ_ALU_WORD1_OP3_ALU_INST(alu
->inst
) |
1441 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
);
1443 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1444 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1445 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1446 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1447 S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu
->src
[0].abs
) |
1448 S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu
->src
[1].abs
) |
1449 S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu
->dst
.write
) |
1450 S_SQ_ALU_WORD1_OP2_OMOD(alu
->omod
) |
1451 S_SQ_ALU_WORD1_OP2_ALU_INST(alu
->inst
) |
1452 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
) |
1453 S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu
->predicate
) |
1454 S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu
->predicate
);
1459 /* common for r600/r700 - eg in eg_asm.c */
1460 static int r600_bc_cf_build(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
1462 unsigned id
= cf
->id
;
1465 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1466 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1467 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1468 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1469 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD0_ADDR(cf
->addr
>> 1) |
1470 S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf
->kcache
[0].mode
) |
1471 S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf
->kcache
[0].bank
) |
1472 S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf
->kcache
[1].bank
);
1474 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD1_CF_INST(cf
->inst
>> 3) |
1475 S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf
->kcache
[1].mode
) |
1476 S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf
->kcache
[0].addr
) |
1477 S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf
->kcache
[1].addr
) |
1478 S_SQ_CF_ALU_WORD1_BARRIER(1) |
1479 S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc
->chiprev
== CHIPREV_R600
? cf
->r6xx_uses_waterfall
: 0) |
1480 S_SQ_CF_ALU_WORD1_COUNT((cf
->ndw
/ 2) - 1);
1482 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1483 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1484 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1485 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->addr
>> 1);
1486 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1487 S_SQ_CF_WORD1_BARRIER(1) |
1488 S_SQ_CF_WORD1_COUNT((cf
->ndw
/ 4) - 1);
1490 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1491 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1492 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf
->output
.gpr
) |
1493 S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf
->output
.elem_size
) |
1494 S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf
->output
.array_base
) |
1495 S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf
->output
.type
);
1496 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf
->output
.burst_count
- 1) |
1497 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf
->output
.swizzle_x
) |
1498 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf
->output
.swizzle_y
) |
1499 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf
->output
.swizzle_z
) |
1500 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf
->output
.swizzle_w
) |
1501 S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf
->output
.barrier
) |
1502 S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf
->output
.inst
) |
1503 S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf
->output
.end_of_program
);
1505 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1506 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1507 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1508 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1509 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1510 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1511 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1512 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1513 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1514 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->cf_addr
>> 1);
1515 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1516 S_SQ_CF_WORD1_BARRIER(1) |
1517 S_SQ_CF_WORD1_COND(cf
->cond
) |
1518 S_SQ_CF_WORD1_POP_COUNT(cf
->pop_count
);
1522 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1528 int r600_bc_build(struct r600_bc
*bc
)
1530 struct r600_bc_cf
*cf
;
1531 struct r600_bc_alu
*alu
;
1532 struct r600_bc_vtx
*vtx
;
1533 struct r600_bc_tex
*tex
;
1534 uint32_t literal
[4];
1539 if (bc
->callstack
[0].max
> 0)
1540 bc
->nstack
= ((bc
->callstack
[0].max
+ 3) >> 2) + 2;
1541 if (bc
->type
== TGSI_PROCESSOR_VERTEX
&& !bc
->nstack
) {
1545 /* first path compute addr of each CF block */
1546 /* addr start after all the CF instructions */
1547 addr
= bc
->cf_last
->id
+ 2;
1548 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1550 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1551 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1552 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1553 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1555 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1556 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1557 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1558 /* fetch node need to be 16 bytes aligned*/
1560 addr
&= 0xFFFFFFFCUL
;
1562 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1563 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1564 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1565 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1567 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1568 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1569 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1570 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1571 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1572 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1573 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1574 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1575 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1578 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1583 bc
->ndw
= cf
->addr
+ cf
->ndw
;
1586 bc
->bytecode
= calloc(1, bc
->ndw
* 4);
1587 if (bc
->bytecode
== NULL
)
1589 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1591 if (bc
->chiprev
== CHIPREV_EVERGREEN
)
1592 r
= eg_bc_cf_build(bc
, cf
);
1594 r
= r600_bc_cf_build(bc
, cf
);
1598 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1599 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1600 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1601 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1603 memset(literal
, 0, sizeof(literal
));
1604 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
1605 r
= r600_bc_alu_nliterals(bc
, alu
, literal
, &nliteral
);
1608 r600_bc_alu_adjust_literals(bc
, alu
, literal
, nliteral
);
1609 switch(bc
->chiprev
) {
1611 r
= r600_bc_alu_build(bc
, alu
, addr
);
1614 case CHIPREV_EVERGREEN
: /* eg alu is same encoding as r700 */
1615 r
= r700_bc_alu_build(bc
, alu
, addr
);
1618 R600_ERR("unknown family %d\n", bc
->family
);
1625 for (i
= 0; i
< align(nliteral
, 2); ++i
) {
1626 bc
->bytecode
[addr
++] = literal
[i
];
1629 memset(literal
, 0, sizeof(literal
));
1633 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1634 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1635 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
1636 r
= r600_bc_vtx_build(bc
, vtx
, addr
);
1642 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1643 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
1644 r
= r600_bc_tex_build(bc
, tex
, addr
);
1650 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1651 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1652 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1653 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1654 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1655 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1656 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1657 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1658 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1659 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1660 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1661 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1662 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1665 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1672 void r600_bc_clear(struct r600_bc
*bc
)
1674 struct r600_bc_cf
*cf
= NULL
, *next_cf
;
1677 bc
->bytecode
= NULL
;
1679 LIST_FOR_EACH_ENTRY_SAFE(cf
, next_cf
, &bc
->cf
, list
) {
1680 struct r600_bc_alu
*alu
= NULL
, *next_alu
;
1681 struct r600_bc_tex
*tex
= NULL
, *next_tex
;
1682 struct r600_bc_tex
*vtx
= NULL
, *next_vtx
;
1684 LIST_FOR_EACH_ENTRY_SAFE(alu
, next_alu
, &cf
->alu
, list
) {
1688 LIST_INITHEAD(&cf
->alu
);
1690 LIST_FOR_EACH_ENTRY_SAFE(tex
, next_tex
, &cf
->tex
, list
) {
1694 LIST_INITHEAD(&cf
->tex
);
1696 LIST_FOR_EACH_ENTRY_SAFE(vtx
, next_vtx
, &cf
->vtx
, list
) {
1700 LIST_INITHEAD(&cf
->vtx
);
1705 LIST_INITHEAD(&cf
->list
);
1708 void r600_bc_dump(struct r600_bc
*bc
)
1710 struct r600_bc_cf
*cf
= NULL
;
1711 struct r600_bc_alu
*alu
= NULL
;
1712 struct r600_bc_vtx
*vtx
= NULL
;
1713 struct r600_bc_tex
*tex
= NULL
;
1716 uint32_t literal
[4];
1720 switch (bc
->chiprev
) {
1732 fprintf(stderr
, "bytecode %d dw -- %d gprs ---------------------\n", bc
->ndw
, bc
->ngpr
);
1733 fprintf(stderr
, " %c\n", chip
);
1735 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1739 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1740 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1741 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1742 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1743 fprintf(stderr
, "%04d %08X ALU ", id
, bc
->bytecode
[id
]);
1744 fprintf(stderr
, "ADDR:%d ", cf
->addr
);
1745 fprintf(stderr
, "KCACHE_MODE0:%X ", cf
->kcache
[0].mode
);
1746 fprintf(stderr
, "KCACHE_BANK0:%X ", cf
->kcache
[0].bank
);
1747 fprintf(stderr
, "KCACHE_BANK1:%X\n", cf
->kcache
[1].bank
);
1749 fprintf(stderr
, "%04d %08X ALU ", id
, bc
->bytecode
[id
]);
1750 fprintf(stderr
, "INST:%d ", cf
->inst
);
1751 fprintf(stderr
, "KCACHE_MODE1:%X ", cf
->kcache
[1].mode
);
1752 fprintf(stderr
, "KCACHE_ADDR0:%X ", cf
->kcache
[0].addr
);
1753 fprintf(stderr
, "KCACHE_ADDR1:%X ", cf
->kcache
[1].addr
);
1754 fprintf(stderr
, "COUNT:%d\n", cf
->ndw
/ 2);
1756 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1757 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1758 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1759 fprintf(stderr
, "%04d %08X TEX/VTX ", id
, bc
->bytecode
[id
]);
1760 fprintf(stderr
, "ADDR:%d\n", cf
->addr
);
1762 fprintf(stderr
, "%04d %08X TEX/VTX ", id
, bc
->bytecode
[id
]);
1763 fprintf(stderr
, "INST:%d ", cf
->inst
);
1764 fprintf(stderr
, "COUNT:%d\n", cf
->ndw
/ 4);
1766 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1767 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1768 fprintf(stderr
, "%04d %08X EXPORT ", id
, bc
->bytecode
[id
]);
1769 fprintf(stderr
, "GPR:%X ", cf
->output
.gpr
);
1770 fprintf(stderr
, "ELEM_SIZE:%X ", cf
->output
.elem_size
);
1771 fprintf(stderr
, "ARRAY_BASE:%X ", cf
->output
.array_base
);
1772 fprintf(stderr
, "TYPE:%X\n", cf
->output
.type
);
1774 fprintf(stderr
, "%04d %08X EXPORT ", id
, bc
->bytecode
[id
]);
1775 fprintf(stderr
, "SWIZ_X:%X ", cf
->output
.swizzle_x
);
1776 fprintf(stderr
, "SWIZ_Y:%X ", cf
->output
.swizzle_y
);
1777 fprintf(stderr
, "SWIZ_Z:%X ", cf
->output
.swizzle_z
);
1778 fprintf(stderr
, "SWIZ_W:%X ", cf
->output
.swizzle_w
);
1779 fprintf(stderr
, "BARRIER:%X ", cf
->output
.barrier
);
1780 fprintf(stderr
, "INST:%d ", cf
->output
.inst
);
1781 fprintf(stderr
, "BURST_COUNT:%d ", cf
->output
.burst_count
);
1782 fprintf(stderr
, "EOP:%X\n", cf
->output
.end_of_program
);
1784 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1785 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1786 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1787 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1788 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1789 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1790 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1791 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1792 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1793 fprintf(stderr
, "%04d %08X CF ", id
, bc
->bytecode
[id
]);
1794 fprintf(stderr
, "ADDR:%d\n", cf
->cf_addr
);
1796 fprintf(stderr
, "%04d %08X CF ", id
, bc
->bytecode
[id
]);
1797 fprintf(stderr
, "INST:%d ", cf
->inst
);
1798 fprintf(stderr
, "COND:%X ", cf
->cond
);
1799 fprintf(stderr
, "POP_COUNT:%X\n", cf
->pop_count
);
1805 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
1806 r600_bc_alu_nliterals(bc
, alu
, literal
, &nliteral
);
1808 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1809 fprintf(stderr
, "SRC0(SEL:%d ", alu
->src
[0].sel
);
1810 fprintf(stderr
, "REL:%d ", alu
->src
[0].rel
);
1811 fprintf(stderr
, "CHAN:%d ", alu
->src
[0].chan
);
1812 fprintf(stderr
, "NEG:%d) ", alu
->src
[0].neg
);
1813 fprintf(stderr
, "SRC1(SEL:%d ", alu
->src
[1].sel
);
1814 fprintf(stderr
, "REL:%d ", alu
->src
[1].rel
);
1815 fprintf(stderr
, "CHAN:%d ", alu
->src
[1].chan
);
1816 fprintf(stderr
, "NEG:%d) ", alu
->src
[1].neg
);
1817 fprintf(stderr
, "LAST:%d)\n", alu
->last
);
1819 fprintf(stderr
, "%04d %08X %c ", id
, bc
->bytecode
[id
], alu
->last
? '*' : ' ');
1820 fprintf(stderr
, "INST:%d ", alu
->inst
);
1821 fprintf(stderr
, "DST(SEL:%d ", alu
->dst
.sel
);
1822 fprintf(stderr
, "CHAN:%d ", alu
->dst
.chan
);
1823 fprintf(stderr
, "REL:%d ", alu
->dst
.rel
);
1824 fprintf(stderr
, "CLAMP:%d) ", alu
->dst
.clamp
);
1825 fprintf(stderr
, "BANK_SWIZZLE:%d ", alu
->bank_swizzle
);
1827 fprintf(stderr
, "SRC2(SEL:%d ", alu
->src
[2].sel
);
1828 fprintf(stderr
, "REL:%d ", alu
->src
[2].rel
);
1829 fprintf(stderr
, "CHAN:%d ", alu
->src
[2].chan
);
1830 fprintf(stderr
, "NEG:%d)\n", alu
->src
[2].neg
);
1832 fprintf(stderr
, "SRC0_ABS:%d ", alu
->src
[0].abs
);
1833 fprintf(stderr
, "SRC1_ABS:%d ", alu
->src
[1].abs
);
1834 fprintf(stderr
, "WRITE_MASK:%d ", alu
->dst
.write
);
1835 fprintf(stderr
, "OMOD:%d ", alu
->omod
);
1836 fprintf(stderr
, "EXECUTE_MASK:%d ", alu
->predicate
);
1837 fprintf(stderr
, "UPDATE_PRED:%d\n", alu
->predicate
);
1842 for (i
= 0; i
< nliteral
; i
++, id
++) {
1843 float *f
= (float*)(bc
->bytecode
+ id
);
1844 fprintf(stderr
, "%04d %08X\t%f\n", id
, bc
->bytecode
[id
], *f
);
1851 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
1852 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1853 fprintf(stderr
, "INST:%d ", tex
->inst
);
1854 fprintf(stderr
, "RESOURCE_ID:%d ", tex
->resource_id
);
1855 fprintf(stderr
, "SRC(GPR:%d ", tex
->src_gpr
);
1856 fprintf(stderr
, "REL:%d)\n", tex
->src_rel
);
1858 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1859 fprintf(stderr
, "DST(GPR:%d ", tex
->dst_gpr
);
1860 fprintf(stderr
, "REL:%d ", tex
->dst_rel
);
1861 fprintf(stderr
, "SEL_X:%d ", tex
->dst_sel_x
);
1862 fprintf(stderr
, "SEL_Y:%d ", tex
->dst_sel_y
);
1863 fprintf(stderr
, "SEL_Z:%d ", tex
->dst_sel_z
);
1864 fprintf(stderr
, "SEL_W:%d) ", tex
->dst_sel_w
);
1865 fprintf(stderr
, "LOD_BIAS:%d ", tex
->lod_bias
);
1866 fprintf(stderr
, "COORD_TYPE_X:%d ", tex
->coord_type_x
);
1867 fprintf(stderr
, "COORD_TYPE_Y:%d ", tex
->coord_type_y
);
1868 fprintf(stderr
, "COORD_TYPE_Z:%d ", tex
->coord_type_z
);
1869 fprintf(stderr
, "COORD_TYPE_W:%d\n", tex
->coord_type_w
);
1871 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1872 fprintf(stderr
, "OFFSET_X:%d ", tex
->offset_x
);
1873 fprintf(stderr
, "OFFSET_Y:%d ", tex
->offset_y
);
1874 fprintf(stderr
, "OFFSET_Z:%d ", tex
->offset_z
);
1875 fprintf(stderr
, "SAMPLER_ID:%d ", tex
->sampler_id
);
1876 fprintf(stderr
, "SRC(SEL_X:%d ", tex
->src_sel_x
);
1877 fprintf(stderr
, "SEL_Y:%d ", tex
->src_sel_y
);
1878 fprintf(stderr
, "SEL_Z:%d ", tex
->src_sel_z
);
1879 fprintf(stderr
, "SEL_W:%d)\n", tex
->src_sel_w
);
1881 fprintf(stderr
, "%04d %08X \n", id
, bc
->bytecode
[id
]);
1885 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
1886 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1887 fprintf(stderr
, "INST:%d ", vtx
->inst
);
1888 fprintf(stderr
, "FETCH_TYPE:%d ", vtx
->fetch_type
);
1889 fprintf(stderr
, "BUFFER_ID:%d\n", vtx
->buffer_id
);
1891 /* This assumes that no semantic fetches exist */
1892 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1893 fprintf(stderr
, "SRC(GPR:%d ", vtx
->src_gpr
);
1894 fprintf(stderr
, "SEL_X:%d) ", vtx
->src_sel_x
);
1895 fprintf(stderr
, "MEGA_FETCH_COUNT:%d ", vtx
->mega_fetch_count
);
1896 fprintf(stderr
, "DST(GPR:%d ", vtx
->dst_gpr
);
1897 fprintf(stderr
, "SEL_X:%d ", vtx
->dst_sel_x
);
1898 fprintf(stderr
, "SEL_Y:%d ", vtx
->dst_sel_y
);
1899 fprintf(stderr
, "SEL_Z:%d ", vtx
->dst_sel_z
);
1900 fprintf(stderr
, "SEL_W:%d) ", vtx
->dst_sel_w
);
1901 fprintf(stderr
, "USE_CONST_FIELDS:%d ", vtx
->use_const_fields
);
1902 fprintf(stderr
, "FORMAT(DATA:%d ", vtx
->data_format
);
1903 fprintf(stderr
, "NUM:%d ", vtx
->num_format_all
);
1904 fprintf(stderr
, "COMP:%d ", vtx
->format_comp_all
);
1905 fprintf(stderr
, "MODE:%d)\n", vtx
->srf_mode_all
);
1907 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1908 fprintf(stderr
, "OFFSET:%d\n", vtx
->offset
);
1911 fprintf(stderr
, "%04d %08X \n", id
, bc
->bytecode
[id
]);
1916 fprintf(stderr
, "--------------------------------------\n");
1919 static void r600_cf_vtx(struct r600_vertex_element
*ve
)
1921 struct r600_pipe_state
*rstate
;
1923 rstate
= &ve
->rstate
;
1924 rstate
->id
= R600_PIPE_STATE_FETCH_SHADER
;
1926 r600_pipe_state_add_reg(rstate
, R_0288A4_SQ_PGM_RESOURCES_FS
,
1927 0x00000000, 0xFFFFFFFF, NULL
);
1928 r600_pipe_state_add_reg(rstate
, R_0288DC_SQ_PGM_CF_OFFSET_FS
,
1929 0x00000000, 0xFFFFFFFF, NULL
);
1930 r600_pipe_state_add_reg(rstate
, R_028894_SQ_PGM_START_FS
,
1931 r600_bo_offset(ve
->fetch_shader
) >> 8,
1932 0xFFFFFFFF, ve
->fetch_shader
);
1935 static void r600_vertex_data_type(enum pipe_format pformat
, unsigned *format
,
1936 unsigned *num_format
, unsigned *format_comp
)
1938 const struct util_format_description
*desc
;
1945 desc
= util_format_description(pformat
);
1946 if (desc
->layout
!= UTIL_FORMAT_LAYOUT_PLAIN
) {
1950 /* Find the first non-VOID channel. */
1951 for (i
= 0; i
< 4; i
++) {
1952 if (desc
->channel
[i
].type
!= UTIL_FORMAT_TYPE_VOID
) {
1957 switch (desc
->channel
[i
].type
) {
1958 /* Half-floats, floats, ints */
1959 case UTIL_FORMAT_TYPE_FLOAT
:
1960 switch (desc
->channel
[i
].size
) {
1962 switch (desc
->nr_channels
) {
1964 *format
= FMT_16_FLOAT
;
1967 *format
= FMT_16_16_FLOAT
;
1971 *format
= FMT_16_16_16_16_FLOAT
;
1976 switch (desc
->nr_channels
) {
1978 *format
= FMT_32_FLOAT
;
1981 *format
= FMT_32_32_FLOAT
;
1984 *format
= FMT_32_32_32_FLOAT
;
1987 *format
= FMT_32_32_32_32_FLOAT
;
1996 case UTIL_FORMAT_TYPE_UNSIGNED
:
1998 case UTIL_FORMAT_TYPE_SIGNED
:
1999 switch (desc
->channel
[i
].size
) {
2001 switch (desc
->nr_channels
) {
2010 *format
= FMT_8_8_8_8
;
2015 switch (desc
->nr_channels
) {
2020 *format
= FMT_16_16
;
2024 *format
= FMT_16_16_16_16
;
2029 switch (desc
->nr_channels
) {
2034 *format
= FMT_32_32
;
2037 *format
= FMT_32_32_32
;
2040 *format
= FMT_32_32_32_32
;
2052 if (desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_SIGNED
) {
2055 if (desc
->channel
[i
].normalized
) {
2062 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat
));
2065 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context
*rctx
, struct r600_vertex_element
*ve
)
2067 static int dump_shaders
= -1;
2070 struct r600_bc_vtx vtx
;
2071 struct pipe_vertex_element
*elements
= ve
->elements
;
2072 const struct util_format_description
*desc
;
2073 unsigned fetch_resource_start
= rctx
->family
>= CHIP_CEDAR
? 0 : 160;
2074 unsigned format
, num_format
, format_comp
;
2078 /* vertex elements offset need special handling, if offset is bigger
2079 + * than what we can put in fetch instruction then we need to alterate
2080 * the vertex resource offset. In such case in order to simplify code
2081 * we will bound one resource per elements. It's a worst case scenario.
2083 for (i
= 0; i
< ve
->count
; i
++) {
2084 ve
->vbuffer_offset
[i
] = C_SQ_VTX_WORD2_OFFSET
& elements
[i
].src_offset
;
2085 if (ve
->vbuffer_offset
[i
]) {
2086 ve
->vbuffer_need_offset
= 1;
2090 memset(&bc
, 0, sizeof(bc
));
2091 r
= r600_bc_init(&bc
, r600_get_family(rctx
->radeon
));
2095 for (i
= 0; i
< ve
->count
; i
++) {
2096 if (elements
[i
].instance_divisor
> 1) {
2097 struct r600_bc_alu alu
;
2099 memset(&alu
, 0, sizeof(alu
));
2100 alu
.inst
= BC_INST(&bc
, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
);
2102 alu
.src
[0].chan
= 3;
2104 alu
.src
[1].sel
= V_SQ_ALU_SRC_LITERAL
;
2105 alu
.src
[1].value
= (1l << 32) / elements
[i
].instance_divisor
+ 1;
2107 alu
.dst
.sel
= i
+ 1;
2112 if ((r
= r600_bc_add_alu(&bc
, &alu
))) {
2119 for (i
= 0; i
< ve
->count
; i
++) {
2120 unsigned vbuffer_index
;
2121 r600_vertex_data_type(ve
->elements
[i
].src_format
, &format
, &num_format
, &format_comp
);
2122 desc
= util_format_description(ve
->elements
[i
].src_format
);
2125 R600_ERR("unknown format %d\n", ve
->elements
[i
].src_format
);
2129 /* see above for vbuffer_need_offset explanation */
2130 vbuffer_index
= elements
[i
].vertex_buffer_index
;
2131 memset(&vtx
, 0, sizeof(vtx
));
2132 vtx
.buffer_id
= (ve
->vbuffer_need_offset
? i
: vbuffer_index
) + fetch_resource_start
;
2133 vtx
.fetch_type
= elements
[i
].instance_divisor
? 1 : 0;
2134 vtx
.src_gpr
= elements
[i
].instance_divisor
> 1 ? i
+ 1 : 0;
2135 vtx
.src_sel_x
= elements
[i
].instance_divisor
? 3 : 0;
2136 vtx
.mega_fetch_count
= 0x1F;
2137 vtx
.dst_gpr
= i
+ 1;
2138 vtx
.dst_sel_x
= desc
->swizzle
[0];
2139 vtx
.dst_sel_y
= desc
->swizzle
[1];
2140 vtx
.dst_sel_z
= desc
->swizzle
[2];
2141 vtx
.dst_sel_w
= desc
->swizzle
[3];
2142 vtx
.data_format
= format
;
2143 vtx
.num_format_all
= num_format
;
2144 vtx
.format_comp_all
= format_comp
;
2145 vtx
.srf_mode_all
= 1;
2146 vtx
.offset
= elements
[i
].src_offset
;
2148 if ((r
= r600_bc_add_vtx(&bc
, &vtx
))) {
2154 r600_bc_add_cfinst(&bc
, BC_INST(&bc
, V_SQ_CF_WORD1_SQ_CF_INST_RETURN
));
2156 if ((r
= r600_bc_build(&bc
))) {
2161 if (dump_shaders
== -1)
2162 dump_shaders
= debug_get_bool_option("R600_DUMP_SHADERS", FALSE
);
2165 fprintf(stderr
, "--------------------------------------------------------------\n");
2167 fprintf(stderr
, "______________________________________________________________\n");
2170 ve
->fs_size
= bc
.ndw
*4;
2172 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2173 ve
->fetch_shader
= r600_bo(rctx
->radeon
, ve
->fs_size
, 256, PIPE_BIND_VERTEX_BUFFER
, 0);
2174 if (ve
->fetch_shader
== NULL
) {
2179 bytecode
= r600_bo_map(rctx
->radeon
, ve
->fetch_shader
, 0, NULL
);
2180 if (bytecode
== NULL
) {
2182 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2186 memcpy(bytecode
, bc
.bytecode
, ve
->fs_size
);
2188 r600_bo_unmap(rctx
->radeon
, ve
->fetch_shader
);
2191 if (rctx
->family
>= CHIP_CEDAR
)