2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
30 #include "r600_opcodes.h"
32 #include "r600_formats.h"
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
38 #define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
39 #define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)
41 static inline unsigned int r600_bc_get_num_operands(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
46 switch (bc
->chiprev
) {
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
68 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
69 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
70 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
83 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
:
84 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
85 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
:
86 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
87 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
88 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
89 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
92 "Need instruction operand number for 0x%x.\n", alu
->inst
);
95 case CHIPREV_EVERGREEN
:
97 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
99 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
100 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
101 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
102 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
103 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
104 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
105 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
106 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
107 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
108 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
109 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
110 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
111 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
112 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
113 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
114 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
115 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
116 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
117 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
118 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY
:
119 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW
:
122 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
123 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
:
124 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
125 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
126 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
127 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
128 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
129 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
130 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
:
131 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
132 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
:
133 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
134 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
135 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR
:
136 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
137 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
140 "Need instruction operand number for 0x%x.\n", alu
->inst
);
148 int r700_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
);
150 static struct r600_bc_cf
*r600_bc_cf(void)
152 struct r600_bc_cf
*cf
= CALLOC_STRUCT(r600_bc_cf
);
156 LIST_INITHEAD(&cf
->list
);
157 LIST_INITHEAD(&cf
->alu
);
158 LIST_INITHEAD(&cf
->vtx
);
159 LIST_INITHEAD(&cf
->tex
);
164 static struct r600_bc_alu
*r600_bc_alu(void)
166 struct r600_bc_alu
*alu
= CALLOC_STRUCT(r600_bc_alu
);
170 LIST_INITHEAD(&alu
->list
);
174 static struct r600_bc_vtx
*r600_bc_vtx(void)
176 struct r600_bc_vtx
*vtx
= CALLOC_STRUCT(r600_bc_vtx
);
180 LIST_INITHEAD(&vtx
->list
);
184 static struct r600_bc_tex
*r600_bc_tex(void)
186 struct r600_bc_tex
*tex
= CALLOC_STRUCT(r600_bc_tex
);
190 LIST_INITHEAD(&tex
->list
);
194 int r600_bc_init(struct r600_bc
*bc
, enum radeon_family family
)
196 LIST_INITHEAD(&bc
->cf
);
198 switch (bc
->family
) {
207 bc
->chiprev
= CHIPREV_R600
;
213 bc
->chiprev
= CHIPREV_R700
;
224 bc
->chiprev
= CHIPREV_EVERGREEN
;
227 R600_ERR("unknown family %d\n", bc
->family
);
233 static int r600_bc_add_cf(struct r600_bc
*bc
)
235 struct r600_bc_cf
*cf
= r600_bc_cf();
239 LIST_ADDTAIL(&cf
->list
, &bc
->cf
);
241 cf
->id
= bc
->cf_last
->id
+ 2;
245 bc
->force_add_cf
= 0;
249 static void r600_bc_remove_cf(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
251 struct r600_bc_cf
*other
;
252 LIST_FOR_EACH_ENTRY(other
, &bc
->cf
, list
) {
253 if (other
->id
> cf
->id
)
255 if (other
->cf_addr
> cf
->id
)
262 static void r600_bc_move_cf(struct r600_bc
*bc
, struct r600_bc_cf
*cf
, struct r600_bc_cf
*next
)
264 struct r600_bc_cf
*prev
= LIST_ENTRY(struct r600_bc_cf
, next
->list
.prev
, list
);
265 unsigned old_id
= cf
->id
;
266 unsigned new_id
= next
->list
.prev
== &bc
->cf
? 0 : prev
->id
+ 2;
267 struct r600_bc_cf
*other
;
269 if (prev
== cf
|| next
== cf
)
270 return; /* position hasn't changed */
273 LIST_FOR_EACH_ENTRY(other
, &bc
->cf
, list
) {
274 if (other
->id
> old_id
)
276 if (other
->id
>= new_id
)
278 if (other
->cf_addr
> old_id
)
280 if (other
->cf_addr
> new_id
)
284 LIST_ADD(&cf
->list
, &prev
->list
);
287 int r600_bc_add_output(struct r600_bc
*bc
, const struct r600_bc_output
*output
)
291 r
= r600_bc_add_cf(bc
);
294 bc
->cf_last
->inst
= BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
);
295 memcpy(&bc
->cf_last
->output
, output
, sizeof(struct r600_bc_output
));
296 bc
->cf_last
->output
.burst_count
= 1;
300 /* alu predicate instructions */
301 static int is_alu_pred_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
303 switch (bc
->chiprev
) {
306 return !alu
->is_op3
&& (
307 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
308 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
309 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
310 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
311 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
312 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
313 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
314 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
315 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
316 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
317 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
318 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
319 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
320 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
321 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
322 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
323 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
324 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
325 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
326 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
327 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
328 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
329 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
330 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
331 case CHIPREV_EVERGREEN
:
333 return !alu
->is_op3
&& (
334 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
335 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
336 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
337 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
338 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
339 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
340 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
341 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
342 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
343 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
344 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
345 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
346 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
347 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
348 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
349 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
350 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
351 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
352 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
353 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
354 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
355 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
356 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
357 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
361 /* alu kill instructions */
362 static int is_alu_kill_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
364 switch (bc
->chiprev
) {
367 return !alu
->is_op3
&& (
368 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
369 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
370 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
371 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
372 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
373 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
374 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
375 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
376 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
377 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
);
378 case CHIPREV_EVERGREEN
:
380 return !alu
->is_op3
&& (
381 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
382 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
383 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
384 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
385 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
386 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
387 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
388 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
389 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
390 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
);
/* ALU instructions that may appear only once per instruction group
 * (kill and predicate-set instructions). */
static int is_alu_once_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	return is_alu_kill_inst(bc, alu) ||
		is_alu_pred_inst(bc, alu);
}
401 static int is_alu_reduction_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
403 switch (bc
->chiprev
) {
406 return !alu
->is_op3
&& (
407 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
408 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
409 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
410 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
411 case CHIPREV_EVERGREEN
:
413 return !alu
->is_op3
&& (
414 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
415 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
416 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
417 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
421 static int is_alu_mova_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
423 switch (bc
->chiprev
) {
426 return !alu
->is_op3
&& (
427 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
||
428 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
||
429 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
430 case CHIPREV_EVERGREEN
:
432 return !alu
->is_op3
&& (
433 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
/* ALU instructions that can only execute on the vector units
 * (reductions and MOVA). */
static int is_alu_vec_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	return is_alu_reduction_inst(bc, alu) ||
		is_alu_mova_inst(bc, alu);
}
444 /* alu instructions that can only execute on the trans unit */
445 static int is_alu_trans_unit_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
447 switch (bc
->chiprev
) {
451 return alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
452 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
||
453 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
454 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
455 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
456 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
457 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
458 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
459 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
460 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
461 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
462 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
463 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
464 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
465 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
466 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
467 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
468 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
469 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
470 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
471 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
472 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
473 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
474 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
476 return alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
||
477 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2
||
478 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2
||
479 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4
;
480 case CHIPREV_EVERGREEN
:
483 return alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
484 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
||
485 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR
||
486 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
487 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
488 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
489 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
490 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
491 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
492 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
493 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
494 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
495 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
496 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
497 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
498 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
499 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
500 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
501 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
502 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
503 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
504 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
505 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
506 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
507 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
509 return alu
->inst
== EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
;
/* ALU instructions that can execute on any unit
 * (neither vector-only nor trans-only). */
static int is_alu_any_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	return !is_alu_vec_unit_inst(bc, alu) &&
		!is_alu_trans_unit_inst(bc, alu);
}
520 static int assign_alu_units(struct r600_bc
*bc
, struct r600_bc_alu
*alu_first
,
521 struct r600_bc_alu
*assignment
[5])
523 struct r600_bc_alu
*alu
;
524 unsigned i
, chan
, trans
;
526 for (i
= 0; i
< 5; i
++)
527 assignment
[i
] = NULL
;
529 for (alu
= alu_first
; alu
; alu
= LIST_ENTRY(struct r600_bc_alu
, alu
->list
.next
, list
)) {
530 chan
= alu
->dst
.chan
;
531 if (is_alu_trans_unit_inst(bc
, alu
))
533 else if (is_alu_vec_unit_inst(bc
, alu
))
535 else if (assignment
[chan
])
536 trans
= 1; // assume ALU_INST_PREFER_VECTOR
542 assert(0); //ALU.Trans has already been allocated
547 if (assignment
[chan
]) {
548 assert(0); //ALU.chan has already been allocated
551 assignment
[chan
] = alu
;
560 struct alu_bank_swizzle
{
561 int hw_gpr
[NUM_OF_CYCLES
][NUM_OF_COMPONENTS
];
562 int hw_cfile_addr
[4];
563 int hw_cfile_elem
[4];
566 const unsigned cycle_for_bank_swizzle_vec
[][3] = {
567 [SQ_ALU_VEC_012
] = { 0, 1, 2 },
568 [SQ_ALU_VEC_021
] = { 0, 2, 1 },
569 [SQ_ALU_VEC_120
] = { 1, 2, 0 },
570 [SQ_ALU_VEC_102
] = { 1, 0, 2 },
571 [SQ_ALU_VEC_201
] = { 2, 0, 1 },
572 [SQ_ALU_VEC_210
] = { 2, 1, 0 }
575 const unsigned cycle_for_bank_swizzle_scl
[][3] = {
576 [SQ_ALU_SCL_210
] = { 2, 1, 0 },
577 [SQ_ALU_SCL_122
] = { 1, 2, 2 },
578 [SQ_ALU_SCL_212
] = { 2, 1, 2 },
579 [SQ_ALU_SCL_221
] = { 2, 2, 1 }
582 static void init_bank_swizzle(struct alu_bank_swizzle
*bs
)
584 int i
, cycle
, component
;
586 for (cycle
= 0; cycle
< NUM_OF_CYCLES
; cycle
++)
587 for (component
= 0; component
< NUM_OF_COMPONENTS
; component
++)
588 bs
->hw_gpr
[cycle
][component
] = -1;
589 for (i
= 0; i
< 4; i
++)
590 bs
->hw_cfile_addr
[i
] = -1;
591 for (i
= 0; i
< 4; i
++)
592 bs
->hw_cfile_elem
[i
] = -1;
595 static int reserve_gpr(struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
, unsigned cycle
)
597 if (bs
->hw_gpr
[cycle
][chan
] == -1)
598 bs
->hw_gpr
[cycle
][chan
] = sel
;
599 else if (bs
->hw_gpr
[cycle
][chan
] != (int)sel
) {
600 // Another scalar operation has already used GPR read port for channel
606 static int reserve_cfile(struct r600_bc
*bc
, struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
)
608 int res
, num_res
= 4;
609 if (bc
->chiprev
>= CHIPREV_R700
) {
613 for (res
= 0; res
< num_res
; ++res
) {
614 if (bs
->hw_cfile_addr
[res
] == -1) {
615 bs
->hw_cfile_addr
[res
] = sel
;
616 bs
->hw_cfile_elem
[res
] = chan
;
618 } else if (bs
->hw_cfile_addr
[res
] == sel
&&
619 bs
->hw_cfile_elem
[res
] == chan
)
620 return 0; // Read for this scalar element already reserved, nothing to do here.
622 // All cfile read ports are used, cannot reference vector element
/* A source selector in [0, 127] addresses a GPR. */
static int is_gpr(unsigned sel)
{
	/* 'sel' is unsigned, so the original 'sel >= 0' comparison was
	 * always true (and draws -Wtype-limits); only the upper bound
	 * needs checking. */
	return sel <= 127;
}
/* CB constants start at 512, and get translated to a kcache index when ALU
 * clauses are constructed. Note that we handle kcache constants the same way
 * as (the now gone) cfile constants, is that really required? */
static int is_cfile(unsigned sel)
{
	if (sel > 255 && sel < 512)
		return 1; /* cfile */
	if (sel > 511 && sel < 4607)
		return 1; /* kcache before translation */
	return sel > 127 && sel < 192; /* kcache after translation */
}
641 static int is_const(int sel
)
643 return is_cfile(sel
) ||
644 (sel
>= V_SQ_ALU_SRC_0
&&
645 sel
<= V_SQ_ALU_SRC_LITERAL
);
648 static int check_vector(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
649 struct alu_bank_swizzle
*bs
, int bank_swizzle
)
651 int r
, src
, num_src
, sel
, elem
, cycle
;
653 num_src
= r600_bc_get_num_operands(bc
, alu
);
654 for (src
= 0; src
< num_src
; src
++) {
655 sel
= alu
->src
[src
].sel
;
656 elem
= alu
->src
[src
].chan
;
658 cycle
= cycle_for_bank_swizzle_vec
[bank_swizzle
][src
];
659 if (src
== 1 && sel
== alu
->src
[0].sel
&& elem
== alu
->src
[0].chan
)
660 // Nothing to do; special-case optimization,
661 // second source uses first source’s reservation
664 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
668 } else if (is_cfile(sel
)) {
669 r
= reserve_cfile(bc
, bs
, sel
, elem
);
673 // No restrictions on PV, PS, literal or special constants
678 static int check_scalar(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
679 struct alu_bank_swizzle
*bs
, int bank_swizzle
)
681 int r
, src
, num_src
, const_count
, sel
, elem
, cycle
;
683 num_src
= r600_bc_get_num_operands(bc
, alu
);
684 for (const_count
= 0, src
= 0; src
< num_src
; ++src
) {
685 sel
= alu
->src
[src
].sel
;
686 elem
= alu
->src
[src
].chan
;
687 if (is_const(sel
)) { // Any constant, including literal and inline constants
688 if (const_count
>= 2)
689 // More than two references to a constant in
690 // transcendental operation.
696 r
= reserve_cfile(bc
, bs
, sel
, elem
);
701 for (src
= 0; src
< num_src
; ++src
) {
702 sel
= alu
->src
[src
].sel
;
703 elem
= alu
->src
[src
].chan
;
705 cycle
= cycle_for_bank_swizzle_scl
[bank_swizzle
][src
];
706 if (cycle
< const_count
)
707 // Cycle for GPR load conflicts with
708 // constant load in transcendental operation.
710 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
714 // Constants already processed
715 // No restrictions on PV, PS
720 static int check_and_set_bank_swizzle(struct r600_bc
*bc
,
721 struct r600_bc_alu
*slots
[5])
723 struct alu_bank_swizzle bs
;
725 int i
, r
= 0, forced
= 0;
727 for (i
= 0; i
< 5; i
++)
728 if (slots
[i
] && slots
[i
]->bank_swizzle_force
) {
729 slots
[i
]->bank_swizzle
= slots
[i
]->bank_swizzle_force
;
736 // just check every possible combination of bank swizzle
737 // not very efficent, but works on the first try in most of the cases
738 for (i
= 0; i
< 4; i
++)
739 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
740 bank_swizzle
[4] = SQ_ALU_SCL_210
;
741 while(bank_swizzle
[4] <= SQ_ALU_SCL_221
) {
742 init_bank_swizzle(&bs
);
743 for (i
= 0; i
< 4; i
++) {
745 r
= check_vector(bc
, slots
[i
], &bs
, bank_swizzle
[i
]);
750 if (!r
&& slots
[4]) {
751 r
= check_scalar(bc
, slots
[4], &bs
, bank_swizzle
[4]);
754 for (i
= 0; i
< 5; i
++) {
756 slots
[i
]->bank_swizzle
= bank_swizzle
[i
];
761 for (i
= 0; i
< 5; i
++) {
763 if (bank_swizzle
[i
] <= SQ_ALU_VEC_210
)
766 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
770 // couldn't find a working swizzle
774 static int replace_gpr_with_pv_ps(struct r600_bc
*bc
,
775 struct r600_bc_alu
*slots
[5], struct r600_bc_alu
*alu_prev
)
777 struct r600_bc_alu
*prev
[5];
779 int i
, j
, r
, src
, num_src
;
781 r
= assign_alu_units(bc
, alu_prev
, prev
);
785 for (i
= 0; i
< 5; ++i
) {
786 if(prev
[i
] && prev
[i
]->dst
.write
&& !prev
[i
]->dst
.rel
) {
787 gpr
[i
] = prev
[i
]->dst
.sel
;
788 if (is_alu_reduction_inst(bc
, prev
[i
]))
791 chan
[i
] = prev
[i
]->dst
.chan
;
796 for (i
= 0; i
< 5; ++i
) {
797 struct r600_bc_alu
*alu
= slots
[i
];
801 num_src
= r600_bc_get_num_operands(bc
, alu
);
802 for (src
= 0; src
< num_src
; ++src
) {
803 if (!is_gpr(alu
->src
[src
].sel
) || alu
->src
[src
].rel
)
806 if (alu
->src
[src
].sel
== gpr
[4] &&
807 alu
->src
[src
].chan
== chan
[4]) {
808 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PS
;
809 alu
->src
[src
].chan
= 0;
813 for (j
= 0; j
< 4; ++j
) {
814 if (alu
->src
[src
].sel
== gpr
[j
] &&
815 alu
->src
[src
].chan
== j
) {
816 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PV
;
817 alu
->src
[src
].chan
= chan
[j
];
827 void r600_bc_special_constants(u32 value
, unsigned *sel
, unsigned *neg
)
831 *sel
= V_SQ_ALU_SRC_0
;
834 *sel
= V_SQ_ALU_SRC_1_INT
;
837 *sel
= V_SQ_ALU_SRC_M_1_INT
;
839 case 0x3F800000: // 1.0f
840 *sel
= V_SQ_ALU_SRC_1
;
842 case 0x3F000000: // 0.5f
843 *sel
= V_SQ_ALU_SRC_0_5
;
845 case 0xBF800000: // -1.0f
846 *sel
= V_SQ_ALU_SRC_1
;
849 case 0xBF000000: // -0.5f
850 *sel
= V_SQ_ALU_SRC_0_5
;
854 *sel
= V_SQ_ALU_SRC_LITERAL
;
859 /* compute how many literal are needed */
860 static int r600_bc_alu_nliterals(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
861 uint32_t literal
[4], unsigned *nliteral
)
863 unsigned num_src
= r600_bc_get_num_operands(bc
, alu
);
866 for (i
= 0; i
< num_src
; ++i
) {
867 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
868 uint32_t value
= alu
->src
[i
].value
[alu
->src
[i
].chan
];
870 for (j
= 0; j
< *nliteral
; ++j
) {
871 if (literal
[j
] == value
) {
879 literal
[(*nliteral
)++] = value
;
886 static void r600_bc_alu_adjust_literals(struct r600_bc
*bc
,
887 struct r600_bc_alu
*alu
,
888 uint32_t literal
[4], unsigned nliteral
)
890 unsigned num_src
= r600_bc_get_num_operands(bc
, alu
);
893 for (i
= 0; i
< num_src
; ++i
) {
894 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
895 uint32_t value
= alu
->src
[i
].value
[alu
->src
[i
].chan
];
896 for (j
= 0; j
< nliteral
; ++j
) {
897 if (literal
[j
] == value
) {
898 alu
->src
[i
].chan
= j
;
906 static int merge_inst_groups(struct r600_bc
*bc
, struct r600_bc_alu
*slots
[5],
907 struct r600_bc_alu
*alu_prev
)
909 struct r600_bc_alu
*prev
[5];
910 struct r600_bc_alu
*result
[5] = { NULL
};
912 uint32_t literal
[4], prev_literal
[4];
913 unsigned nliteral
= 0, prev_nliteral
= 0;
915 int i
, j
, r
, src
, num_src
;
916 int num_once_inst
= 0;
917 int have_mova
= 0, have_rel
= 0;
919 r
= assign_alu_units(bc
, alu_prev
, prev
);
923 for (i
= 0; i
< 5; ++i
) {
924 struct r600_bc_alu
*alu
;
926 /* check number of literals */
928 if (r600_bc_alu_nliterals(bc
, prev
[i
], literal
, &nliteral
))
930 if (r600_bc_alu_nliterals(bc
, prev
[i
], prev_literal
, &prev_nliteral
))
932 if (is_alu_mova_inst(bc
, prev
[i
])) {
937 num_once_inst
+= is_alu_once_inst(bc
, prev
[i
]);
939 if (slots
[i
] && r600_bc_alu_nliterals(bc
, slots
[i
], literal
, &nliteral
))
942 // let's check used slots
943 if (prev
[i
] && !slots
[i
]) {
946 } else if (prev
[i
] && slots
[i
]) {
947 if (result
[4] == NULL
&& prev
[4] == NULL
&& slots
[4] == NULL
) {
948 // trans unit is still free try to use it
949 if (is_alu_any_unit_inst(bc
, slots
[i
])) {
951 result
[4] = slots
[i
];
952 } else if (is_alu_any_unit_inst(bc
, prev
[i
])) {
953 result
[i
] = slots
[i
];
959 } else if(!slots
[i
]) {
962 result
[i
] = slots
[i
];
964 // let's check source gprs
966 num_once_inst
+= is_alu_once_inst(bc
, alu
);
968 num_src
= r600_bc_get_num_operands(bc
, alu
);
969 for (src
= 0; src
< num_src
; ++src
) {
970 if (alu
->src
[src
].rel
) {
976 // constants doesn't matter
977 if (!is_gpr(alu
->src
[src
].sel
))
980 for (j
= 0; j
< 5; ++j
) {
981 if (!prev
[j
] || !prev
[j
]->dst
.write
)
984 // if it's relative then we can't determin which gpr is really used
985 if (prev
[j
]->dst
.chan
== alu
->src
[src
].chan
&&
986 (prev
[j
]->dst
.sel
== alu
->src
[src
].sel
||
987 prev
[j
]->dst
.rel
|| alu
->src
[src
].rel
))
993 /* more than one PRED_ or KILL_ ? */
994 if (num_once_inst
> 1)
997 /* check if the result can still be swizzlet */
998 r
= check_and_set_bank_swizzle(bc
, result
);
1002 /* looks like everything worked out right, apply the changes */
1004 /* undo adding previus literals */
1005 bc
->cf_last
->ndw
-= align(prev_nliteral
, 2);
1007 /* sort instructions */
1008 for (i
= 0; i
< 5; ++i
) {
1009 slots
[i
] = result
[i
];
1011 LIST_DEL(&result
[i
]->list
);
1012 result
[i
]->last
= 0;
1013 LIST_ADDTAIL(&result
[i
]->list
, &bc
->cf_last
->alu
);
1017 /* determine new last instruction */
1018 LIST_ENTRY(struct r600_bc_alu
, bc
->cf_last
->alu
.prev
, list
)->last
= 1;
1020 /* determine new first instruction */
1021 for (i
= 0; i
< 5; ++i
) {
1023 bc
->cf_last
->curr_bs_head
= result
[i
];
1028 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->prev2_bs_head
;
1029 bc
->cf_last
->prev2_bs_head
= NULL
;
/* NOTE(review): this chunk is a lossy extraction — the embedded original line
 * numbers jump (1037 -> 1038 -> 1040, etc.), so statements/braces are missing.
 * Comments below describe only what is visible. */
1034 /* This code handles kcache lines as single blocks of 32 constants. We could
1035 * probably do slightly better by recognizing that we actually have two
1036 * consecutive lines of 16 constants, but the resulting code would also be
1037 * somewhat more complicated. */
/* Reserve kcache (constant cache) lines in the current CF for the constants
 * this ALU instruction reads; may start a new ALU clause, then rewrites the
 * src selects into kcache register space. Returns 0 or a negative errno. */
1038 static int r600_bc_alloc_kcache_lines(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, int type
)
1040 struct r600_bc_kcache
*kcache
= bc
->cf_last
->kcache
;
1041 unsigned int required_lines
;
1042 unsigned int free_lines
= 0;
1043 unsigned int cache_line
[3];
1044 unsigned int count
= 0;
1048 /* Collect required cache lines. */
1049 for (i
= 0; i
< 3; ++i
) {
/* sel < 512: GPR/inline source, not a constant-buffer constant — no kcache needed. */
1053 if (alu
->src
[i
].sel
< 512)
/* Each kcache line covers 32 constants; addresses advance in steps of 2. */
1056 line
= ((alu
->src
[i
].sel
- 512) / 32) * 2;
/* De-duplicate against lines already collected for this instruction. */
1058 for (j
= 0; j
< count
; ++j
) {
1059 if (cache_line
[j
] == line
) {
1066 cache_line
[count
++] = line
;
1069 /* This should never actually happen. */
1070 if (count
>= 3) return -ENOMEM
;
/* Count free slots among the two kcache entries of the current CF. */
1072 for (i
= 0; i
< 2; ++i
) {
1073 if (kcache
[i
].mode
== V_SQ_CF_KCACHE_NOP
) {
1078 /* Filter lines pulled in by previous intructions. Note that this is
1079 * only for the required_lines count, we can't remove these from the
1080 * cache_line array since we may have to start a new ALU clause. */
1081 for (i
= 0, required_lines
= count
; i
< count
; ++i
) {
1082 for (j
= 0; j
< 2; ++j
) {
1083 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1084 kcache
[j
].addr
== cache_line
[i
]) {
1091 /* Start a new ALU clause if needed. */
1092 if (required_lines
> free_lines
) {
1093 if ((r
= r600_bc_add_cf(bc
))) {
1096 bc
->cf_last
->inst
= (type
<< 3);
/* The new CF has fresh (all-NOP) kcache slots. */
1097 kcache
= bc
->cf_last
->kcache
;
1100 /* Setup the kcache lines. */
1101 for (i
= 0; i
< count
; ++i
) {
1104 for (j
= 0; j
< 2; ++j
) {
1105 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1106 kcache
[j
].addr
== cache_line
[i
]) {
1112 if (found
) continue;
/* Claim the first free slot for this line. */
1114 for (j
= 0; j
< 2; ++j
) {
1115 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_NOP
) {
1117 kcache
[j
].addr
= cache_line
[i
];
1118 kcache
[j
].mode
= V_SQ_CF_KCACHE_LOCK_2
;
1124 /* Alter the src operands to refer to the kcache. */
1125 for (i
= 0; i
< 3; ++i
) {
/* base[]: start of each kcache bank in ALU source-select space —
 * presumably kc0/kc1 halves; TODO confirm against the R600 ISA spec. */
1126 static const unsigned int base
[] = {128, 160, 256, 288};
1129 if (alu
->src
[i
].sel
< 512)
1132 alu
->src
[i
].sel
-= 512;
1133 line
= (alu
->src
[i
].sel
/ 32) * 2;
/* Find which kcache slot holds this line and remap sel into it. */
1135 for (j
= 0; j
< 2; ++j
) {
1136 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1137 kcache
[j
].addr
== line
) {
1138 alu
->src
[i
].sel
&= 0x1f;
1139 alu
->src
[i
].sel
+= base
[j
];
/* Append an ALU instruction of the given CF `type` to the bytecode: copies
 * `alu`, starts a new CF when needed, allocates kcache lines, tracks ngpr,
 * and — on instruction-group end — runs bank-swizzle/PV-PS processing.
 * (Extraction is lossy here: original line numbers jump, lines are missing.) */
1148 int r600_bc_add_alu_type(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
, int type
)
1150 struct r600_bc_alu
*nalu
= r600_bc_alu();
1151 struct r600_bc_alu
*lalu
;
1156 memcpy(nalu
, alu
, sizeof(struct r600_bc_alu
));
/* CF type mismatch with the last CF: maybe we can still reuse it. */
1158 if (bc
->cf_last
!= NULL
&& bc
->cf_last
->inst
!= (type
<< 3)) {
1159 /* check if we could add it anyway */
/* PUSH_BEFORE can share a plain ALU CF unless a predicate write exists. */
1160 if (bc
->cf_last
->inst
== (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3) &&
1161 type
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
) {
1162 LIST_FOR_EACH_ENTRY(lalu
, &bc
->cf_last
->alu
, list
) {
1163 if (lalu
->predicate
) {
1164 bc
->force_add_cf
= 1;
1169 bc
->force_add_cf
= 1;
1172 /* cf can contains only alu or only vtx or only tex */
1173 if (bc
->cf_last
== NULL
|| bc
->force_add_cf
) {
1174 r
= r600_bc_add_cf(bc
);
1180 bc
->cf_last
->inst
= (type
<< 3);
1182 /* Setup the kcache for this ALU instruction. This will start a new
1183 * ALU clause if needed. */
1184 if ((r
= r600_bc_alloc_kcache_lines(bc
, nalu
, type
))) {
/* First instruction of a new bank-swizzle group. */
1189 if (!bc
->cf_last
->curr_bs_head
) {
1190 bc
->cf_last
->curr_bs_head
= nalu
;
1192 /* number of gpr == the last gpr used in any alu */
1193 for (i
= 0; i
< 3; i
++) {
/* sel < 128 means a GPR source (>=128 is kcache/special). */
1194 if (nalu
->src
[i
].sel
>= bc
->ngpr
&& nalu
->src
[i
].sel
< 128) {
1195 bc
->ngpr
= nalu
->src
[i
].sel
+ 1;
/* Literal constants may be folded into inline special constants. */
1197 if (nalu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
)
1198 r600_bc_special_constants(
1199 nalu
->src
[i
].value
[nalu
->src
[i
].chan
],
1200 &nalu
->src
[i
].sel
, &nalu
->src
[i
].neg
);
1202 if (nalu
->dst
.sel
>= bc
->ngpr
) {
1203 bc
->ngpr
= nalu
->dst
.sel
+ 1;
1205 LIST_ADDTAIL(&nalu
->list
, &bc
->cf_last
->alu
);
1206 /* each alu use 2 dwords */
1207 bc
->cf_last
->ndw
+= 2;
1210 /* process cur ALU instructions for bank swizzle */
1212 uint32_t literal
[4];
/* Up to 5 ALU units (x,y,z,w,t) per instruction group. */
1214 struct r600_bc_alu
*slots
[5];
1215 r
= assign_alu_units(bc
, bc
->cf_last
->curr_bs_head
, slots
);
/* Try merging with the previous group, then reuse PV/PS where possible. */
1219 if (bc
->cf_last
->prev_bs_head
) {
1220 r
= merge_inst_groups(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1225 if (bc
->cf_last
->prev_bs_head
) {
1226 r
= replace_gpr_with_pv_ps(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1231 r
= check_and_set_bank_swizzle(bc
, slots
);
/* Count literal dwords for this group; they occupy padded pairs. */
1235 for (i
= 0, nliteral
= 0; i
< 5; i
++) {
1237 r
= r600_bc_alu_nliterals(bc
, slots
[i
], literal
, &nliteral
);
1242 bc
->cf_last
->ndw
+= align(nliteral
, 2);
1244 /* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
/* Force a new clause before the 128-slot limit can be exceeded. */
1246 if ((bc
->cf_last
->ndw
>> 1) >= 120) {
1247 bc
->force_add_cf
= 1;
/* Shift the group-history pointers for the next instruction group. */
1250 bc
->cf_last
->prev2_bs_head
= bc
->cf_last
->prev_bs_head
;
1251 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->curr_bs_head
;
1252 bc
->cf_last
->curr_bs_head
= NULL
;
/* Convenience wrapper: add an ALU instruction using the default ALU CF type
 * (chip-adjusted via BC_INST). */
1257 int r600_bc_add_alu(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
)
1259 return r600_bc_add_alu_type(bc
, alu
, BC_INST(bc
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
));
/* Unlink an ALU instruction from its CF. If it carried the group-terminating
 * `last` flag, move that flag to the preceding instruction so the group stays
 * properly terminated. */
1262 static void r600_bc_remove_alu(struct r600_bc_cf
*cf
, struct r600_bc_alu
*alu
)
1264 if (alu
->last
&& alu
->list
.prev
!= &cf
->alu
) {
1265 PREV_ALU(alu
)->last
= 1;
1267 LIST_DEL(&alu
->list
);
/* Append a vertex-fetch instruction: copies `vtx`, starts a new VTX CF when
 * the current one is absent or of another type, and accounts 4 dwords per
 * fetch (forcing a new CF after 8 fetches). */
1272 int r600_bc_add_vtx(struct r600_bc
*bc
, const struct r600_bc_vtx
*vtx
)
1274 struct r600_bc_vtx
*nvtx
= r600_bc_vtx();
1279 memcpy(nvtx
, vtx
, sizeof(struct r600_bc_vtx
))
;
1281 /* cf can contains only alu or only vtx or only tex */
1282 if (bc
->cf_last
== NULL
||
1283 (bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX
&&
1284 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) ||
1286 r
= r600_bc_add_cf(bc
);
1291 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_VTX
;
1293 LIST_ADDTAIL(&nvtx
->list
, &bc
->cf_last
->vtx
);
1294 /* each fetch use 4 dwords */
1295 bc
->cf_last
->ndw
+= 4;
/* 8 fetches (32 dwords) max per fetch clause. */
1297 if ((bc
->cf_last
->ndw
/ 4) > 7)
1298 bc
->force_add_cf
= 1;
/* Append a texture-fetch instruction: copies `tex`, starts a new TEX CF when
 * needed, updates the GPR high-water mark for src/dst, and accounts 4 dwords
 * per fetch (forcing a new CF after 8 fetches). */
1302 int r600_bc_add_tex(struct r600_bc
*bc
, const struct r600_bc_tex
*tex
)
1304 struct r600_bc_tex
*ntex
= r600_bc_tex();
1309 memcpy(ntex
, tex
, sizeof(struct r600_bc_tex
));
1311 /* cf can contains only alu or only vtx or only tex */
1312 if (bc
->cf_last
== NULL
||
1313 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_TEX
||
1315 r
= r600_bc_add_cf(bc
);
1320 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_TEX
;
/* Track the highest GPR touched so ngpr stays correct. */
1322 if (ntex
->src_gpr
>= bc
->ngpr
) {
1323 bc
->ngpr
= ntex
->src_gpr
+ 1;
1325 if (ntex
->dst_gpr
>= bc
->ngpr
) {
1326 bc
->ngpr
= ntex
->dst_gpr
+ 1;
1328 LIST_ADDTAIL(&ntex
->list
, &bc
->cf_last
->tex
);
1329 /* each texture fetch use 4 dwords */
1330 bc
->cf_last
->ndw
+= 4;
/* 8 fetches max per clause, same limit as vertex fetches. */
1332 if ((bc
->cf_last
->ndw
/ 4) > 7)
1333 bc
->force_add_cf
= 1;
/* Append a bare control-flow instruction (jump/else/pop/loop/etc.) as its own
 * CF entry with condition ACTIVE. */
1337 int r600_bc_add_cfinst(struct r600_bc
*bc
, int inst
)
1340 r
= r600_bc_add_cf(bc
);
1344 bc
->cf_last
->cond
= V_SQ_CF_COND_ACTIVE
;
1345 bc
->cf_last
->inst
= inst
;
1349 /* common to all 3 families */
/* Encode one vertex-fetch instruction into 4 bytecode dwords at `id`.
 * Fetch shaders (bc->type == -1) offset the buffer id by a per-chip
 * fetch-resource base (160 on pre-Evergreen, 0 on Evergreen). */
1350 static int r600_bc_vtx_build(struct r600_bc
*bc
, struct r600_bc_vtx
*vtx
, unsigned id
)
1352 unsigned fetch_resource_start
= 0;
1354 /* check if we are fetch shader */
1355 /* fetch shader can also access vertex resource,
1356 * first fetch shader resource is at 160
1358 if (bc
->type
== -1) {
1359 switch (bc
->chiprev
) {
1364 fetch_resource_start
= 160;
1367 case CHIPREV_EVERGREEN
:
1368 fetch_resource_start
= 0;
1371 fprintf(stderr
, "%s:%s:%d unknown chiprev %d\n",
1372 __FILE__
, __func__
, __LINE__
, bc
->chiprev
);
/* WORD0: buffer, source GPR/select, mega-fetch count. */
1376 bc
->bytecode
[id
++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx
->buffer_id
+ fetch_resource_start
) |
1377 S_SQ_VTX_WORD0_SRC_GPR(vtx
->src_gpr
) |
1378 S_SQ_VTX_WORD0_SRC_SEL_X(vtx
->src_sel_x
) |
1379 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx
->mega_fetch_count
);
/* WORD1: destination swizzle, format and destination GPR. */
1380 bc
->bytecode
[id
++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx
->dst_sel_x
) |
1381 S_SQ_VTX_WORD1_DST_SEL_Y(vtx
->dst_sel_y
) |
1382 S_SQ_VTX_WORD1_DST_SEL_Z(vtx
->dst_sel_z
) |
1383 S_SQ_VTX_WORD1_DST_SEL_W(vtx
->dst_sel_w
) |
1384 S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx
->use_const_fields
) |
1385 S_SQ_VTX_WORD1_DATA_FORMAT(vtx
->data_format
) |
1386 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx
->num_format_all
) |
1387 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx
->format_comp_all
) |
1388 S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx
->srf_mode_all
) |
1389 S_SQ_VTX_WORD1_GPR_DST_GPR(vtx
->dst_gpr
);
/* WORD2: mega-fetch flag; fourth dword is padding. */
1390 bc
->bytecode
[id
++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
1391 bc
->bytecode
[id
++] = 0;
1395 /* common to all 3 families */
/* Encode one texture-fetch instruction into 4 bytecode dwords at `id`:
 * WORD0 = opcode/resource/src, WORD1 = dst + coord types, WORD2 = offsets,
 * sampler and source swizzle, final dword is padding. */
1396 static int r600_bc_tex_build(struct r600_bc
*bc
, struct r600_bc_tex
*tex
, unsigned id
)
1398 bc
->bytecode
[id
++] = S_SQ_TEX_WORD0_TEX_INST(tex
->inst
) |
1399 S_SQ_TEX_WORD0_RESOURCE_ID(tex
->resource_id
) |
1400 S_SQ_TEX_WORD0_SRC_GPR(tex
->src_gpr
) |
1401 S_SQ_TEX_WORD0_SRC_REL(tex
->src_rel
);
1402 bc
->bytecode
[id
++] = S_SQ_TEX_WORD1_DST_GPR(tex
->dst_gpr
) |
1403 S_SQ_TEX_WORD1_DST_REL(tex
->dst_rel
) |
1404 S_SQ_TEX_WORD1_DST_SEL_X(tex
->dst_sel_x
) |
1405 S_SQ_TEX_WORD1_DST_SEL_Y(tex
->dst_sel_y
) |
1406 S_SQ_TEX_WORD1_DST_SEL_Z(tex
->dst_sel_z
) |
1407 S_SQ_TEX_WORD1_DST_SEL_W(tex
->dst_sel_w
) |
1408 S_SQ_TEX_WORD1_LOD_BIAS(tex
->lod_bias
) |
1409 S_SQ_TEX_WORD1_COORD_TYPE_X(tex
->coord_type_x
) |
1410 S_SQ_TEX_WORD1_COORD_TYPE_Y(tex
->coord_type_y
) |
1411 S_SQ_TEX_WORD1_COORD_TYPE_Z(tex
->coord_type_z
) |
1412 S_SQ_TEX_WORD1_COORD_TYPE_W(tex
->coord_type_w
);
1413 bc
->bytecode
[id
++] = S_SQ_TEX_WORD2_OFFSET_X(tex
->offset_x
) |
1414 S_SQ_TEX_WORD2_OFFSET_Y(tex
->offset_y
) |
1415 S_SQ_TEX_WORD2_OFFSET_Z(tex
->offset_z
) |
1416 S_SQ_TEX_WORD2_SAMPLER_ID(tex
->sampler_id
) |
1417 S_SQ_TEX_WORD2_SRC_SEL_X(tex
->src_sel_x
) |
1418 S_SQ_TEX_WORD2_SRC_SEL_Y(tex
->src_sel_y
) |
1419 S_SQ_TEX_WORD2_SRC_SEL_Z(tex
->src_sel_z
) |
1420 S_SQ_TEX_WORD2_SRC_SEL_W(tex
->src_sel_w
);
1421 bc
->bytecode
[id
++] = 0;
1425 /* r600 only, r700/eg bits in r700_asm.c */
/* Encode one ALU instruction into 2 bytecode dwords at `id`. WORD0 carries
 * src0/src1; WORD1 is encoded either in OP3 form (three sources, no output
 * modifier/write mask) or OP2 form (abs/omod/write-mask/predicate bits) —
 * the branch selecting between them is on lines dropped by the extraction. */
1426 static int r600_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
)
1428 /* don't replace gpr by pv or ps for destination register */
1429 bc
->bytecode
[id
++] = S_SQ_ALU_WORD0_SRC0_SEL(alu
->src
[0].sel
) |
1430 S_SQ_ALU_WORD0_SRC0_REL(alu
->src
[0].rel
) |
1431 S_SQ_ALU_WORD0_SRC0_CHAN(alu
->src
[0].chan
) |
1432 S_SQ_ALU_WORD0_SRC0_NEG(alu
->src
[0].neg
) |
1433 S_SQ_ALU_WORD0_SRC1_SEL(alu
->src
[1].sel
) |
1434 S_SQ_ALU_WORD0_SRC1_REL(alu
->src
[1].rel
) |
1435 S_SQ_ALU_WORD0_SRC1_CHAN(alu
->src
[1].chan
) |
1436 S_SQ_ALU_WORD0_SRC1_NEG(alu
->src
[1].neg
) |
1437 S_SQ_ALU_WORD0_LAST(alu
->last
);
/* OP3 encoding: src2 fields replace the abs/omod/write-mask bits. */
1440 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1441 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1442 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1443 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1444 S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu
->src
[2].sel
) |
1445 S_SQ_ALU_WORD1_OP3_SRC2_REL(alu
->src
[2].rel
) |
1446 S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu
->src
[2].chan
) |
1447 S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu
->src
[2].neg
) |
1448 S_SQ_ALU_WORD1_OP3_ALU_INST(alu
->inst
) |
1449 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
);
/* OP2 encoding: two sources plus abs/write-mask/omod/predicate bits. */
1451 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1452 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1453 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1454 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1455 S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu
->src
[0].abs
) |
1456 S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu
->src
[1].abs
) |
1457 S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu
->dst
.write
) |
1458 S_SQ_ALU_WORD1_OP2_OMOD(alu
->omod
) |
1459 S_SQ_ALU_WORD1_OP2_ALU_INST(alu
->inst
) |
1460 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
) |
1461 S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu
->predicate
) |
1462 S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu
->predicate
);
/* Classify a CF instruction into ALU / TEXTURE / VERTEX / EXPORT / OTHER
 * (flow control). Unknown opcodes are reported via R600_ERR. */
1476 static enum cf_class
r600_bc_cf_class(struct r600_bc_cf
*cf
)
1479 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1480 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1481 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1482 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1483 return CF_CLASS_ALU
;
1485 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1486 return CF_CLASS_TEXTURE
;
1488 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1489 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1490 return CF_CLASS_VERTEX
;
1492 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1493 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1494 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1495 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1496 return CF_CLASS_EXPORT
;
1498 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1499 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1500 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1501 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1502 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1503 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1504 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1505 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1506 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1507 return CF_CLASS_OTHER
;
1510 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1515 /* common for r600/r700 - eg in eg_asm.c */
/* Encode one CF entry into 2 bytecode dwords, dispatched on its class.
 * end_of_program is set on the last CF in the list. */
1516 static int r600_bc_cf_build(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
1518 unsigned id
= cf
->id
;
1519 unsigned end_of_program
= bc
->cf
.prev
== &cf
->list
;
1521 switch (r600_bc_cf_class(cf
)) {
/* ALU clauses cannot end the program (no END_OF_PROGRAM bit in this word). */
1523 assert(!end_of_program
);
1524 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD0_ADDR(cf
->addr
>> 1) |
1525 S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf
->kcache
[0].mode
) |
1526 S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf
->kcache
[0].bank
) |
1527 S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf
->kcache
[1].bank
);
1529 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD1_CF_INST(cf
->inst
>> 3) |
1530 S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf
->kcache
[1].mode
) |
1531 S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf
->kcache
[0].addr
) |
1532 S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf
->kcache
[1].addr
) |
1533 S_SQ_CF_ALU_WORD1_BARRIER(cf
->barrier
) |
/* USES_WATERFALL only exists on original R600. */
1534 S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc
->chiprev
== CHIPREV_R600
? cf
->r6xx_uses_waterfall
: 0) |
1535 S_SQ_CF_ALU_WORD1_COUNT((cf
->ndw
/ 2) - 1);
1537 case CF_CLASS_TEXTURE
:
1538 case CF_CLASS_VERTEX
:
/* Fetch clauses: address + count (4 dwords per fetch). */
1539 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->addr
>> 1);
1540 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1541 S_SQ_CF_WORD1_BARRIER(cf
->barrier
) |
1542 S_SQ_CF_WORD1_COUNT((cf
->ndw
/ 4) - 1) |
1543 S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program
);
1545 case CF_CLASS_EXPORT
:
1546 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf
->output
.gpr
) |
1547 S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf
->output
.elem_size
) |
1548 S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf
->output
.array_base
) |
1549 S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf
->output
.type
);
1550 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf
->output
.burst_count
- 1) |
1551 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf
->output
.swizzle_x
) |
1552 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf
->output
.swizzle_y
) |
1553 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf
->output
.swizzle_z
) |
1554 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf
->output
.swizzle_w
) |
1555 S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf
->barrier
) |
1556 S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf
->inst
) |
1557 S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program
);
1559 case CF_CLASS_OTHER
:
/* Flow control: target address, condition and pop count. */
1560 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->cf_addr
>> 1);
1561 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1562 S_SQ_CF_WORD1_BARRIER(cf
->barrier
) |
1563 S_SQ_CF_WORD1_COND(cf
->cond
) |
1564 S_SQ_CF_WORD1_POP_COUNT(cf
->pop_count
) |
1565 S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program
);
1569 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
/* NOTE(review): most members of these structs were dropped by the extraction;
 * visible: per-range channel bitfield (one bit per component) and the
 * dynamically grown `ranges` array inside struct gpr_usage. */
1575 struct gpr_usage_range
{
1583 unsigned channels
:4;
1587 struct gpr_usage_range
*ranges
;
/* Return the most recently added usage range of `usage`.
 * NOTE(review): with nranges == 0 this would point before the array — callers
 * visible here check nranges first; confirm all call sites do. */
1590 static struct gpr_usage_range
* last_gpr_usage_range(struct gpr_usage
*usage
)
1593 return usage
->ranges
+ usage
->nranges
- 1;
/* Grow usage->ranges by one and return the new (last) range, initialized with
 * no preferred replacement and no relative block. */
1598 static struct gpr_usage_range
* add_gpr_usage_range(struct gpr_usage
*usage
)
1600 struct gpr_usage_range
*range
;
/* NOTE(review): realloc result overwrites usage->ranges directly — on failure
 * the old buffer would leak; the error check may be on dropped lines. */
1603 usage
->ranges
= realloc(usage
->ranges
, usage
->nranges
* sizeof(struct gpr_usage_range
));
1607 range
= last_gpr_usage_range(usage
);
1608 range
->replacement
= -1; /* no prefered replacement */
1609 range
->rel_block
= -1;
/* Record that channel `chan` of this GPR is read at instruction `id`:
 * marks the channel live, resets the pending-write tracker, and extends
 * (or creates) the current usage range. */
1616 static void notice_gpr_read(struct gpr_usage
*usage
, int id
, unsigned chan
)
1618 struct gpr_usage_range
* range
;
1620 usage
->channels
|= 1 << chan
;
1621 usage
->first_write
= -1;
1622 if (!usage
->nranges
) {
1623 range
= add_gpr_usage_range(usage
);
1625 range
= last_gpr_usage_range(usage
);
/* Extend the range's end if this read is later than what it covers. */
1627 if (range
&& range
->end
< id
)
/* A relatively-addressed read may touch any GPR from `gpr` up to ngpr:
 * record a read on all of them and tag the base register's range with the
 * size of the relative block. */
1631 static void notice_gpr_rel_read(struct r600_bc
*bc
, struct gpr_usage usage
[128],
1632 int id
, unsigned gpr
, unsigned chan
)
1635 for (i
= gpr
; i
< bc
->ngpr
; ++i
)
1636 notice_gpr_read(&usage
[i
], id
, chan
);
1638 last_gpr_usage_range(&usage
[gpr
])->rel_block
= bc
->ngpr
- gpr
;
/* Remember the instruction id of the most recent write to `chan`. */
1641 static void notice_gpr_last_write(struct gpr_usage
*usage
, int id
, unsigned chan
)
1643 usage
->last_write
[chan
] = id
;
/* Record a write to channel `chan` at `id`. When all live channels have been
 * overwritten (and the write is unconditional), the old usage range ends and
 * a new one starts; `prefered_replacement` seeds the new range's preferred
 * remap target (-1 for none). */
1646 static void notice_gpr_write(struct gpr_usage
*usage
, int id
, unsigned chan
,
1647 int predicate
, int prefered_replacement
)
1649 struct gpr_usage_range
* last_range
= last_gpr_usage_range(usage
);
1650 int start
= usage
->first_write
!= -1 ? usage
->first_write
: id
;
1651 usage
->channels
&= ~(1 << chan
);
/* Some channels still carry the old value: range can't be split yet. */
1652 if (usage
->channels
) {
1653 if (usage
->first_write
== -1)
1654 usage
->first_write
= id
;
1655 } else if (!last_range
|| (last_range
->start
!= start
&& !predicate
)) {
1656 usage
->first_write
= start
;
1657 struct gpr_usage_range
* range
= add_gpr_usage_range(usage
);
1658 range
->replacement
= prefered_replacement
;
1659 range
->start
= start
;
1660 } else if (last_range
->start
== start
&& prefered_replacement
!= -1) {
1661 last_range
->replacement
= prefered_replacement
;
1663 notice_gpr_last_write(usage
, id
, chan
);
/* Relative write with unknown target: conservatively update last_write on
 * every GPR. */
1666 static void notice_gpr_rel_last_write(struct gpr_usage usage
[128], int id
, unsigned chan
)
1669 for (i
= 0; i
< 128; ++i
)
1670 notice_gpr_last_write(&usage
[i
], id
, chan
);
/* Relative write with unknown target: conservatively record a predicated
 * (non-range-splitting) write on every GPR. */
1673 static void notice_gpr_rel_write(struct gpr_usage usage
[128], int id
, unsigned chan
)
1676 for (i
= 0; i
< 128; ++i
)
1677 notice_gpr_write(&usage
[i
], id
, chan
, 1, -1);
/* Record the GPR reads of one ALU instruction's source operands (constants
 * are skipped; relative sources use the conservative rel-read path). */
1680 static void notice_alu_src_gprs(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
1681 struct gpr_usage usage
[128], int id
)
1683 unsigned src
, num_src
;
1685 num_src
= r600_bc_get_num_operands(bc
, alu
);
1686 for (src
= 0; src
< num_src
; ++src
) {
1687 // constants doesn't matter
1688 if (!is_gpr(alu
->src
[src
].sel
))
1691 if (alu
->src
[src
].rel
)
1692 notice_gpr_rel_read(bc
, usage
, id
, alu
->src
[src
].sel
, alu
->src
[src
].chan
);
1694 notice_gpr_read(&usage
[alu
->src
[src
].sel
], id
, alu
->src
[src
].chan
);
/* Record the destination writes of an ALU instruction group (walks the list
 * starting at alu_first). A GPR-to-GPR MOV passes its source as the preferred
 * replacement so the copy can later be coalesced away. */
1698 static void notice_alu_dst_gprs(struct r600_bc_alu
*alu_first
, struct gpr_usage usage
[128],
1699 int id
, int predicate
)
1701 struct r600_bc_alu
*alu
;
1702 for (alu
= alu_first
; alu
; alu
= LIST_ENTRY(struct r600_bc_alu
, alu
->list
.next
, list
)) {
1703 if (alu
->dst
.write
) {
1705 notice_gpr_rel_write(usage
, id
, alu
->dst
.chan
);
1706 else if (alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
&& is_gpr(alu
->src
[0].sel
))
1707 notice_gpr_write(&usage
[alu
->dst
.sel
], id
, alu
->dst
.chan
,
1708 predicate
, alu
->src
[0].sel
);
1710 notice_gpr_write(&usage
[alu
->dst
.sel
], id
, alu
->dst
.chan
, predicate
, -1);
/* Record GPR usage of a texture fetch: source-select values < 4 are real
 * component reads, destination selects != 7 are real component writes.
 * The rel branches (visible only as parallel call sequences) handle
 * relatively-addressed fetches conservatively. */
1718 static void notice_tex_gprs(struct r600_bc
*bc
, struct r600_bc_tex
*tex
,
1719 struct gpr_usage usage
[128],
1720 int id
, int predicate
)
1723 if (tex
->src_sel_x
< 4)
1724 notice_gpr_rel_read(bc
, usage
, id
, tex
->src_gpr
, tex
->src_sel_x
);
1725 if (tex
->src_sel_y
< 4)
1726 notice_gpr_rel_read(bc
, usage
, id
, tex
->src_gpr
, tex
->src_sel_y
);
1727 if (tex
->src_sel_z
< 4)
1728 notice_gpr_rel_read(bc
, usage
, id
, tex
->src_gpr
, tex
->src_sel_z
);
1729 if (tex
->src_sel_w
< 4)
1730 notice_gpr_rel_read(bc
, usage
, id
, tex
->src_gpr
, tex
->src_sel_w
);
/* Non-relative source reads. */
1732 if (tex
->src_sel_x
< 4)
1733 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_x
);
1734 if (tex
->src_sel_y
< 4)
1735 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_y
);
1736 if (tex
->src_sel_z
< 4)
1737 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_z
);
1738 if (tex
->src_sel_w
< 4)
1739 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_w
);
/* Relative destination writes (dst_sel 7 masks the component). */
1742 if (tex
->dst_sel_x
!= 7)
1743 notice_gpr_rel_write(usage
, id
, 0);
1744 if (tex
->dst_sel_y
!= 7)
1745 notice_gpr_rel_write(usage
, id
, 1);
1746 if (tex
->dst_sel_z
!= 7)
1747 notice_gpr_rel_write(usage
, id
, 2);
1748 if (tex
->dst_sel_w
!= 7)
1749 notice_gpr_rel_write(usage
, id
, 3);
/* Non-relative destination writes. */
1751 if (tex
->dst_sel_x
!= 7)
1752 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 0, predicate
, -1);
1753 if (tex
->dst_sel_y
!= 7)
1754 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 1, predicate
, -1);
1755 if (tex
->dst_sel_z
!= 7)
1756 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 2, predicate
, -1);
1757 if (tex
->dst_sel_w
!= 7)
1758 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 3, predicate
, -1);
/* Record GPR usage of a vertex fetch: one source component read plus up to
 * four destination component writes (dst_sel 7 masks a component). */
1762 static void notice_vtx_gprs(struct r600_bc_vtx
*vtx
, struct gpr_usage usage
[128],
1763 int id
, int predicate
)
1765 notice_gpr_read(&usage
[vtx
->src_gpr
], id
, vtx
->src_sel_x
);
1767 if (vtx
->dst_sel_x
!= 7)
1768 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 0, predicate
, -1);
1769 if (vtx
->dst_sel_y
!= 7)
1770 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 1, predicate
, -1);
1771 if (vtx
->dst_sel_z
!= 7)
1772 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 2, predicate
, -1);
1773 if (vtx
->dst_sel_w
!= 7)
1774 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 3, predicate
, -1);
/* Record GPR usage of an export CF: the export is treated as reading at the
 * id of the last write to its GPR (MAX4 over the four channels); the CF and
 * that id are stashed in export_cf/export_remap for later fix-up. Assumes
 * each GPR is exported at most once (see caller's comment). */
1777 static void notice_export_gprs(struct r600_bc_cf
*cf
, struct gpr_usage usage
[128],
1778 struct r600_bc_cf
*export_cf
[128], int export_remap
[128])
1780 //TODO handle other memory operations
1781 struct gpr_usage
*output
= &usage
[cf
->output
.gpr
];
1782 int id
= MAX4(output
->last_write
[0], output
->last_write
[1],
1783 output
->last_write
[2], output
->last_write
[3]);
1787 export_cf
[cf
->output
.gpr
] = cf
;
1788 export_remap
[cf
->output
.gpr
] = id
;
/* swizzle values < 4 select a real component; >= 4 are constants/masked. */
1789 if (cf
->output
.swizzle_x
< 4)
1790 notice_gpr_read(output
, id
, cf
->output
.swizzle_x
);
1791 if (cf
->output
.swizzle_y
< 4)
1792 notice_gpr_read(output
, id
, cf
->output
.swizzle_y
);
1793 if (cf
->output
.swizzle_z
< 4)
1794 notice_gpr_read(output
, id
, cf
->output
.swizzle_z
);
1795 if (cf
->output
.swizzle_w
< 4)
1796 notice_gpr_read(output
, id
, cf
->output
.swizzle_w
);
/* Find the usage range that covers a READ at `id`: a read belongs to a range
 * that started strictly before it (start < id <= end). */
1799 static struct gpr_usage_range
*find_src_range(struct gpr_usage
*usage
, int id
)
1802 for (i
= 0; i
< usage
->nranges
; ++i
) {
1803 struct gpr_usage_range
* range
= &usage
->ranges
[i
];
1805 if (range
->start
< id
&& id
<= range
->end
)
/* Find the usage range that covers a WRITE at `id`: a write belongs to a range
 * starting at or before it (start <= id < end); end == -1 means open-ended. */
1811 static struct gpr_usage_range
*find_dst_range(struct gpr_usage
*usage
, int id
)
1814 for (i
= 0; i
< usage
->nranges
; ++i
) {
1815 struct gpr_usage_range
* range
= &usage
->ranges
[i
];
1816 int end
= range
->end
;
1818 if (range
->start
<= id
&& (id
< end
|| end
== -1))
/* Decide whether a CF barrier is needed before using channel `chan` at `id`:
 * needed when the channel was last written by a different CF (ids are grouped
 * in 0x100 blocks per CF, hence the & ~0xFF compare) after the last barrier. */
1824 static int is_barrier_needed(struct gpr_usage
*usage
, int id
, unsigned chan
, int last_barrier
)
1826 if (usage
->last_write
[chan
] != (id
& ~0xFF))
1827 return usage
->last_write
[chan
] >= last_barrier
;
/* True when the two usage ranges overlap in instruction-id space. */
1832 static int is_intersection(struct gpr_usage_range
* a
, struct gpr_usage_range
* b
)
1834 return a
->start
<= b
->end
&& b
->start
< a
->end
;
/* Score how well `range` (currently on register `current`) would fit onto
 * register `gpr` (and its whole relative block, if any). Returns -1 when
 * impossible (overlapping usage, or the block runs past GPR 127); otherwise
 * a smaller score means a tighter fit (sum of the gaps to the neighbouring
 * ranges). */
1837 static int rate_replacement(struct gpr_usage usage
[128], unsigned current
, unsigned gpr
,
1838 struct gpr_usage_range
* range
)
1840 int max_gpr
= gpr
+ MAX2(range
->rel_block
, 1);
1841 int best_start
= 0x3FFFFFFF, best_end
= 0x3FFFFFFF;
1844 for (; gpr
< max_gpr
; ++gpr
) {
1846 if (gpr
>= 128) /* relative gpr block won't fit into clause temporaries */
1847 return -1; /* forget it */
1849 if (gpr
== current
) /* ignore ranges of to be replaced register */
1852 for (i
= 0; i
< usage
[gpr
].nranges
; ++i
) {
1853 if (usage
[gpr
].ranges
[i
].replacement
< gpr
)
1854 continue; /* ignore already remapped ranges */
1856 if (is_intersection(&usage
[gpr
].ranges
[i
], range
))
1857 return -1; /* forget it if usages overlap */
/* Track the smallest gap before and after `range` on this register. */
1859 if (range
->start
>= usage
[gpr
].ranges
[i
].end
)
1860 best_start
= MIN2(best_start
, range
->start
- usage
[gpr
].ranges
[i
].end
);
1862 if (range
->end
!= -1 && range
->end
<= usage
[gpr
].ranges
[i
].start
)
1863 best_end
= MIN2(best_end
, usage
[gpr
].ranges
[i
].start
- range
->end
);
1866 return best_start
+ best_end
;
/* Pick the remap target for `range` of register `current`: first try the
 * preferred replacement recorded from a MOV, then clause temporaries
 * (GPRs 127..124) for ranges confined to one ALU clause, then any lower
 * register. On success, reserve the span on the target and propagate the
 * choice across the relative register block. */
1869 static void find_replacement(struct gpr_usage usage
[128], unsigned current
,
1870 struct gpr_usage_range
*range
)
1873 int best_gpr
= -1, best_rate
= 0x7FFFFFFF;
1875 if (range
->replacement
== current
)
1876 return; /* register prefers to be not remapped */
/* Chase one level of indirection: the preferred target may itself have
 * been remapped already. */
1878 if (range
->replacement
!= -1 && range
->replacement
<= current
) {
1879 struct gpr_usage_range
*other
= find_src_range(&usage
[range
->replacement
], range
->start
);
1880 if (other
&& other
->replacement
!= -1)
1881 range
->replacement
= other
->replacement
;
1884 if (range
->replacement
!= -1 && range
->replacement
< current
) {
1885 int rate
= rate_replacement(usage
, current
, range
->replacement
, range
);
1887 /* check if prefered replacement can be used */
1890 best_gpr
= range
->replacement
;
/* Both ids in the same 0x100 block => range lives in a single clause. */
1894 if (best_gpr
== -1 && (range
->start
& ~0xFF) == (range
->end
& ~0xFF)) {
1895 /* register is just used inside one ALU clause */
1896 /* try to use clause temporaries for it */
1897 for (i
= 127; i
> 123; --i
) {
1898 int rate
= rate_replacement(usage
, current
, i
, range
);
1900 if (rate
== -1) /* can't be used because ranges overlap */
1903 if (rate
< best_rate
) {
1907 /* can't get better than this */
/* Fall back: scan every register below `current` for the tightest fit. */
1914 if (best_gpr
== -1) {
1915 for (i
= 0; i
< current
; ++i
) {
1916 int rate
= rate_replacement(usage
, current
, i
, range
);
1918 if (rate
== -1) /* can't be used because ranges overlap */
1921 if (rate
< best_rate
) {
1925 /* can't get better than this */
/* Reserve the chosen span on the target so later ranges see it occupied. */
1932 if (best_gpr
!= -1) {
1933 struct gpr_usage_range
*reservation
= add_gpr_usage_range(&usage
[best_gpr
]);
1934 reservation
->replacement
= best_gpr
;
1935 reservation
->rel_block
= -1;
1936 reservation
->start
= range
->start
;
1937 reservation
->end
= range
->end
;
1941 range
->replacement
= best_gpr
;
1942 if (range
->rel_block
== -1)
1943 return; /* no relative block to handle we are done here */
1945 /* set prefered register for the whole relative register block */
1946 for (i
= current
+ 1, ++best_gpr
; i
< current
+ range
->rel_block
; ++i
, ++best_gpr
) {
1947 for (j
= 0; j
< usage
[i
].nranges
; ++j
) {
1948 if (is_intersection(&usage
[i
].ranges
[j
], range
))
1949 usage
[i
].ranges
[j
].replacement
= best_gpr
;
/* Apply the computed register remapping to one ALU instruction (sources and
 * destination) and accumulate whether a CF barrier is required; finally
 * refresh last_write bookkeeping for the destination channel. */
1954 static void replace_alu_gprs(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, struct gpr_usage usage
[128],
1955 int id
, int last_barrier
, unsigned *barrier
)
1957 struct gpr_usage
*cur_usage
;
1958 struct gpr_usage_range
*range
;
1959 unsigned src
, num_src
;
1961 num_src
= r600_bc_get_num_operands(bc
, alu
);
1962 for (src
= 0; src
< num_src
; ++src
) {
1963 // constants doesn't matter
1964 if (!is_gpr(alu
->src
[src
].sel
))
1967 cur_usage
= &usage
[alu
->src
[src
].sel
];
1968 range
= find_src_range(cur_usage
, id
);
1969 alu
->src
[src
].sel
= range
->replacement
;
1971 *barrier
|= is_barrier_needed(cur_usage
, id
, alu
->src
[src
].chan
, last_barrier
);
1974 if (alu
->dst
.write
) {
1975 cur_usage
= &usage
[alu
->dst
.sel
];
1976 range
= find_dst_range(cur_usage
, id
);
/* No mapped range: destination kept in place (see TODO below). */
1977 if (!range
|| range
->replacement
== -1) {
1981 /*TODO: really check that register 123 is useable */
1984 alu
->dst
.sel
= range
->replacement
;
1985 *barrier
|= is_barrier_needed(cur_usage
, id
, alu
->dst
.chan
, last_barrier
);
1988 if (alu
->dst
.write
) {
1990 notice_gpr_rel_last_write(usage
, id
, alu
->dst
.chan
);
1992 notice_gpr_last_write(cur_usage
, id
, alu
->dst
.chan
);
/* Apply the register remapping to a texture fetch: barrier checks on each
 * read component, remap src_gpr and dst_gpr, and update last_write for each
 * written component (rel and non-rel paths are both visible). */
1996 static void replace_tex_gprs(struct r600_bc_tex
*tex
, struct gpr_usage usage
[128],
1997 int id
, int last_barrier
, unsigned *barrier
)
1999 struct gpr_usage
*cur_usage
= &usage
[tex
->src_gpr
];
2000 struct gpr_usage_range
*range
= find_src_range(cur_usage
, id
);
2005 if (tex
->src_sel_x
< 4)
2006 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_x
, last_barrier
);
2007 if (tex
->src_sel_y
< 4)
2008 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_y
, last_barrier
);
2009 if (tex
->src_sel_z
< 4)
2010 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_z
, last_barrier
);
2011 if (tex
->src_sel_w
< 4)
2012 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_w
, last_barrier
);
2014 tex
->src_gpr
= range
->replacement
;
2016 cur_usage
= &usage
[tex
->dst_gpr
];
2018 range
= find_dst_range(cur_usage
, id
);
2020 tex
->dst_gpr
= range
->replacement
;
/* Relative destination: conservatively touch every register's last_write. */
2023 if (tex
->dst_sel_x
!= 7)
2024 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_x
);
2025 if (tex
->dst_sel_y
!= 7)
2026 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_y
);
2027 if (tex
->dst_sel_z
!= 7)
2028 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_z
);
2029 if (tex
->dst_sel_w
!= 7)
2030 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_w
);
/* Non-relative destination: update the remapped register only. */
2032 if (tex
->dst_sel_x
!= 7)
2033 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_x
);
2034 if (tex
->dst_sel_y
!= 7)
2035 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_y
);
2036 if (tex
->dst_sel_z
!= 7)
2037 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_z
);
2038 if (tex
->dst_sel_w
!= 7)
2039 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_w
);
/* Apply the register remapping to a vertex fetch: barrier check on the source
 * component, remap src_gpr/dst_gpr, update last_write for written channels. */
2046 static void replace_vtx_gprs(struct r600_bc_vtx
*vtx
, struct gpr_usage usage
[128],
2047 int id
, int last_barrier
, unsigned *barrier
)
2049 struct gpr_usage
*cur_usage
= &usage
[vtx
->src_gpr
];
2050 struct gpr_usage_range
*range
= find_src_range(cur_usage
, id
);
2052 *barrier
|= is_barrier_needed(cur_usage
, id
, vtx
->src_sel_x
, last_barrier
);
2054 vtx
->src_gpr
= range
->replacement
;
2056 cur_usage
= &usage
[vtx
->dst_gpr
];
2057 range
= find_dst_range(cur_usage
, id
);
2059 vtx
->dst_gpr
= range
->replacement
;
2061 if (vtx
->dst_sel_x
!= 7)
2062 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_x
);
2063 if (vtx
->dst_sel_y
!= 7)
2064 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_y
);
2065 if (vtx
->dst_sel_z
!= 7)
2066 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_z
);
2067 if (vtx
->dst_sel_w
!= 7)
2068 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_w
);
/* Apply the register remapping to an export CF: set the CF's own barrier bit
 * per read component (id -1 forces the "different CF" path in
 * is_barrier_needed) and remap the output GPR. */
2074 static void replace_export_gprs(struct r600_bc_cf
*cf
, struct gpr_usage usage
[128],
2075 int id
, int last_barrier
)
2077 //TODO handle other memory operations
2078 struct gpr_usage
*cur_usage
= &usage
[cf
->output
.gpr
];
2079 struct gpr_usage_range
*range
= find_src_range(cur_usage
, id
);
2082 if (cf
->output
.swizzle_x
< 4)
2083 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_x
, last_barrier
);
2084 if (cf
->output
.swizzle_y
< 4)
2085 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_y
, last_barrier
);
2086 if (cf
->output
.swizzle_z
< 4)
2087 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_z
, last_barrier
);
2088 if (cf
->output
.swizzle_w
< 4)
2089 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_w
, last_barrier
);
2091 cf
->output
.gpr
= range
->replacement
;
/* Remove a self-MOV (dst == src, no modifiers) from an ALU clause — but only
 * when the following instruction group does not read the PV/PS result this
 * MOV would have produced. */
2094 static void optimize_alu_inst(struct r600_bc
*bc
, struct r600_bc_cf
*cf
, struct r600_bc_alu
*alu
)
2096 struct r600_bc_alu
*alu_next
;
2098 unsigned src
, num_src
;
2100 /* check if a MOV could be optimized away */
2101 if (alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
) {
2103 /* destination equals source? */
2104 if (alu
->dst
.sel
!= alu
->src
[0].sel
||
2105 alu
->dst
.chan
!= alu
->src
[0].chan
)
2108 /* any special handling for the source? */
2109 if (alu
->src
[0].rel
|| alu
->src
[0].neg
|| alu
->src
[0].abs
)
2112 /* any special handling for destination? */
2113 if (alu
->dst
.rel
|| alu
->dst
.clamp
)
2116 /* ok find next instruction group and check if ps/pv is used */
2117 for (alu_next
= alu
; !alu_next
->last
; alu_next
= NEXT_ALU(alu_next
));
2119 if (alu_next
->list
.next
!= &cf
->alu
) {
/* Reduction instructions feed PV.x regardless of dst.chan. */
2120 chan
= is_alu_reduction_inst(bc
, alu
) ? 0 : alu
->dst
.chan
;
2121 for (alu_next
= NEXT_ALU(alu_next
); alu_next
; alu_next
= NEXT_ALU(alu_next
)) {
2122 num_src
= r600_bc_get_num_operands(bc
, alu_next
);
2123 for (src
= 0; src
< num_src
; ++src
) {
2124 if (alu_next
->src
[src
].sel
== V_SQ_ALU_SRC_PV
&&
2125 alu_next
->src
[src
].chan
== chan
)
2128 if (alu_next
->src
[src
].sel
== V_SQ_ALU_SRC_PS
)
2137 r600_bc_remove_alu(cf
, alu
);
/* Merge an export CF into the previous one as a burst when both exports are
 * identical except for contiguous gpr/array_base ranges and the combined
 * burst count stays within the hardware limit of 16. */
2141 static void optimize_export_inst(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
2143 struct r600_bc_cf
*prev
= LIST_ENTRY(struct r600_bc_cf
, cf
->list
.prev
, list
);
2144 if (&prev
->list
== &bc
->cf
||
2145 prev
->inst
!= cf
->inst
||
2146 prev
->output
.type
!= cf
->output
.type
||
2147 prev
->output
.elem_size
!= cf
->output
.elem_size
||
2148 prev
->output
.swizzle_x
!= cf
->output
.swizzle_x
||
2149 prev
->output
.swizzle_y
!= cf
->output
.swizzle_y
||
2150 prev
->output
.swizzle_z
!= cf
->output
.swizzle_z
||
2151 prev
->output
.swizzle_w
!= cf
->output
.swizzle_w
)
2154 if ((prev
->output
.burst_count
+ cf
->output
.burst_count
) > 16)
/* prev immediately precedes cf in gpr/array space: extend prev forward. */
2157 if ((prev
->output
.gpr
+ prev
->output
.burst_count
) == cf
->output
.gpr
&&
2158 (prev
->output
.array_base
+ prev
->output
.burst_count
) == cf
->output
.array_base
) {
2160 prev
->output
.burst_count
+= cf
->output
.burst_count
;
2161 r600_bc_remove_cf(bc
, cf
);
/* cf immediately precedes prev: extend cf backward and drop prev. */
2163 } else if (prev
->output
.gpr
== (cf
->output
.gpr
+ cf
->output
.burst_count
) &&
2164 prev
->output
.array_base
== (cf
->output
.array_base
+ cf
->output
.burst_count
)) {
2166 cf
->output
.burst_count
+= prev
->output
.burst_count
;
2167 r600_bc_remove_cf(bc
, prev
);
2171 static void r600_bc_optimize(struct r600_bc
*bc
)
2173 struct r600_bc_cf
*cf
, *next_cf
;
2174 struct r600_bc_alu
*first
, *next_alu
;
2175 struct r600_bc_alu
*alu
;
2176 struct r600_bc_vtx
*vtx
;
2177 struct r600_bc_tex
*tex
;
2178 struct gpr_usage usage
[128];
2180 /* assume that each gpr is exported only once */
2181 struct r600_bc_cf
*export_cf
[128] = { NULL
};
2182 int export_remap
[128];
2184 int id
, cond_start
, barrier
[bc
->nstack
];
2185 unsigned i
, j
, stack
, predicate
, old_stack
;
2187 memset(&usage
, 0, sizeof(usage
));
2188 for (i
= 0; i
< 128; ++i
) {
2189 usage
[i
].first_write
= -1;
2190 usage
[i
].last_write
[0] = -1;
2191 usage
[i
].last_write
[1] = -1;
2192 usage
[i
].last_write
[2] = -1;
2193 usage
[i
].last_write
[3] = -1;
2196 /* first gather some informations about the gpr usage */
2198 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
2203 switch (r600_bc_cf_class(cf
)) {
2207 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
2210 notice_alu_src_gprs(bc
, alu
, usage
, id
);
2212 notice_alu_dst_gprs(first
, usage
, id
, predicate
|| stack
> 0);
2216 if (is_alu_pred_inst(bc
, alu
))
2219 if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3)
2221 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3)
2223 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3)
2226 case CF_CLASS_TEXTURE
:
2227 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
2228 notice_tex_gprs(bc
, tex
, usage
, id
++, stack
> 0);
2231 case CF_CLASS_VERTEX
:
2232 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
2233 notice_vtx_gprs(vtx
, usage
, id
++, stack
> 0);
2236 case CF_CLASS_EXPORT
:
2237 notice_export_gprs(cf
, usage
, export_cf
, export_remap
);
2238 continue; // don't increment id
2239 case CF_CLASS_OTHER
:
2241 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
2242 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
2243 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
2246 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
2247 stack
-= cf
->pop_count
;
2251 // TODO implement loop handling
2256 /* extend last_write after conditional block */
2257 if (stack
== 0 && old_stack
!= 0)
2258 for (i
= 0; i
< 128; ++i
)
2259 for (j
= 0; j
< 4; ++j
)
2260 if (usage
[i
].last_write
[j
] >= cond_start
)
2261 usage
[i
].last_write
[j
] = id
;
2268 /* try to optimize gpr usage */
2269 for (i
= 0; i
< 124; ++i
) {
2270 for (j
= 0; j
< usage
[i
].nranges
; ++j
) {
2271 struct gpr_usage_range
*range
= &usage
[i
].ranges
[j
];
2272 if (range
->start
== -1)
2273 /* can't rearange shader inputs */
2274 range
->replacement
= i
;
2275 else if (range
->end
== -1)
2276 /* gpr isn't used any more after this instruction */
2277 range
->replacement
= -1;
2279 find_replacement(usage
, i
, range
);
2281 if (range
->replacement
== i
)
2283 else if (range
->replacement
< i
&& range
->replacement
> bc
->ngpr
)
2284 bc
->ngpr
= range
->replacement
;
2289 /* apply the changes */
2290 for (i
= 0; i
< 128; ++i
) {
2291 usage
[i
].last_write
[0] = -1;
2292 usage
[i
].last_write
[1] = -1;
2293 usage
[i
].last_write
[2] = -1;
2294 usage
[i
].last_write
[3] = -1;
2298 LIST_FOR_EACH_ENTRY_SAFE(cf
, next_cf
, &bc
->cf
, list
) {
2300 switch (r600_bc_cf_class(cf
)) {
2305 LIST_FOR_EACH_ENTRY_SAFE(alu
, next_alu
, &cf
->alu
, list
) {
2306 replace_alu_gprs(bc
, alu
, usage
, id
, barrier
[stack
], &cf
->barrier
);
2310 if (is_alu_pred_inst(bc
, alu
))
2313 if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3)
2314 optimize_alu_inst(bc
, cf
, alu
);
2316 if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3)
2318 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3)
2320 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3)
2322 if (LIST_IS_EMPTY(&cf
->alu
)) {
2323 r600_bc_remove_cf(bc
, cf
);
2327 case CF_CLASS_TEXTURE
:
2329 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
2330 replace_tex_gprs(tex
, usage
, id
++, barrier
[stack
], &cf
->barrier
);
2333 case CF_CLASS_VERTEX
:
2335 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
2336 replace_vtx_gprs(vtx
, usage
, id
++, barrier
[stack
], &cf
->barrier
);
2339 case CF_CLASS_EXPORT
:
2340 continue; // don't increment id
2341 case CF_CLASS_OTHER
:
2342 if (cf
->inst
== V_SQ_CF_WORD1_SQ_CF_INST_POP
) {
2344 stack
-= cf
->pop_count
;
2350 if (cf
&& cf
->barrier
)
2351 barrier
[old_stack
] = id
;
2353 for (i
= old_stack
+ 1; i
<= stack
; ++i
)
2354 barrier
[i
] = barrier
[old_stack
];
2357 if (stack
!= 0) /* ensure exports are placed outside of conditional blocks */
2360 for (i
= 0; i
< 128; ++i
) {
2361 if (!export_cf
[i
] || id
< export_remap
[i
])
2364 r600_bc_move_cf(bc
, export_cf
[i
], next_cf
);
2365 replace_export_gprs(export_cf
[i
], usage
, export_remap
[i
], barrier
[stack
]);
2366 if (export_cf
[i
]->barrier
)
2367 barrier
[stack
] = id
- 1;
2368 next_cf
= LIST_ENTRY(struct r600_bc_cf
, export_cf
[i
]->list
.next
, list
);
2369 optimize_export_inst(bc
, export_cf
[i
]);
2370 export_cf
[i
] = NULL
;
2376 for (i
= 0; i
< 128; ++i
) {
2377 free(usage
[i
].ranges
);
2381 int r600_bc_build(struct r600_bc
*bc
)
2383 struct r600_bc_cf
*cf
;
2384 struct r600_bc_alu
*alu
;
2385 struct r600_bc_vtx
*vtx
;
2386 struct r600_bc_tex
*tex
;
2387 struct r600_bc_cf
*exports
[4] = { NULL
};
2388 uint32_t literal
[4];
2393 if (bc
->callstack
[0].max
> 0)
2394 bc
->nstack
= ((bc
->callstack
[0].max
+ 3) >> 2) + 2;
2395 if (bc
->type
== TGSI_PROCESSOR_VERTEX
&& !bc
->nstack
) {
2399 //r600_bc_optimize(bc);
2401 /* first path compute addr of each CF block */
2402 /* addr start after all the CF instructions */
2403 addr
= LIST_ENTRY(struct r600_bc_cf
, bc
->cf
.prev
, list
)->id
+ 2;
2404 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
2405 switch (r600_bc_cf_class(cf
)) {
2408 case CF_CLASS_TEXTURE
:
2409 case CF_CLASS_VERTEX
:
2410 /* fetch node need to be 16 bytes aligned*/
2412 addr
&= 0xFFFFFFFCUL
;
2414 case CF_CLASS_EXPORT
:
2415 if (cf
->inst
== BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
))
2416 exports
[cf
->output
.type
] = cf
;
2418 case CF_CLASS_OTHER
:
2421 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
2426 bc
->ndw
= cf
->addr
+ cf
->ndw
;
2429 /* set export done on last export of each type */
2430 for (i
= 0; i
< 4; ++i
) {
2432 exports
[i
]->inst
= BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
);
2437 bc
->bytecode
= calloc(1, bc
->ndw
* 4);
2438 if (bc
->bytecode
== NULL
)
2440 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
2442 if (bc
->chiprev
== CHIPREV_EVERGREEN
)
2443 r
= eg_bc_cf_build(bc
, cf
);
2445 r
= r600_bc_cf_build(bc
, cf
);
2448 switch (r600_bc_cf_class(cf
)) {
2451 memset(literal
, 0, sizeof(literal
));
2452 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
2453 r
= r600_bc_alu_nliterals(bc
, alu
, literal
, &nliteral
);
2456 r600_bc_alu_adjust_literals(bc
, alu
, literal
, nliteral
);
2457 switch(bc
->chiprev
) {
2459 r
= r600_bc_alu_build(bc
, alu
, addr
);
2462 case CHIPREV_EVERGREEN
: /* eg alu is same encoding as r700 */
2463 r
= r700_bc_alu_build(bc
, alu
, addr
);
2466 R600_ERR("unknown family %d\n", bc
->family
);
2473 for (i
= 0; i
< align(nliteral
, 2); ++i
) {
2474 bc
->bytecode
[addr
++] = literal
[i
];
2477 memset(literal
, 0, sizeof(literal
));
2481 case CF_CLASS_VERTEX
:
2482 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
2483 r
= r600_bc_vtx_build(bc
, vtx
, addr
);
2489 case CF_CLASS_TEXTURE
:
2490 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
2491 r
= r600_bc_tex_build(bc
, tex
, addr
);
2497 case CF_CLASS_EXPORT
:
2498 case CF_CLASS_OTHER
:
2501 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
2508 void r600_bc_clear(struct r600_bc
*bc
)
2510 struct r600_bc_cf
*cf
= NULL
, *next_cf
;
2513 bc
->bytecode
= NULL
;
2515 LIST_FOR_EACH_ENTRY_SAFE(cf
, next_cf
, &bc
->cf
, list
) {
2516 struct r600_bc_alu
*alu
= NULL
, *next_alu
;
2517 struct r600_bc_tex
*tex
= NULL
, *next_tex
;
2518 struct r600_bc_tex
*vtx
= NULL
, *next_vtx
;
2520 LIST_FOR_EACH_ENTRY_SAFE(alu
, next_alu
, &cf
->alu
, list
) {
2524 LIST_INITHEAD(&cf
->alu
);
2526 LIST_FOR_EACH_ENTRY_SAFE(tex
, next_tex
, &cf
->tex
, list
) {
2530 LIST_INITHEAD(&cf
->tex
);
2532 LIST_FOR_EACH_ENTRY_SAFE(vtx
, next_vtx
, &cf
->vtx
, list
) {
2536 LIST_INITHEAD(&cf
->vtx
);
2541 LIST_INITHEAD(&cf
->list
);
2544 void r600_bc_dump(struct r600_bc
*bc
)
2546 struct r600_bc_cf
*cf
= NULL
;
2547 struct r600_bc_alu
*alu
= NULL
;
2548 struct r600_bc_vtx
*vtx
= NULL
;
2549 struct r600_bc_tex
*tex
= NULL
;
2552 uint32_t literal
[4];
2556 switch (bc
->chiprev
) {
2568 fprintf(stderr
, "bytecode %d dw -- %d gprs ---------------------\n", bc
->ndw
, bc
->ngpr
);
2569 fprintf(stderr
, " %c\n", chip
);
2571 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
2574 switch (r600_bc_cf_class(cf
)) {
2576 fprintf(stderr
, "%04d %08X ALU ", id
, bc
->bytecode
[id
]);
2577 fprintf(stderr
, "ADDR:%04d ", cf
->addr
);
2578 fprintf(stderr
, "KCACHE_MODE0:%X ", cf
->kcache
[0].mode
);
2579 fprintf(stderr
, "KCACHE_BANK0:%X ", cf
->kcache
[0].bank
);
2580 fprintf(stderr
, "KCACHE_BANK1:%X\n", cf
->kcache
[1].bank
);
2582 fprintf(stderr
, "%04d %08X ALU ", id
, bc
->bytecode
[id
]);
2583 fprintf(stderr
, "INST:%d ", cf
->inst
);
2584 fprintf(stderr
, "KCACHE_MODE1:%X ", cf
->kcache
[1].mode
);
2585 fprintf(stderr
, "KCACHE_ADDR0:%X ", cf
->kcache
[0].addr
);
2586 fprintf(stderr
, "KCACHE_ADDR1:%X ", cf
->kcache
[1].addr
);
2587 fprintf(stderr
, "BARRIER:%d ", cf
->barrier
);
2588 fprintf(stderr
, "COUNT:%d\n", cf
->ndw
/ 2);
2590 case CF_CLASS_TEXTURE
:
2591 case CF_CLASS_VERTEX
:
2592 fprintf(stderr
, "%04d %08X TEX/VTX ", id
, bc
->bytecode
[id
]);
2593 fprintf(stderr
, "ADDR:%04d\n", cf
->addr
);
2595 fprintf(stderr
, "%04d %08X TEX/VTX ", id
, bc
->bytecode
[id
]);
2596 fprintf(stderr
, "INST:%d ", cf
->inst
);
2597 fprintf(stderr
, "BARRIER:%d ", cf
->barrier
);
2598 fprintf(stderr
, "COUNT:%d\n", cf
->ndw
/ 4);
2600 case CF_CLASS_EXPORT
:
2601 fprintf(stderr
, "%04d %08X EXPORT ", id
, bc
->bytecode
[id
]);
2602 fprintf(stderr
, "GPR:%d ", cf
->output
.gpr
);
2603 fprintf(stderr
, "ELEM_SIZE:%X ", cf
->output
.elem_size
);
2604 fprintf(stderr
, "ARRAY_BASE:%X ", cf
->output
.array_base
);
2605 fprintf(stderr
, "TYPE:%X\n", cf
->output
.type
);
2607 fprintf(stderr
, "%04d %08X EXPORT ", id
, bc
->bytecode
[id
]);
2608 fprintf(stderr
, "SWIZ_X:%X ", cf
->output
.swizzle_x
);
2609 fprintf(stderr
, "SWIZ_Y:%X ", cf
->output
.swizzle_y
);
2610 fprintf(stderr
, "SWIZ_Z:%X ", cf
->output
.swizzle_z
);
2611 fprintf(stderr
, "SWIZ_W:%X ", cf
->output
.swizzle_w
);
2612 fprintf(stderr
, "BARRIER:%d ", cf
->barrier
);
2613 fprintf(stderr
, "INST:%d ", cf
->inst
);
2614 fprintf(stderr
, "BURST_COUNT:%d\n", cf
->output
.burst_count
);
2616 case CF_CLASS_OTHER
:
2617 fprintf(stderr
, "%04d %08X CF ", id
, bc
->bytecode
[id
]);
2618 fprintf(stderr
, "ADDR:%04d\n", cf
->cf_addr
);
2620 fprintf(stderr
, "%04d %08X CF ", id
, bc
->bytecode
[id
]);
2621 fprintf(stderr
, "INST:%d ", cf
->inst
);
2622 fprintf(stderr
, "COND:%X ", cf
->cond
);
2623 fprintf(stderr
, "BARRIER:%d ", cf
->barrier
);
2624 fprintf(stderr
, "POP_COUNT:%X\n", cf
->pop_count
);
2630 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
2631 r600_bc_alu_nliterals(bc
, alu
, literal
, &nliteral
);
2633 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
2634 fprintf(stderr
, "SRC0(SEL:%d ", alu
->src
[0].sel
);
2635 fprintf(stderr
, "REL:%d ", alu
->src
[0].rel
);
2636 fprintf(stderr
, "CHAN:%d ", alu
->src
[0].chan
);
2637 fprintf(stderr
, "NEG:%d) ", alu
->src
[0].neg
);
2638 fprintf(stderr
, "SRC1(SEL:%d ", alu
->src
[1].sel
);
2639 fprintf(stderr
, "REL:%d ", alu
->src
[1].rel
);
2640 fprintf(stderr
, "CHAN:%d ", alu
->src
[1].chan
);
2641 fprintf(stderr
, "NEG:%d) ", alu
->src
[1].neg
);
2642 fprintf(stderr
, "LAST:%d)\n", alu
->last
);
2644 fprintf(stderr
, "%04d %08X %c ", id
, bc
->bytecode
[id
], alu
->last
? '*' : ' ');
2645 fprintf(stderr
, "INST:%d ", alu
->inst
);
2646 fprintf(stderr
, "DST(SEL:%d ", alu
->dst
.sel
);
2647 fprintf(stderr
, "CHAN:%d ", alu
->dst
.chan
);
2648 fprintf(stderr
, "REL:%d ", alu
->dst
.rel
);
2649 fprintf(stderr
, "CLAMP:%d) ", alu
->dst
.clamp
);
2650 fprintf(stderr
, "BANK_SWIZZLE:%d ", alu
->bank_swizzle
);
2652 fprintf(stderr
, "SRC2(SEL:%d ", alu
->src
[2].sel
);
2653 fprintf(stderr
, "REL:%d ", alu
->src
[2].rel
);
2654 fprintf(stderr
, "CHAN:%d ", alu
->src
[2].chan
);
2655 fprintf(stderr
, "NEG:%d)\n", alu
->src
[2].neg
);
2657 fprintf(stderr
, "SRC0_ABS:%d ", alu
->src
[0].abs
);
2658 fprintf(stderr
, "SRC1_ABS:%d ", alu
->src
[1].abs
);
2659 fprintf(stderr
, "WRITE_MASK:%d ", alu
->dst
.write
);
2660 fprintf(stderr
, "OMOD:%d ", alu
->omod
);
2661 fprintf(stderr
, "EXECUTE_MASK:%d ", alu
->predicate
);
2662 fprintf(stderr
, "UPDATE_PRED:%d\n", alu
->predicate
);
2667 for (i
= 0; i
< nliteral
; i
++, id
++) {
2668 float *f
= (float*)(bc
->bytecode
+ id
);
2669 fprintf(stderr
, "%04d %08X\t%f\n", id
, bc
->bytecode
[id
], *f
);
2676 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
2680 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
2685 fprintf(stderr
, "--------------------------------------\n");
2688 void r600_cf_vtx(struct r600_vertex_element
*ve
, u32
*bytecode
, unsigned count
)
2690 struct r600_pipe_state
*rstate
;
2694 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2695 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX
) |
2696 S_SQ_CF_WORD1_BARRIER(1) |
2697 S_SQ_CF_WORD1_COUNT(8 - 1);
2698 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2699 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX
) |
2700 S_SQ_CF_WORD1_BARRIER(1) |
2701 S_SQ_CF_WORD1_COUNT(count
- 8 - 1);
2703 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2704 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX
) |
2705 S_SQ_CF_WORD1_BARRIER(1) |
2706 S_SQ_CF_WORD1_COUNT(count
- 1);
2708 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(0);
2709 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN
) |
2710 S_SQ_CF_WORD1_BARRIER(1);
2712 rstate
= &ve
->rstate
;
2713 rstate
->id
= R600_PIPE_STATE_FETCH_SHADER
;
2715 r600_pipe_state_add_reg(rstate
, R_0288A4_SQ_PGM_RESOURCES_FS
,
2716 0x00000000, 0xFFFFFFFF, NULL
);
2717 r600_pipe_state_add_reg(rstate
, R_0288DC_SQ_PGM_CF_OFFSET_FS
,
2718 0x00000000, 0xFFFFFFFF, NULL
);
2719 r600_pipe_state_add_reg(rstate
, R_028894_SQ_PGM_START_FS
,
2720 r600_bo_offset(ve
->fetch_shader
) >> 8,
2721 0xFFFFFFFF, ve
->fetch_shader
);
2724 void r600_cf_vtx_tc(struct r600_vertex_element
*ve
, u32
*bytecode
, unsigned count
)
2726 struct r600_pipe_state
*rstate
;
2730 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2731 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) |
2732 S_SQ_CF_WORD1_BARRIER(1) |
2733 S_SQ_CF_WORD1_COUNT(8 - 1);
2734 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
2735 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) |
2736 S_SQ_CF_WORD1_BARRIER(1) |
2737 S_SQ_CF_WORD1_COUNT((count
- 8) - 1);
2739 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
2740 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) |
2741 S_SQ_CF_WORD1_BARRIER(1) |
2742 S_SQ_CF_WORD1_COUNT(count
- 1);
2744 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(0);
2745 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN
) |
2746 S_SQ_CF_WORD1_BARRIER(1);
2748 rstate
= &ve
->rstate
;
2749 rstate
->id
= R600_PIPE_STATE_FETCH_SHADER
;
2751 r600_pipe_state_add_reg(rstate
, R_0288A4_SQ_PGM_RESOURCES_FS
,
2752 0x00000000, 0xFFFFFFFF, NULL
);
2753 r600_pipe_state_add_reg(rstate
, R_0288DC_SQ_PGM_CF_OFFSET_FS
,
2754 0x00000000, 0xFFFFFFFF, NULL
);
2755 r600_pipe_state_add_reg(rstate
, R_028894_SQ_PGM_START_FS
,
2756 r600_bo_offset(ve
->fetch_shader
) >> 8,
2757 0xFFFFFFFF, ve
->fetch_shader
);
2760 static void r600_vertex_data_type(enum pipe_format pformat
, unsigned *format
,
2761 unsigned *num_format
, unsigned *format_comp
)
2763 const struct util_format_description
*desc
;
2770 desc
= util_format_description(pformat
);
2771 if (desc
->layout
!= UTIL_FORMAT_LAYOUT_PLAIN
) {
2775 /* Find the first non-VOID channel. */
2776 for (i
= 0; i
< 4; i
++) {
2777 if (desc
->channel
[i
].type
!= UTIL_FORMAT_TYPE_VOID
) {
2782 switch (desc
->channel
[i
].type
) {
2783 /* Half-floats, floats, doubles */
2784 case UTIL_FORMAT_TYPE_FLOAT
:
2785 switch (desc
->channel
[i
].size
) {
2787 switch (desc
->nr_channels
) {
2789 *format
= FMT_16_FLOAT
;
2792 *format
= FMT_16_16_FLOAT
;
2795 *format
= FMT_16_16_16_FLOAT
;
2798 *format
= FMT_16_16_16_16_FLOAT
;
2803 switch (desc
->nr_channels
) {
2805 *format
= FMT_32_FLOAT
;
2808 *format
= FMT_32_32_FLOAT
;
2811 *format
= FMT_32_32_32_FLOAT
;
2814 *format
= FMT_32_32_32_32_FLOAT
;
2823 case UTIL_FORMAT_TYPE_UNSIGNED
:
2825 case UTIL_FORMAT_TYPE_SIGNED
:
2826 switch (desc
->channel
[i
].size
) {
2828 switch (desc
->nr_channels
) {
2836 // *format = FMT_8_8_8; /* fails piglit draw-vertices test */
2839 *format
= FMT_8_8_8_8
;
2844 switch (desc
->nr_channels
) {
2849 *format
= FMT_16_16
;
2852 // *format = FMT_16_16_16; /* fails piglit draw-vertices test */
2855 *format
= FMT_16_16_16_16
;
2860 switch (desc
->nr_channels
) {
2865 *format
= FMT_32_32
;
2868 *format
= FMT_32_32_32
;
2871 *format
= FMT_32_32_32_32
;
2883 if (desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_SIGNED
) {
2886 if (desc
->channel
[i
].normalized
) {
2893 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat
));
2896 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context
*rctx
, struct r600_vertex_element
*ve
)
2900 unsigned fetch_resource_start
= 0, format
, num_format
, format_comp
;
2901 struct pipe_vertex_element
*elements
= ve
->elements
;
2902 const struct util_format_description
*desc
;
2904 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
2905 ndw
= 8 + ve
->count
* 4;
2906 ve
->fs_size
= ndw
* 4;
2908 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2909 ve
->fetch_shader
= r600_bo(rctx
->radeon
, ndw
*4, 256, PIPE_BIND_VERTEX_BUFFER
, 0);
2910 if (ve
->fetch_shader
== NULL
) {
2914 bytecode
= r600_bo_map(rctx
->radeon
, ve
->fetch_shader
, 0, NULL
);
2915 if (bytecode
== NULL
) {
2916 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2920 if (rctx
->family
>= CHIP_CEDAR
) {
2921 eg_cf_vtx(ve
, &bytecode
[0], (ndw
- 8) / 4);
2923 r600_cf_vtx(ve
, &bytecode
[0], (ndw
- 8) / 4);
2924 fetch_resource_start
= 160;
2927 /* vertex elements offset need special handling, if offset is bigger
2928 * than what we can put in fetch instruction then we need to alterate
2929 * the vertex resource offset. In such case in order to simplify code
2930 * we will bound one resource per elements. It's a worst case scenario.
2932 for (i
= 0; i
< ve
->count
; i
++) {
2933 ve
->vbuffer_offset
[i
] = C_SQ_VTX_WORD2_OFFSET
& elements
[i
].src_offset
;
2934 if (ve
->vbuffer_offset
[i
]) {
2935 ve
->vbuffer_need_offset
= 1;
2939 for (i
= 0; i
< ve
->count
; i
++) {
2940 unsigned vbuffer_index
;
2941 r600_vertex_data_type(ve
->hw_format
[i
], &format
, &num_format
, &format_comp
);
2942 desc
= util_format_description(ve
->hw_format
[i
]);
2944 R600_ERR("unknown format %d\n", ve
->hw_format
[i
]);
2945 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2949 /* see above for vbuffer_need_offset explanation */
2950 vbuffer_index
= elements
[i
].vertex_buffer_index
;
2951 if (ve
->vbuffer_need_offset
) {
2952 bytecode
[8 + i
* 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i
+ fetch_resource_start
);
2954 bytecode
[8 + i
* 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index
+ fetch_resource_start
);
2956 bytecode
[8 + i
* 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2957 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2958 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2959 bytecode
[8 + i
* 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc
->swizzle
[0]) |
2960 S_SQ_VTX_WORD1_DST_SEL_Y(desc
->swizzle
[1]) |
2961 S_SQ_VTX_WORD1_DST_SEL_Z(desc
->swizzle
[2]) |
2962 S_SQ_VTX_WORD1_DST_SEL_W(desc
->swizzle
[3]) |
2963 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2964 S_SQ_VTX_WORD1_DATA_FORMAT(format
) |
2965 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format
) |
2966 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp
) |
2967 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2968 S_SQ_VTX_WORD1_GPR_DST_GPR(i
+ 1);
2969 bytecode
[8 + i
* 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements
[i
].src_offset
) |
2970 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2971 bytecode
[8 + i
* 4 + 3] = 0;
2973 r600_bo_unmap(rctx
->radeon
, ve
->fetch_shader
);