2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
30 #include "r600_opcodes.h"
32 #include "r600_formats.h"
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
38 #define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
39 #define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)
41 static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu
*alu
)
47 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
49 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
70 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
71 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
85 "Need instruction operand number for 0x%x.\n", alu
->inst
);
91 int r700_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
);
93 static struct r600_bc_cf
*r600_bc_cf(void)
95 struct r600_bc_cf
*cf
= CALLOC_STRUCT(r600_bc_cf
);
99 LIST_INITHEAD(&cf
->list
);
100 LIST_INITHEAD(&cf
->alu
);
101 LIST_INITHEAD(&cf
->vtx
);
102 LIST_INITHEAD(&cf
->tex
);
107 static struct r600_bc_alu
*r600_bc_alu(void)
109 struct r600_bc_alu
*alu
= CALLOC_STRUCT(r600_bc_alu
);
113 LIST_INITHEAD(&alu
->list
);
117 static struct r600_bc_vtx
*r600_bc_vtx(void)
119 struct r600_bc_vtx
*vtx
= CALLOC_STRUCT(r600_bc_vtx
);
123 LIST_INITHEAD(&vtx
->list
);
127 static struct r600_bc_tex
*r600_bc_tex(void)
129 struct r600_bc_tex
*tex
= CALLOC_STRUCT(r600_bc_tex
);
133 LIST_INITHEAD(&tex
->list
);
137 int r600_bc_init(struct r600_bc
*bc
, enum radeon_family family
)
139 LIST_INITHEAD(&bc
->cf
);
141 switch (bc
->family
) {
150 bc
->chiprev
= CHIPREV_R600
;
156 bc
->chiprev
= CHIPREV_R700
;
167 bc
->chiprev
= CHIPREV_EVERGREEN
;
170 R600_ERR("unknown family %d\n", bc
->family
);
176 static int r600_bc_add_cf(struct r600_bc
*bc
)
178 struct r600_bc_cf
*cf
= r600_bc_cf();
182 LIST_ADDTAIL(&cf
->list
, &bc
->cf
);
184 cf
->id
= bc
->cf_last
->id
+ 2;
188 bc
->force_add_cf
= 0;
192 static void r600_bc_remove_cf(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
194 struct r600_bc_cf
*other
;
195 LIST_FOR_EACH_ENTRY(other
, &bc
->cf
, list
) {
196 if (other
->id
> cf
->id
)
198 if (other
->cf_addr
> cf
->id
)
205 static void r600_bc_move_cf(struct r600_bc
*bc
, struct r600_bc_cf
*cf
, struct r600_bc_cf
*next
)
207 struct r600_bc_cf
*prev
= LIST_ENTRY(struct r600_bc_cf
, next
->list
.prev
, list
);
208 unsigned old_id
= cf
->id
;
209 unsigned new_id
= prev
->id
+ 2;
210 struct r600_bc_cf
*other
;
213 return; /* position hasn't changed */
216 LIST_FOR_EACH_ENTRY(other
, &bc
->cf
, list
) {
217 if (other
->id
> old_id
)
219 if (other
->id
>= new_id
)
221 if (other
->cf_addr
> old_id
)
223 if (other
->cf_addr
> new_id
)
227 LIST_ADD(&cf
->list
, &prev
->list
);
230 int r600_bc_add_output(struct r600_bc
*bc
, const struct r600_bc_output
*output
)
234 r
= r600_bc_add_cf(bc
);
237 bc
->cf_last
->inst
= BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
);
238 memcpy(&bc
->cf_last
->output
, output
, sizeof(struct r600_bc_output
));
239 bc
->cf_last
->output
.burst_count
= 1;
243 /* alu predicate instructions */
244 static int is_alu_pred_inst(struct r600_bc_alu
*alu
)
246 return !alu
->is_op3
&& (
247 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
248 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
249 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
250 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
251 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
252 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
253 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
254 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
255 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
256 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
257 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
258 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
259 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
260 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
261 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
262 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
263 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
264 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
265 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
266 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
267 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
268 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
269 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
270 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
273 /* alu kill instructions */
274 static int is_alu_kill_inst(struct r600_bc_alu
*alu
)
276 return !alu
->is_op3
&& (
277 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
278 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
279 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
280 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
281 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
282 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
283 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
284 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
285 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
286 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
);
/* alu instructions that can only exist once per group */
static int is_alu_once_inst(struct r600_bc_alu *alu)
{
	return is_alu_kill_inst(alu) ||
		is_alu_pred_inst(alu);
}
296 static int is_alu_reduction_inst(struct r600_bc_alu
*alu
)
298 return !alu
->is_op3
&& (
299 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
300 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
301 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
302 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
305 static int is_alu_mova_inst(struct r600_bc_alu
*alu
)
307 return !alu
->is_op3
&& (
308 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
||
309 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
||
310 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
/* alu instructions that can only execute on the vector unit */
static int is_alu_vec_unit_inst(struct r600_bc_alu *alu)
{
	return is_alu_reduction_inst(alu) ||
		is_alu_mova_inst(alu);
}
320 /* alu instructions that can only execute on the trans unit */
321 static int is_alu_trans_unit_inst(struct r600_bc_alu
*alu
)
324 return alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
325 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
||
326 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
327 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
328 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
329 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
330 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
331 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
332 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
333 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
334 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
335 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
336 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
337 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
338 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
339 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
340 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
341 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
342 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
343 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
344 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
345 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
346 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
347 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
349 return alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
||
350 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2
||
351 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2
||
352 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4
;
/* alu instructions that can execute on any unit */
static int is_alu_any_unit_inst(struct r600_bc_alu *alu)
{
	return !is_alu_vec_unit_inst(alu) &&
		!is_alu_trans_unit_inst(alu);
}
362 static int assign_alu_units(struct r600_bc_alu
*alu_first
, struct r600_bc_alu
*assignment
[5])
364 struct r600_bc_alu
*alu
;
365 unsigned i
, chan
, trans
;
367 for (i
= 0; i
< 5; i
++)
368 assignment
[i
] = NULL
;
370 for (alu
= alu_first
; alu
; alu
= NEXT_ALU(alu
)) {
371 chan
= alu
->dst
.chan
;
372 if (is_alu_trans_unit_inst(alu
))
374 else if (is_alu_vec_unit_inst(alu
))
376 else if (assignment
[chan
])
377 trans
= 1; // assume ALU_INST_PREFER_VECTOR
383 assert(0); //ALU.Trans has already been allocated
388 if (assignment
[chan
]) {
389 assert(0); //ALU.chan has already been allocated
392 assignment
[chan
] = alu
;
401 struct alu_bank_swizzle
{
402 int hw_gpr
[NUM_OF_CYCLES
][NUM_OF_COMPONENTS
];
403 int hw_cfile_addr
[4];
404 int hw_cfile_elem
[4];
407 const unsigned cycle_for_bank_swizzle_vec
[][3] = {
408 [SQ_ALU_VEC_012
] = { 0, 1, 2 },
409 [SQ_ALU_VEC_021
] = { 0, 2, 1 },
410 [SQ_ALU_VEC_120
] = { 1, 2, 0 },
411 [SQ_ALU_VEC_102
] = { 1, 0, 2 },
412 [SQ_ALU_VEC_201
] = { 2, 0, 1 },
413 [SQ_ALU_VEC_210
] = { 2, 1, 0 }
416 const unsigned cycle_for_bank_swizzle_scl
[][3] = {
417 [SQ_ALU_SCL_210
] = { 2, 1, 0 },
418 [SQ_ALU_SCL_122
] = { 1, 2, 2 },
419 [SQ_ALU_SCL_212
] = { 2, 1, 2 },
420 [SQ_ALU_SCL_221
] = { 2, 2, 1 }
423 static void init_bank_swizzle(struct alu_bank_swizzle
*bs
)
425 int i
, cycle
, component
;
427 for (cycle
= 0; cycle
< NUM_OF_CYCLES
; cycle
++)
428 for (component
= 0; component
< NUM_OF_COMPONENTS
; component
++)
429 bs
->hw_gpr
[cycle
][component
] = -1;
430 for (i
= 0; i
< 4; i
++)
431 bs
->hw_cfile_addr
[i
] = -1;
432 for (i
= 0; i
< 4; i
++)
433 bs
->hw_cfile_elem
[i
] = -1;
436 static int reserve_gpr(struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
, unsigned cycle
)
438 if (bs
->hw_gpr
[cycle
][chan
] == -1)
439 bs
->hw_gpr
[cycle
][chan
] = sel
;
440 else if (bs
->hw_gpr
[cycle
][chan
] != (int)sel
) {
441 // Another scalar operation has already used GPR read port for channel
447 static int reserve_cfile(struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
)
449 int res
, resmatch
= -1, resempty
= -1;
450 for (res
= 3; res
>= 0; --res
) {
451 if (bs
->hw_cfile_addr
[res
] == -1)
453 else if (bs
->hw_cfile_addr
[res
] == sel
&&
454 bs
->hw_cfile_elem
[res
] == chan
)
458 return 0; // Read for this scalar element already reserved, nothing to do here.
459 else if (resempty
!= -1) {
460 bs
->hw_cfile_addr
[resempty
] = sel
;
461 bs
->hw_cfile_elem
[resempty
] = chan
;
463 // All cfile read ports are used, cannot reference vector element
/* A source selector in [0, 127] addresses a GPR. */
static int is_gpr(unsigned sel)
{
	/* sel is unsigned, so the original `sel >= 0` lower-bound check
	 * was always true; only the upper bound matters. */
	return (sel <= 127);
}
/* A source selector in (255, 512) addresses the constant file. */
static int is_cfile(unsigned sel)
{
	return sel > 255 && sel < 512;
}
/* CB constants start at 512, and get translated to a kcache index when ALU
 * clauses are constructed. Note that we handle kcache constants the same way
 * as (the now gone) cfile constants, is that really required? */
static int is_cb_const(int sel)
{
	if (sel > 511 && sel < 4607)
		return 1;
	return 0;
}
489 static int is_const(int sel
)
491 return is_cfile(sel
) ||
493 (sel
>= V_SQ_ALU_SRC_0
&&
494 sel
<= V_SQ_ALU_SRC_LITERAL
);
497 static int check_vector(struct r600_bc_alu
*alu
, struct alu_bank_swizzle
*bs
, int bank_swizzle
)
499 int r
, src
, num_src
, sel
, elem
, cycle
;
501 num_src
= r600_bc_get_num_operands(alu
);
502 for (src
= 0; src
< num_src
; src
++) {
503 sel
= alu
->src
[src
].sel
;
504 elem
= alu
->src
[src
].chan
;
506 cycle
= cycle_for_bank_swizzle_vec
[bank_swizzle
][src
];
507 if (src
== 1 && sel
== alu
->src
[0].sel
&& elem
== alu
->src
[0].chan
)
508 // Nothing to do; special-case optimization,
509 // second source uses first source’s reservation
512 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
516 } else if (is_cfile(sel
)) {
517 r
= reserve_cfile(bs
, sel
, elem
);
521 // No restrictions on PV, PS, literal or special constants
526 static int check_scalar(struct r600_bc_alu
*alu
, struct alu_bank_swizzle
*bs
, int bank_swizzle
)
528 int r
, src
, num_src
, const_count
, sel
, elem
, cycle
;
530 num_src
= r600_bc_get_num_operands(alu
);
531 for (const_count
= 0, src
= 0; src
< num_src
; ++src
) {
532 sel
= alu
->src
[src
].sel
;
533 elem
= alu
->src
[src
].chan
;
534 if (is_const(sel
)) { // Any constant, including literal and inline constants
535 if (const_count
>= 2)
536 // More than two references to a constant in
537 // transcendental operation.
543 r
= reserve_cfile(bs
, sel
, elem
);
548 for (src
= 0; src
< num_src
; ++src
) {
549 sel
= alu
->src
[src
].sel
;
550 elem
= alu
->src
[src
].chan
;
552 cycle
= cycle_for_bank_swizzle_scl
[bank_swizzle
][src
];
553 if (cycle
< const_count
)
554 // Cycle for GPR load conflicts with
555 // constant load in transcendental operation.
557 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
561 // Constants already processed
562 // No restrictions on PV, PS
567 static int check_and_set_bank_swizzle(struct r600_bc_alu
*slots
[5])
569 struct alu_bank_swizzle bs
;
571 int i
, r
= 0, forced
= 0;
573 for (i
= 0; i
< 5; i
++)
574 if (slots
[i
] && slots
[i
]->bank_swizzle_force
) {
575 slots
[i
]->bank_swizzle
= slots
[i
]->bank_swizzle_force
;
582 // just check every possible combination of bank swizzle
583 // not very efficent, but works on the first try in most of the cases
584 for (i
= 0; i
< 4; i
++)
585 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
586 bank_swizzle
[4] = SQ_ALU_SCL_210
;
587 while(bank_swizzle
[4] <= SQ_ALU_SCL_221
) {
588 init_bank_swizzle(&bs
);
589 for (i
= 0; i
< 4; i
++) {
591 r
= check_vector(slots
[i
], &bs
, bank_swizzle
[i
]);
596 if (!r
&& slots
[4]) {
597 r
= check_scalar(slots
[4], &bs
, bank_swizzle
[4]);
600 for (i
= 0; i
< 5; i
++) {
602 slots
[i
]->bank_swizzle
= bank_swizzle
[i
];
607 for (i
= 0; i
< 5; i
++) {
609 if (bank_swizzle
[i
] <= SQ_ALU_VEC_210
)
612 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
616 // couldn't find a working swizzle
620 static int replace_gpr_with_pv_ps(struct r600_bc_alu
*slots
[5], struct r600_bc_alu
*alu_prev
)
622 struct r600_bc_alu
*prev
[5];
624 int i
, j
, r
, src
, num_src
;
626 r
= assign_alu_units(alu_prev
, prev
);
630 for (i
= 0; i
< 5; ++i
) {
631 if(prev
[i
] && prev
[i
]->dst
.write
&& !prev
[i
]->dst
.rel
) {
632 gpr
[i
] = prev
[i
]->dst
.sel
;
633 if (is_alu_reduction_inst(prev
[i
]))
636 chan
[i
] = prev
[i
]->dst
.chan
;
641 for (i
= 0; i
< 5; ++i
) {
642 struct r600_bc_alu
*alu
= slots
[i
];
646 num_src
= r600_bc_get_num_operands(alu
);
647 for (src
= 0; src
< num_src
; ++src
) {
648 if (!is_gpr(alu
->src
[src
].sel
) || alu
->src
[src
].rel
)
651 if (alu
->src
[src
].sel
== gpr
[4] &&
652 alu
->src
[src
].chan
== chan
[4]) {
653 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PS
;
654 alu
->src
[src
].chan
= 0;
658 for (j
= 0; j
< 4; ++j
) {
659 if (alu
->src
[src
].sel
== gpr
[j
] &&
660 alu
->src
[src
].chan
== j
) {
661 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PV
;
662 alu
->src
[src
].chan
= chan
[j
];
672 void r600_bc_special_constants(u32 value
, unsigned *sel
, unsigned *neg
)
676 *sel
= V_SQ_ALU_SRC_0
;
679 *sel
= V_SQ_ALU_SRC_1_INT
;
682 *sel
= V_SQ_ALU_SRC_M_1_INT
;
684 case 0x3F800000: // 1.0f
685 *sel
= V_SQ_ALU_SRC_1
;
687 case 0x3F000000: // 0.5f
688 *sel
= V_SQ_ALU_SRC_0_5
;
690 case 0xBF800000: // -1.0f
691 *sel
= V_SQ_ALU_SRC_1
;
694 case 0xBF000000: // -0.5f
695 *sel
= V_SQ_ALU_SRC_0_5
;
699 *sel
= V_SQ_ALU_SRC_LITERAL
;
704 /* compute how many literal are needed */
705 static int r600_bc_alu_nliterals(struct r600_bc_alu
*alu
, uint32_t literal
[4], unsigned *nliteral
)
707 unsigned num_src
= r600_bc_get_num_operands(alu
);
710 for (i
= 0; i
< num_src
; ++i
) {
711 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
712 uint32_t value
= alu
->src
[i
].value
[alu
->src
[i
].chan
];
714 for (j
= 0; j
< *nliteral
; ++j
) {
715 if (literal
[j
] == value
) {
723 literal
[(*nliteral
)++] = value
;
730 static void r600_bc_alu_adjust_literals(struct r600_bc_alu
*alu
, uint32_t literal
[4], unsigned nliteral
)
732 unsigned num_src
= r600_bc_get_num_operands(alu
);
735 for (i
= 0; i
< num_src
; ++i
) {
736 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
737 uint32_t value
= alu
->src
[i
].value
[alu
->src
[i
].chan
];
738 for (j
= 0; j
< nliteral
; ++j
) {
739 if (literal
[j
] == value
) {
740 alu
->src
[i
].chan
= j
;
748 static int merge_inst_groups(struct r600_bc
*bc
, struct r600_bc_alu
*slots
[5], struct r600_bc_alu
*alu_prev
)
750 struct r600_bc_alu
*prev
[5];
751 struct r600_bc_alu
*result
[5] = { NULL
};
754 unsigned nliteral
= 0;
756 int i
, j
, r
, src
, num_src
;
757 int num_once_inst
= 0;
759 r
= assign_alu_units(alu_prev
, prev
);
763 for (i
= 0; i
< 5; ++i
) {
764 /* check number of literals */
765 if (prev
[i
] && r600_bc_alu_nliterals(prev
[i
], literal
, &nliteral
))
767 if (slots
[i
] && r600_bc_alu_nliterals(slots
[i
], literal
, &nliteral
))
770 // let's check used slots
771 if (prev
[i
] && !slots
[i
]) {
773 num_once_inst
+= is_alu_once_inst(prev
[i
]);
775 } else if (prev
[i
] && slots
[i
]) {
776 if (result
[4] == NULL
&& prev
[4] == NULL
&& slots
[4] == NULL
) {
777 // trans unit is still free try to use it
778 if (is_alu_any_unit_inst(slots
[i
])) {
780 result
[4] = slots
[i
];
781 } else if (is_alu_any_unit_inst(prev
[i
])) {
782 result
[i
] = slots
[i
];
788 } else if(!slots
[i
]) {
791 result
[i
] = slots
[i
];
793 // let's check source gprs
794 struct r600_bc_alu
*alu
= slots
[i
];
795 num_once_inst
+= is_alu_once_inst(alu
);
797 num_src
= r600_bc_get_num_operands(alu
);
798 for (src
= 0; src
< num_src
; ++src
) {
799 // constants doesn't matter
800 if (!is_gpr(alu
->src
[src
].sel
))
803 for (j
= 0; j
< 5; ++j
) {
804 if (!prev
[j
] || !prev
[j
]->dst
.write
)
807 // if it's relative then we can't determin which gpr is really used
808 if (prev
[j
]->dst
.chan
== alu
->src
[src
].chan
&&
809 (prev
[j
]->dst
.sel
== alu
->src
[src
].sel
||
810 prev
[j
]->dst
.rel
|| alu
->src
[src
].rel
))
816 /* more than one PRED_ or KILL_ ? */
817 if (num_once_inst
> 1)
820 /* check if the result can still be swizzlet */
821 r
= check_and_set_bank_swizzle(result
);
825 /* looks like everything worked out right, apply the changes */
827 /* sort instructions */
828 for (i
= 0; i
< 5; ++i
) {
829 slots
[i
] = result
[i
];
831 LIST_DEL(&result
[i
]->list
);
833 LIST_ADDTAIL(&result
[i
]->list
, &bc
->cf_last
->alu
);
837 /* determine new last instruction */
838 LIST_ENTRY(struct r600_bc_alu
, bc
->cf_last
->alu
.prev
, list
)->last
= 1;
840 /* determine new first instruction */
841 for (i
= 0; i
< 5; ++i
) {
843 bc
->cf_last
->curr_bs_head
= result
[i
];
848 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->prev2_bs_head
;
849 bc
->cf_last
->prev2_bs_head
= NULL
;
854 /* This code handles kcache lines as single blocks of 32 constants. We could
855 * probably do slightly better by recognizing that we actually have two
856 * consecutive lines of 16 constants, but the resulting code would also be
857 * somewhat more complicated. */
858 static int r600_bc_alloc_kcache_lines(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, int type
)
860 struct r600_bc_kcache
*kcache
= bc
->cf_last
->kcache
;
861 unsigned int required_lines
;
862 unsigned int free_lines
= 0;
863 unsigned int cache_line
[3];
864 unsigned int count
= 0;
868 /* Collect required cache lines. */
869 for (i
= 0; i
< 3; ++i
) {
873 if (alu
->src
[i
].sel
< 512)
876 line
= ((alu
->src
[i
].sel
- 512) / 32) * 2;
878 for (j
= 0; j
< count
; ++j
) {
879 if (cache_line
[j
] == line
) {
886 cache_line
[count
++] = line
;
889 /* This should never actually happen. */
890 if (count
>= 3) return -ENOMEM
;
892 for (i
= 0; i
< 2; ++i
) {
893 if (kcache
[i
].mode
== V_SQ_CF_KCACHE_NOP
) {
898 /* Filter lines pulled in by previous intructions. Note that this is
899 * only for the required_lines count, we can't remove these from the
900 * cache_line array since we may have to start a new ALU clause. */
901 for (i
= 0, required_lines
= count
; i
< count
; ++i
) {
902 for (j
= 0; j
< 2; ++j
) {
903 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
904 kcache
[j
].addr
== cache_line
[i
]) {
911 /* Start a new ALU clause if needed. */
912 if (required_lines
> free_lines
) {
913 if ((r
= r600_bc_add_cf(bc
))) {
916 bc
->cf_last
->inst
= (type
<< 3);
917 kcache
= bc
->cf_last
->kcache
;
920 /* Setup the kcache lines. */
921 for (i
= 0; i
< count
; ++i
) {
924 for (j
= 0; j
< 2; ++j
) {
925 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
926 kcache
[j
].addr
== cache_line
[i
]) {
934 for (j
= 0; j
< 2; ++j
) {
935 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_NOP
) {
937 kcache
[j
].addr
= cache_line
[i
];
938 kcache
[j
].mode
= V_SQ_CF_KCACHE_LOCK_2
;
944 /* Alter the src operands to refer to the kcache. */
945 for (i
= 0; i
< 3; ++i
) {
946 static const unsigned int base
[] = {128, 160, 256, 288};
949 if (alu
->src
[i
].sel
< 512)
952 alu
->src
[i
].sel
-= 512;
953 line
= (alu
->src
[i
].sel
/ 32) * 2;
955 for (j
= 0; j
< 2; ++j
) {
956 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
957 kcache
[j
].addr
== line
) {
958 alu
->src
[i
].sel
&= 0x1f;
959 alu
->src
[i
].sel
+= base
[j
];
968 int r600_bc_add_alu_type(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
, int type
)
970 struct r600_bc_alu
*nalu
= r600_bc_alu();
971 struct r600_bc_alu
*lalu
;
976 memcpy(nalu
, alu
, sizeof(struct r600_bc_alu
));
978 if (bc
->cf_last
!= NULL
&& bc
->cf_last
->inst
!= (type
<< 3)) {
979 /* check if we could add it anyway */
980 if (bc
->cf_last
->inst
== (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3) &&
981 type
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
) {
982 LIST_FOR_EACH_ENTRY(lalu
, &bc
->cf_last
->alu
, list
) {
983 if (lalu
->predicate
) {
984 bc
->force_add_cf
= 1;
989 bc
->force_add_cf
= 1;
992 /* cf can contains only alu or only vtx or only tex */
993 if (bc
->cf_last
== NULL
|| bc
->force_add_cf
) {
994 r
= r600_bc_add_cf(bc
);
1000 bc
->cf_last
->inst
= (type
<< 3);
1002 /* Setup the kcache for this ALU instruction. This will start a new
1003 * ALU clause if needed. */
1004 if ((r
= r600_bc_alloc_kcache_lines(bc
, nalu
, type
))) {
1009 if (!bc
->cf_last
->curr_bs_head
) {
1010 bc
->cf_last
->curr_bs_head
= nalu
;
1012 /* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
1014 if (nalu
->last
&& (bc
->cf_last
->ndw
>> 1) >= 120) {
1015 bc
->force_add_cf
= 1;
1017 /* replace special constants */
1018 for (i
= 0; i
< 3; i
++) {
1019 if (nalu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
)
1020 r600_bc_special_constants(
1021 nalu
->src
[i
].value
[nalu
->src
[i
].chan
],
1022 &nalu
->src
[i
].sel
, &nalu
->src
[i
].neg
);
1024 if (nalu
->src
[i
].sel
>= bc
->ngpr
&& nalu
->src
[i
].sel
< 128) {
1025 bc
->ngpr
= nalu
->src
[i
].sel
+ 1;
1028 if (nalu
->dst
.sel
>= bc
->ngpr
) {
1029 bc
->ngpr
= nalu
->dst
.sel
+ 1;
1032 LIST_ADDTAIL(&nalu
->list
, &bc
->cf_last
->alu
);
1033 /* each alu use 2 dwords */
1034 bc
->cf_last
->ndw
+= 2;
1037 /* process cur ALU instructions for bank swizzle */
1039 struct r600_bc_alu
*slots
[5];
1040 r
= assign_alu_units(bc
->cf_last
->curr_bs_head
, slots
);
1044 if (bc
->cf_last
->prev_bs_head
) {
1045 r
= merge_inst_groups(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1050 if (bc
->cf_last
->prev_bs_head
) {
1051 r
= replace_gpr_with_pv_ps(slots
, bc
->cf_last
->prev_bs_head
);
1056 r
= check_and_set_bank_swizzle(slots
);
1060 bc
->cf_last
->prev2_bs_head
= bc
->cf_last
->prev_bs_head
;
1061 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->curr_bs_head
;
1062 bc
->cf_last
->curr_bs_head
= NULL
;
1067 int r600_bc_add_alu(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
)
1069 return r600_bc_add_alu_type(bc
, alu
, BC_INST(bc
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
));
1072 static void r600_bc_remove_alu(struct r600_bc_cf
*cf
, struct r600_bc_alu
*alu
)
1074 if (alu
->last
&& alu
->list
.prev
!= &cf
->alu
) {
1075 PREV_ALU(alu
)->last
= 1;
1077 LIST_DEL(&alu
->list
);
1082 int r600_bc_add_vtx(struct r600_bc
*bc
, const struct r600_bc_vtx
*vtx
)
1084 struct r600_bc_vtx
*nvtx
= r600_bc_vtx();
1089 memcpy(nvtx
, vtx
, sizeof(struct r600_bc_vtx
));
1091 /* cf can contains only alu or only vtx or only tex */
1092 if (bc
->cf_last
== NULL
||
1093 (bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX
&&
1094 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) ||
1096 r
= r600_bc_add_cf(bc
);
1101 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_VTX
;
1103 LIST_ADDTAIL(&nvtx
->list
, &bc
->cf_last
->vtx
);
1104 /* each fetch use 4 dwords */
1105 bc
->cf_last
->ndw
+= 4;
1107 if ((bc
->cf_last
->ndw
/ 4) > 7)
1108 bc
->force_add_cf
= 1;
1112 int r600_bc_add_tex(struct r600_bc
*bc
, const struct r600_bc_tex
*tex
)
1114 struct r600_bc_tex
*ntex
= r600_bc_tex();
1119 memcpy(ntex
, tex
, sizeof(struct r600_bc_tex
));
1121 /* cf can contains only alu or only vtx or only tex */
1122 if (bc
->cf_last
== NULL
||
1123 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_TEX
||
1125 r
= r600_bc_add_cf(bc
);
1130 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_TEX
;
1132 LIST_ADDTAIL(&ntex
->list
, &bc
->cf_last
->tex
);
1133 /* each texture fetch use 4 dwords */
1134 bc
->cf_last
->ndw
+= 4;
1136 if ((bc
->cf_last
->ndw
/ 4) > 7)
1137 bc
->force_add_cf
= 1;
1141 int r600_bc_add_cfinst(struct r600_bc
*bc
, int inst
)
1144 r
= r600_bc_add_cf(bc
);
1148 bc
->cf_last
->cond
= V_SQ_CF_COND_ACTIVE
;
1149 bc
->cf_last
->inst
= inst
;
1153 /* common to all 3 families */
1154 static int r600_bc_vtx_build(struct r600_bc
*bc
, struct r600_bc_vtx
*vtx
, unsigned id
)
1156 unsigned fetch_resource_start
= 0;
1158 /* check if we are fetch shader */
1159 /* fetch shader can also access vertex resource,
1160 * first fetch shader resource is at 160
1162 if (bc
->type
== -1) {
1163 switch (bc
->chiprev
) {
1168 fetch_resource_start
= 160;
1171 case CHIPREV_EVERGREEN
:
1172 fetch_resource_start
= 0;
1175 fprintf(stderr
, "%s:%s:%d unknown chiprev %d\n",
1176 __FILE__
, __func__
, __LINE__
, bc
->chiprev
);
1180 bc
->bytecode
[id
++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx
->buffer_id
+ fetch_resource_start
) |
1181 S_SQ_VTX_WORD0_SRC_GPR(vtx
->src_gpr
) |
1182 S_SQ_VTX_WORD0_SRC_SEL_X(vtx
->src_sel_x
) |
1183 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx
->mega_fetch_count
);
1184 bc
->bytecode
[id
++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx
->dst_sel_x
) |
1185 S_SQ_VTX_WORD1_DST_SEL_Y(vtx
->dst_sel_y
) |
1186 S_SQ_VTX_WORD1_DST_SEL_Z(vtx
->dst_sel_z
) |
1187 S_SQ_VTX_WORD1_DST_SEL_W(vtx
->dst_sel_w
) |
1188 S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx
->use_const_fields
) |
1189 S_SQ_VTX_WORD1_DATA_FORMAT(vtx
->data_format
) |
1190 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx
->num_format_all
) |
1191 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx
->format_comp_all
) |
1192 S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx
->srf_mode_all
) |
1193 S_SQ_VTX_WORD1_GPR_DST_GPR(vtx
->dst_gpr
);
1194 bc
->bytecode
[id
++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
1195 bc
->bytecode
[id
++] = 0;
1199 /* common to all 3 families */
1200 static int r600_bc_tex_build(struct r600_bc
*bc
, struct r600_bc_tex
*tex
, unsigned id
)
1202 bc
->bytecode
[id
++] = S_SQ_TEX_WORD0_TEX_INST(tex
->inst
) |
1203 S_SQ_TEX_WORD0_RESOURCE_ID(tex
->resource_id
) |
1204 S_SQ_TEX_WORD0_SRC_GPR(tex
->src_gpr
) |
1205 S_SQ_TEX_WORD0_SRC_REL(tex
->src_rel
);
1206 bc
->bytecode
[id
++] = S_SQ_TEX_WORD1_DST_GPR(tex
->dst_gpr
) |
1207 S_SQ_TEX_WORD1_DST_REL(tex
->dst_rel
) |
1208 S_SQ_TEX_WORD1_DST_SEL_X(tex
->dst_sel_x
) |
1209 S_SQ_TEX_WORD1_DST_SEL_Y(tex
->dst_sel_y
) |
1210 S_SQ_TEX_WORD1_DST_SEL_Z(tex
->dst_sel_z
) |
1211 S_SQ_TEX_WORD1_DST_SEL_W(tex
->dst_sel_w
) |
1212 S_SQ_TEX_WORD1_LOD_BIAS(tex
->lod_bias
) |
1213 S_SQ_TEX_WORD1_COORD_TYPE_X(tex
->coord_type_x
) |
1214 S_SQ_TEX_WORD1_COORD_TYPE_Y(tex
->coord_type_y
) |
1215 S_SQ_TEX_WORD1_COORD_TYPE_Z(tex
->coord_type_z
) |
1216 S_SQ_TEX_WORD1_COORD_TYPE_W(tex
->coord_type_w
);
1217 bc
->bytecode
[id
++] = S_SQ_TEX_WORD2_OFFSET_X(tex
->offset_x
) |
1218 S_SQ_TEX_WORD2_OFFSET_Y(tex
->offset_y
) |
1219 S_SQ_TEX_WORD2_OFFSET_Z(tex
->offset_z
) |
1220 S_SQ_TEX_WORD2_SAMPLER_ID(tex
->sampler_id
) |
1221 S_SQ_TEX_WORD2_SRC_SEL_X(tex
->src_sel_x
) |
1222 S_SQ_TEX_WORD2_SRC_SEL_Y(tex
->src_sel_y
) |
1223 S_SQ_TEX_WORD2_SRC_SEL_Z(tex
->src_sel_z
) |
1224 S_SQ_TEX_WORD2_SRC_SEL_W(tex
->src_sel_w
);
1225 bc
->bytecode
[id
++] = 0;
1229 /* r600 only, r700/eg bits in r700_asm.c */
1230 static int r600_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
)
1232 /* don't replace gpr by pv or ps for destination register */
1233 bc
->bytecode
[id
++] = S_SQ_ALU_WORD0_SRC0_SEL(alu
->src
[0].sel
) |
1234 S_SQ_ALU_WORD0_SRC0_REL(alu
->src
[0].rel
) |
1235 S_SQ_ALU_WORD0_SRC0_CHAN(alu
->src
[0].chan
) |
1236 S_SQ_ALU_WORD0_SRC0_NEG(alu
->src
[0].neg
) |
1237 S_SQ_ALU_WORD0_SRC1_SEL(alu
->src
[1].sel
) |
1238 S_SQ_ALU_WORD0_SRC1_REL(alu
->src
[1].rel
) |
1239 S_SQ_ALU_WORD0_SRC1_CHAN(alu
->src
[1].chan
) |
1240 S_SQ_ALU_WORD0_SRC1_NEG(alu
->src
[1].neg
) |
1241 S_SQ_ALU_WORD0_LAST(alu
->last
);
1244 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1245 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1246 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1247 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1248 S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu
->src
[2].sel
) |
1249 S_SQ_ALU_WORD1_OP3_SRC2_REL(alu
->src
[2].rel
) |
1250 S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu
->src
[2].chan
) |
1251 S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu
->src
[2].neg
) |
1252 S_SQ_ALU_WORD1_OP3_ALU_INST(alu
->inst
) |
1253 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
);
1255 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1256 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1257 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1258 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1259 S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu
->src
[0].abs
) |
1260 S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu
->src
[1].abs
) |
1261 S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu
->dst
.write
) |
1262 S_SQ_ALU_WORD1_OP2_OMOD(alu
->omod
) |
1263 S_SQ_ALU_WORD1_OP2_ALU_INST(alu
->inst
) |
1264 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
) |
1265 S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu
->predicate
) |
1266 S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu
->predicate
);
1280 static enum cf_class
get_cf_class(struct r600_bc_cf
*cf
)
1283 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1284 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1285 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1286 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1287 return CF_CLASS_ALU
;
1289 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1290 return CF_CLASS_TEXTURE
;
1292 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1293 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1294 return CF_CLASS_VERTEX
;
1296 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1297 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1298 return CF_CLASS_EXPORT
;
1300 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1301 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1302 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1303 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1304 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1305 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1306 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1307 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1308 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1309 return CF_CLASS_OTHER
;
1312 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1317 /* common for r600/r700 - eg in eg_asm.c */
1318 static int r600_bc_cf_build(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
1320 unsigned id
= cf
->id
;
1321 unsigned end_of_program
= bc
->cf
.prev
== &cf
->list
;
1323 switch (get_cf_class(cf
)) {
1325 assert(!end_of_program
);
1326 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD0_ADDR(cf
->addr
>> 1) |
1327 S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf
->kcache
[0].mode
) |
1328 S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf
->kcache
[0].bank
) |
1329 S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf
->kcache
[1].bank
);
1331 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD1_CF_INST(cf
->inst
>> 3) |
1332 S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf
->kcache
[1].mode
) |
1333 S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf
->kcache
[0].addr
) |
1334 S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf
->kcache
[1].addr
) |
1335 S_SQ_CF_ALU_WORD1_BARRIER(cf
->barrier
) |
1336 S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc
->chiprev
== CHIPREV_R600
? cf
->r6xx_uses_waterfall
: 0) |
1337 S_SQ_CF_ALU_WORD1_COUNT((cf
->ndw
/ 2) - 1);
1339 case CF_CLASS_TEXTURE
:
1340 case CF_CLASS_VERTEX
:
1341 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->addr
>> 1);
1342 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1343 S_SQ_CF_WORD1_BARRIER(cf
->barrier
) |
1344 S_SQ_CF_WORD1_COUNT((cf
->ndw
/ 4) - 1) |
1345 S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program
);
1347 case CF_CLASS_EXPORT
:
1348 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf
->output
.gpr
) |
1349 S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf
->output
.elem_size
) |
1350 S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf
->output
.array_base
) |
1351 S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf
->output
.type
);
1352 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf
->output
.burst_count
- 1) |
1353 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf
->output
.swizzle_x
) |
1354 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf
->output
.swizzle_y
) |
1355 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf
->output
.swizzle_z
) |
1356 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf
->output
.swizzle_w
) |
1357 S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf
->barrier
) |
1358 S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf
->inst
) |
1359 S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program
);
1361 case CF_CLASS_OTHER
:
1362 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->cf_addr
>> 1);
1363 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1364 S_SQ_CF_WORD1_BARRIER(cf
->barrier
) |
1365 S_SQ_CF_WORD1_COND(cf
->cond
) |
1366 S_SQ_CF_WORD1_POP_COUNT(cf
->pop_count
) |
1367 S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program
);
1371 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
/*
 * One live range of a GPR: [start, end] instruction ids, plus the register
 * it is remapped to (-1 = not remapped).
 * NOTE(review): field lines were lost in extraction; reconstructed from
 * the uses below (replacement/start/end are all referenced).
 */
struct gpr_usage_range {
	int		replacement;	/* GPR this range is remapped to, or -1 */
	int32_t		start;		/* first id the value is live at (-1 = read before write) */
	int32_t		end;		/* last id the value is read at (-1 = still open) */
};

/* Per-GPR usage bookkeeping gathered during the analysis pass. */
struct gpr_usage {
	unsigned		channels:4;	/* bitmask of channels still awaiting a write */
	int32_t			first_write;	/* id of first write of the current group, -1 if none */
	int32_t			last_write[4];	/* per-channel id of the most recent write */
	unsigned		nranges;	/* number of entries in ranges */
	struct gpr_usage_range	*ranges;	/* malloc'ed array of live ranges */
};
1391 static struct gpr_usage_range
* add_gpr_usage_range(struct gpr_usage
*usage
)
1394 usage
->ranges
= realloc(usage
->ranges
, usage
->nranges
* sizeof(struct gpr_usage_range
));
1397 return &usage
->ranges
[usage
->nranges
-1];
1400 static void notice_gpr_read(struct gpr_usage
*usage
, int32_t id
, unsigned chan
)
1402 usage
->channels
|= 1 << chan
;
1403 usage
->first_write
= -1;
1404 if (!usage
->nranges
) {
1405 struct gpr_usage_range
* range
= add_gpr_usage_range(usage
);
1406 range
->replacement
= -1;
1410 if (usage
->ranges
[usage
->nranges
-1].end
< id
)
1411 usage
->ranges
[usage
->nranges
-1].end
= id
;
1414 static void notice_gpr_rel_read(struct gpr_usage usage
[128], int32_t id
, unsigned chan
)
1417 for (i
= 0; i
< 128; ++i
)
1418 notice_gpr_read(&usage
[i
], id
, chan
);
1421 static void notice_gpr_last_write(struct gpr_usage
*usage
, int32_t id
, unsigned chan
)
1423 usage
->last_write
[chan
] = id
;
1426 static void notice_gpr_write(struct gpr_usage
*usage
, int32_t id
, unsigned chan
,
1427 int predicate
, int prefered_replacement
)
1429 int32_t start
= usage
->first_write
!= -1 ? usage
->first_write
: id
;
1430 usage
->channels
&= ~(1 << chan
);
1431 if (usage
->channels
) {
1432 if (usage
->first_write
== -1)
1433 usage
->first_write
= id
;
1434 } else if (!usage
->nranges
|| (usage
->ranges
[usage
->nranges
-1].start
!= start
&& !predicate
)) {
1435 usage
->first_write
= start
;
1436 struct gpr_usage_range
* range
= add_gpr_usage_range(usage
);
1437 range
->replacement
= prefered_replacement
;
1438 range
->start
= start
;
1440 } else if (usage
->ranges
[usage
->nranges
-1].start
== start
&& prefered_replacement
!= -1) {
1441 usage
->ranges
[usage
->nranges
-1].replacement
= prefered_replacement
;
1443 notice_gpr_last_write(usage
, id
, chan
);
1446 static void notice_gpr_rel_last_write(struct gpr_usage usage
[128], int32_t id
, unsigned chan
)
1449 for (i
= 0; i
< 128; ++i
)
1450 notice_gpr_last_write(&usage
[i
], id
, chan
);
1453 static void notice_gpr_rel_write(struct gpr_usage usage
[128], int32_t id
, unsigned chan
)
1456 for (i
= 0; i
< 128; ++i
)
1457 notice_gpr_write(&usage
[i
], id
, chan
, 1, -1);
1460 static void notice_alu_src_gprs(struct r600_bc_alu
*alu
, struct gpr_usage usage
[128], int32_t id
)
1462 unsigned src
, num_src
;
1464 num_src
= r600_bc_get_num_operands(alu
);
1465 for (src
= 0; src
< num_src
; ++src
) {
1466 // constants doesn't matter
1467 if (!is_gpr(alu
->src
[src
].sel
))
1470 if (alu
->src
[src
].rel
)
1471 notice_gpr_rel_read(usage
, id
, alu
->src
[src
].chan
);
1473 notice_gpr_read(&usage
[alu
->src
[src
].sel
], id
, alu
->src
[src
].chan
);
1477 static void notice_alu_dst_gprs(struct r600_bc_alu
*alu_first
, struct gpr_usage usage
[128],
1478 int32_t id
, int predicate
)
1480 struct r600_bc_alu
*alu
;
1481 for (alu
= alu_first
; alu
; alu
= LIST_ENTRY(struct r600_bc_alu
, alu
->list
.next
, list
)) {
1482 if (alu
->dst
.write
) {
1484 notice_gpr_rel_write(usage
, id
, alu
->dst
.chan
);
1485 else if (alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
&& is_gpr(alu
->src
[0].sel
))
1486 notice_gpr_write(&usage
[alu
->dst
.sel
], id
, alu
->dst
.chan
,
1487 predicate
, alu
->src
[0].sel
);
1489 notice_gpr_write(&usage
[alu
->dst
.sel
], id
, alu
->dst
.chan
, predicate
, -1);
1497 static void notice_tex_gprs(struct r600_bc_tex
*tex
, struct gpr_usage usage
[128],
1498 int32_t id
, int predicate
)
1501 if (tex
->src_sel_x
< 4)
1502 notice_gpr_rel_read(usage
, id
, tex
->src_sel_x
);
1503 if (tex
->src_sel_y
< 4)
1504 notice_gpr_rel_read(usage
, id
, tex
->src_sel_y
);
1505 if (tex
->src_sel_z
< 4)
1506 notice_gpr_rel_read(usage
, id
, tex
->src_sel_z
);
1507 if (tex
->src_sel_w
< 4)
1508 notice_gpr_rel_read(usage
, id
, tex
->src_sel_w
);
1510 if (tex
->src_sel_x
< 4)
1511 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_x
);
1512 if (tex
->src_sel_y
< 4)
1513 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_y
);
1514 if (tex
->src_sel_z
< 4)
1515 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_z
);
1516 if (tex
->src_sel_w
< 4)
1517 notice_gpr_read(&usage
[tex
->src_gpr
], id
, tex
->src_sel_w
);
1520 if (tex
->dst_sel_x
!= 7)
1521 notice_gpr_rel_write(usage
, id
, 0);
1522 if (tex
->dst_sel_y
!= 7)
1523 notice_gpr_rel_write(usage
, id
, 1);
1524 if (tex
->dst_sel_z
!= 7)
1525 notice_gpr_rel_write(usage
, id
, 2);
1526 if (tex
->dst_sel_w
!= 7)
1527 notice_gpr_rel_write(usage
, id
, 3);
1529 if (tex
->dst_sel_x
!= 7)
1530 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 0, predicate
, -1);
1531 if (tex
->dst_sel_y
!= 7)
1532 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 1, predicate
, -1);
1533 if (tex
->dst_sel_z
!= 7)
1534 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 2, predicate
, -1);
1535 if (tex
->dst_sel_w
!= 7)
1536 notice_gpr_write(&usage
[tex
->dst_gpr
], id
, 3, predicate
, -1);
1540 static void notice_vtx_gprs(struct r600_bc_vtx
*vtx
, struct gpr_usage usage
[128],
1541 int32_t id
, int predicate
)
1543 notice_gpr_read(&usage
[vtx
->src_gpr
], id
, vtx
->src_sel_x
);
1545 if (vtx
->dst_sel_x
!= 7)
1546 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 0, predicate
, -1);
1547 if (vtx
->dst_sel_y
!= 7)
1548 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 1, predicate
, -1);
1549 if (vtx
->dst_sel_z
!= 7)
1550 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 2, predicate
, -1);
1551 if (vtx
->dst_sel_w
!= 7)
1552 notice_gpr_write(&usage
[vtx
->dst_gpr
], id
, 3, predicate
, -1);
1555 static void notice_export_gprs(struct r600_bc_cf
*cf
, struct gpr_usage usage
[128],
1556 struct r600_bc_cf
*export_cf
[128], int32_t export_remap
[128])
1558 //TODO handle other memory operations
1559 struct gpr_usage
*output
= &usage
[cf
->output
.gpr
];
1560 int32_t id
= (output
->last_write
[0] + 0x100) & ~0xFF;
1562 export_cf
[cf
->output
.gpr
] = cf
;
1563 export_remap
[cf
->output
.gpr
] = id
;
1564 if (cf
->output
.swizzle_x
< 4)
1565 notice_gpr_read(output
, id
, cf
->output
.swizzle_x
);
1566 if (cf
->output
.swizzle_y
< 4)
1567 notice_gpr_read(output
, id
, cf
->output
.swizzle_y
);
1568 if (cf
->output
.swizzle_z
< 4)
1569 notice_gpr_read(output
, id
, cf
->output
.swizzle_z
);
1570 if (cf
->output
.swizzle_w
< 4)
1571 notice_gpr_read(output
, id
, cf
->output
.swizzle_w
);
1574 static struct gpr_usage_range
*find_src_range(struct gpr_usage
*usage
, int32_t id
)
1577 for (i
= 0; i
< usage
->nranges
; ++i
) {
1578 struct gpr_usage_range
* range
= &usage
->ranges
[i
];
1580 if (range
->start
< id
&& id
<= range
->end
)
1586 static struct gpr_usage_range
*find_dst_range(struct gpr_usage
*usage
, int32_t id
)
1589 for (i
= 0; i
< usage
->nranges
; ++i
) {
1590 struct gpr_usage_range
* range
= &usage
->ranges
[i
];
1591 int32_t end
= range
->end
;
1593 if (range
->start
<= id
&& (id
< end
|| end
== -1))
1596 assert(0); /* should not happen */
1600 static int is_barrier_needed(struct gpr_usage
*usage
, int32_t id
, unsigned chan
, int32_t last_barrier
)
1602 if (usage
->last_write
[chan
] != (id
& ~0xFF))
1603 return usage
->last_write
[chan
] >= last_barrier
;
1608 static int is_intersection(struct gpr_usage_range
* a
, struct gpr_usage_range
* b
)
1610 return a
->start
<= b
->end
&& b
->start
< a
->end
;
1613 static int rate_replacement(struct gpr_usage
*usage
, struct gpr_usage_range
* range
)
1616 int32_t best_start
= 0x3FFFFFFF, best_end
= 0x3FFFFFFF;
1618 for (i
= 0; i
< usage
->nranges
; ++i
) {
1619 if (usage
->ranges
[i
].replacement
!= -1)
1620 continue; /* ignore already remapped ranges */
1622 if (is_intersection(&usage
->ranges
[i
], range
))
1623 return -1; /* forget it if usages overlap */
1625 if (range
->start
>= usage
->ranges
[i
].end
)
1626 best_start
= MIN2(best_start
, range
->start
- usage
->ranges
[i
].end
);
1628 if (range
->end
!= -1 && range
->end
<= usage
->ranges
[i
].start
)
1629 best_end
= MIN2(best_end
, usage
->ranges
[i
].start
- range
->end
);
1631 return best_start
+ best_end
;
1634 static void find_replacement(struct gpr_usage usage
[128], unsigned current
,
1635 struct gpr_usage_range
*range
, int is_export
)
1638 int best_gpr
= -1, best_rate
= 0x7FFFFFFF;
1640 if (range
->replacement
!= -1 && range
->replacement
<= current
) {
1641 struct gpr_usage_range
*other
= find_src_range(&usage
[range
->replacement
], range
->start
);
1642 if (other
&& other
->replacement
!= -1)
1643 range
->replacement
= other
->replacement
;
1646 if (range
->replacement
!= -1 && range
->replacement
< current
) {
1647 int rate
= rate_replacement(&usage
[range
->replacement
], range
);
1649 /* check if prefered replacement can be used */
1652 best_gpr
= range
->replacement
;
1656 if (best_gpr
== -1 && (range
->start
& ~0xFF) == (range
->end
& ~0xFF)) {
1657 /* register is just used inside one ALU clause */
1658 /* try to use clause temporaryis for it */
1659 for (i
= 127; i
> 123; --i
) {
1660 int rate
= rate_replacement(&usage
[i
], range
);
1662 if (rate
== -1) /* can't be used because ranges overlap */
1665 if (rate
< best_rate
) {
1669 /* can't get better than this */
1670 if (rate
== 0 || is_export
)
1676 if (best_gpr
== -1) {
1677 for (i
= 0; i
< current
; ++i
) {
1678 int rate
= rate_replacement(&usage
[i
], range
);
1680 if (rate
== -1) /* can't be used because ranges overlap */
1683 if (rate
< best_rate
) {
1687 /* can't get better than this */
1694 range
->replacement
= best_gpr
;
1695 if (best_gpr
!= -1) {
1696 struct gpr_usage_range
*reservation
= add_gpr_usage_range(&usage
[best_gpr
]);
1697 reservation
->replacement
= -1;
1698 reservation
->start
= range
->start
;
1699 reservation
->end
= range
->end
;
1703 static void find_export_replacement(struct gpr_usage usage
[128],
1704 struct gpr_usage_range
*range
, struct r600_bc_cf
*current
,
1705 struct r600_bc_cf
*next
, int32_t next_id
)
1707 if (!next
|| next_id
<= range
->start
|| next_id
> range
->end
)
1710 if (current
->output
.type
!= next
->output
.type
)
1713 if ((current
->output
.array_base
+ 1) != next
->output
.array_base
)
1716 find_src_range(&usage
[next
->output
.gpr
], next_id
)->replacement
= range
->replacement
+ 1;
1719 static void replace_alu_gprs(struct r600_bc_alu
*alu
, struct gpr_usage usage
[128],
1720 int32_t id
, int32_t last_barrier
, unsigned *barrier
)
1722 struct gpr_usage
*cur_usage
;
1723 struct gpr_usage_range
*range
;
1724 unsigned src
, num_src
;
1726 num_src
= r600_bc_get_num_operands(alu
);
1727 for (src
= 0; src
< num_src
; ++src
) {
1728 // constants doesn't matter
1729 if (!is_gpr(alu
->src
[src
].sel
))
1732 cur_usage
= &usage
[alu
->src
[src
].sel
];
1733 range
= find_src_range(cur_usage
, id
);
1734 if (range
->replacement
!= -1)
1735 alu
->src
[src
].sel
= range
->replacement
;
1737 *barrier
|= is_barrier_needed(cur_usage
, id
, alu
->src
[src
].chan
, last_barrier
);
1740 if (alu
->dst
.write
) {
1741 cur_usage
= &usage
[alu
->dst
.sel
];
1742 range
= find_dst_range(cur_usage
, id
);
1743 if (range
->replacement
== alu
->dst
.sel
) {
1747 /*TODO: really check that register 123 is useable */
1749 } else if (range
->replacement
!= -1) {
1750 alu
->dst
.sel
= range
->replacement
;
1753 notice_gpr_rel_last_write(usage
, id
, alu
->dst
.chan
);
1755 notice_gpr_last_write(cur_usage
, id
, alu
->dst
.chan
);
1759 static void replace_tex_gprs(struct r600_bc_tex
*tex
, struct gpr_usage usage
[128],
1760 int32_t id
, int32_t last_barrier
, unsigned *barrier
)
1762 struct gpr_usage
*cur_usage
= &usage
[tex
->src_gpr
];
1763 struct gpr_usage_range
*range
= find_src_range(cur_usage
, id
);
1768 if (tex
->src_sel_x
< 4)
1769 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_x
, last_barrier
);
1770 if (tex
->src_sel_y
< 4)
1771 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_y
, last_barrier
);
1772 if (tex
->src_sel_z
< 4)
1773 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_z
, last_barrier
);
1774 if (tex
->src_sel_w
< 4)
1775 *barrier
|= is_barrier_needed(cur_usage
, id
, tex
->src_sel_w
, last_barrier
);
1778 if (range
->replacement
!= -1)
1779 tex
->src_gpr
= range
->replacement
;
1781 cur_usage
= &usage
[tex
->dst_gpr
];
1782 range
= find_dst_range(cur_usage
, id
);
1783 if (range
->replacement
!= -1)
1784 tex
->dst_gpr
= range
->replacement
;
1787 if (tex
->dst_sel_x
!= 7)
1788 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_x
);
1789 if (tex
->dst_sel_y
!= 7)
1790 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_y
);
1791 if (tex
->dst_sel_z
!= 7)
1792 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_z
);
1793 if (tex
->dst_sel_w
!= 7)
1794 notice_gpr_rel_last_write(usage
, id
, tex
->dst_sel_w
);
1796 if (tex
->dst_sel_x
!= 7)
1797 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_x
);
1798 if (tex
->dst_sel_y
!= 7)
1799 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_y
);
1800 if (tex
->dst_sel_z
!= 7)
1801 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_z
);
1802 if (tex
->dst_sel_w
!= 7)
1803 notice_gpr_last_write(cur_usage
, id
, tex
->dst_sel_w
);
1807 static void replace_vtx_gprs(struct r600_bc_vtx
*vtx
, struct gpr_usage usage
[128],
1808 int32_t id
, int32_t last_barrier
, unsigned *barrier
)
1810 struct gpr_usage
*cur_usage
= &usage
[vtx
->src_gpr
];
1811 struct gpr_usage_range
*range
= find_src_range(cur_usage
, id
);
1813 *barrier
|= is_barrier_needed(cur_usage
, id
, vtx
->src_sel_x
, last_barrier
);
1815 if (range
->replacement
!= -1)
1816 vtx
->src_gpr
= range
->replacement
;
1818 cur_usage
= &usage
[vtx
->dst_gpr
];
1819 range
= find_dst_range(cur_usage
, id
);
1820 if (range
->replacement
!= -1)
1821 vtx
->dst_gpr
= range
->replacement
;
1823 if (vtx
->dst_sel_x
!= 7)
1824 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_x
);
1825 if (vtx
->dst_sel_y
!= 7)
1826 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_y
);
1827 if (vtx
->dst_sel_z
!= 7)
1828 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_z
);
1829 if (vtx
->dst_sel_w
!= 7)
1830 notice_gpr_last_write(cur_usage
, id
, vtx
->dst_sel_w
);
1833 static void replace_export_gprs(struct r600_bc_cf
*cf
, struct gpr_usage usage
[128],
1834 int32_t id
, int32_t last_barrier
)
1836 //TODO handle other memory operations
1837 struct gpr_usage
*cur_usage
= &usage
[cf
->output
.gpr
];
1838 struct gpr_usage_range
*range
= find_src_range(cur_usage
, id
);
1841 if (cf
->output
.swizzle_x
< 4)
1842 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_x
, last_barrier
);
1843 if (cf
->output
.swizzle_y
< 4)
1844 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_y
, last_barrier
);
1845 if (cf
->output
.swizzle_z
< 4)
1846 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_z
, last_barrier
);
1847 if (cf
->output
.swizzle_w
< 4)
1848 cf
->barrier
|= is_barrier_needed(cur_usage
, -1, cf
->output
.swizzle_w
, last_barrier
);
1850 if (range
->replacement
!= -1)
1851 cf
->output
.gpr
= range
->replacement
;
1854 static void optimize_alu_inst(struct r600_bc_cf
*cf
, struct r600_bc_alu
*alu
)
1856 struct r600_bc_alu
*alu_next
;
1858 unsigned src
, num_src
;
1860 /* check if a MOV could be optimized away */
1861 if (alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
) {
1863 /* destination equals source? */
1864 if (alu
->dst
.sel
!= alu
->src
[0].sel
||
1865 alu
->dst
.chan
!= alu
->src
[0].chan
)
1868 /* any special handling for the source? */
1869 if (alu
->src
[0].rel
|| alu
->src
[0].neg
|| alu
->src
[0].abs
)
1872 /* any special handling for destination? */
1873 if (alu
->dst
.rel
|| alu
->dst
.clamp
)
1876 /* ok find next instruction group and check if ps/pv is used */
1877 for (alu_next
= alu
; !alu_next
->last
; alu_next
= NEXT_ALU(alu_next
));
1879 if (alu_next
->list
.next
!= &cf
->alu
) {
1880 chan
= is_alu_reduction_inst(alu
) ? 0 : alu
->dst
.chan
;
1881 for (alu_next
= NEXT_ALU(alu_next
); alu_next
; alu_next
= NEXT_ALU(alu_next
)) {
1882 num_src
= r600_bc_get_num_operands(alu_next
);
1883 for (src
= 0; src
< num_src
; ++src
) {
1884 if (alu_next
->src
[src
].sel
== V_SQ_ALU_SRC_PV
&&
1885 alu_next
->src
[src
].chan
== chan
)
1888 if (alu_next
->src
[src
].sel
== V_SQ_ALU_SRC_PS
)
1897 r600_bc_remove_alu(cf
, alu
);
1901 static void optimize_export_inst(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
1903 struct r600_bc_cf
*prev
= LIST_ENTRY(struct r600_bc_cf
, cf
->list
.prev
, list
);
1904 if (&prev
->list
== &bc
->cf
||
1905 prev
->inst
!= cf
->inst
||
1906 prev
->output
.type
!= cf
->output
.type
||
1907 prev
->output
.elem_size
!= cf
->output
.elem_size
||
1908 prev
->output
.swizzle_x
!= cf
->output
.swizzle_x
||
1909 prev
->output
.swizzle_y
!= cf
->output
.swizzle_y
||
1910 prev
->output
.swizzle_z
!= cf
->output
.swizzle_z
||
1911 prev
->output
.swizzle_w
!= cf
->output
.swizzle_w
)
1914 if ((prev
->output
.burst_count
+ cf
->output
.burst_count
) > 16)
1917 if ((prev
->output
.gpr
+ prev
->output
.burst_count
) == cf
->output
.gpr
&&
1918 (prev
->output
.array_base
+ prev
->output
.burst_count
) == cf
->output
.array_base
) {
1920 prev
->output
.burst_count
+= cf
->output
.burst_count
;
1921 r600_bc_remove_cf(bc
, cf
);
1923 } else if (prev
->output
.gpr
== (cf
->output
.gpr
+ cf
->output
.burst_count
) &&
1924 prev
->output
.array_base
== (cf
->output
.array_base
+ cf
->output
.burst_count
)) {
1926 cf
->output
.burst_count
+= prev
->output
.burst_count
;
1927 r600_bc_remove_cf(bc
, prev
);
1931 static void r600_bc_optimize(struct r600_bc
*bc
)
1933 struct r600_bc_cf
*cf
, *next_cf
;
1934 struct r600_bc_alu
*first
, *next_alu
;
1935 struct r600_bc_alu
*alu
;
1936 struct r600_bc_vtx
*vtx
;
1937 struct r600_bc_tex
*tex
;
1938 struct gpr_usage usage
[128];
1940 /* assume that each gpr is exported only once */
1941 struct r600_bc_cf
*export_cf
[128] = { NULL
};
1942 int32_t export_remap
[128];
1944 int32_t id
, barrier
[bc
->nstack
];
1945 unsigned i
, j
, stack
, predicate
, old_stack
;
1947 memset(&usage
, 0, sizeof(usage
));
1948 for (i
= 0; i
< 128; ++i
) {
1949 usage
[i
].first_write
= -1;
1950 usage
[i
].last_write
[0] = -1;
1951 usage
[i
].last_write
[1] = -1;
1952 usage
[i
].last_write
[2] = -1;
1953 usage
[i
].last_write
[3] = -1;
1956 /* first gather some informations about the gpr usage */
1958 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1959 switch (get_cf_class(cf
)) {
1963 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
1966 notice_alu_src_gprs(alu
, usage
, id
);
1968 notice_alu_dst_gprs(first
, usage
, id
, predicate
|| stack
> 0);
1972 if (is_alu_pred_inst(alu
))
1975 if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3)
1977 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3)
1979 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3)
1982 case CF_CLASS_TEXTURE
:
1983 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
1984 notice_tex_gprs(tex
, usage
, id
++, stack
> 0);
1987 case CF_CLASS_VERTEX
:
1988 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
1989 notice_vtx_gprs(vtx
, usage
, id
++, stack
> 0);
1992 case CF_CLASS_EXPORT
:
1993 notice_export_gprs(cf
, usage
, export_cf
, export_remap
);
1994 continue; // don't increment id
1995 case CF_CLASS_OTHER
:
1997 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1998 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1999 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
2002 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
2003 stack
-= cf
->pop_count
;
2007 // TODO implement loop handling
2016 /* try to optimize gpr usage */
2017 for (i
= 0; i
< 124; ++i
) {
2018 for (j
= 0; j
< usage
[i
].nranges
; ++j
) {
2019 struct gpr_usage_range
*range
= &usage
[i
].ranges
[j
];
2020 int is_export
= export_cf
[i
] && export_cf
[i
+ 1] &&
2021 range
->start
< export_remap
[i
] &&
2022 export_remap
[i
] <= range
->end
;
2024 if (range
->start
== -1)
2025 range
->replacement
= -1;
2026 else if (range
->end
== -1)
2027 range
->replacement
= i
;
2029 find_replacement(usage
, i
, range
, is_export
);
2031 if (range
->replacement
== -1)
2033 else if (range
->replacement
< i
&& range
->replacement
> bc
->ngpr
)
2034 bc
->ngpr
= range
->replacement
;
2036 if (is_export
&& range
->replacement
!= -1) {
2037 find_export_replacement(usage
, range
, export_cf
[i
],
2038 export_cf
[i
+ 1], export_remap
[i
+ 1]);
2044 /* apply the changes */
2045 for (i
= 0; i
< 128; ++i
) {
2046 usage
[i
].last_write
[0] = -1;
2047 usage
[i
].last_write
[1] = -1;
2048 usage
[i
].last_write
[2] = -1;
2049 usage
[i
].last_write
[3] = -1;
2053 LIST_FOR_EACH_ENTRY_SAFE(cf
, next_cf
, &bc
->cf
, list
) {
2055 switch (get_cf_class(cf
)) {
2060 LIST_FOR_EACH_ENTRY_SAFE(alu
, next_alu
, &cf
->alu
, list
) {
2061 replace_alu_gprs(alu
, usage
, id
, barrier
[stack
], &cf
->barrier
);
2065 if (is_alu_pred_inst(alu
))
2068 if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3)
2069 optimize_alu_inst(cf
, alu
);
2071 if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3)
2073 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3)
2075 else if (cf
->inst
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3)
2077 if (LIST_IS_EMPTY(&cf
->alu
)) {
2078 r600_bc_remove_cf(bc
, cf
);
2082 case CF_CLASS_TEXTURE
:
2084 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
2085 replace_tex_gprs(tex
, usage
, id
++, barrier
[stack
], &cf
->barrier
);
2088 case CF_CLASS_VERTEX
:
2090 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
2091 replace_vtx_gprs(vtx
, usage
, id
++, barrier
[stack
], &cf
->barrier
);
2094 case CF_CLASS_EXPORT
:
2095 continue; // don't increment id
2096 case CF_CLASS_OTHER
:
2097 if (cf
->inst
== V_SQ_CF_WORD1_SQ_CF_INST_POP
) {
2099 stack
-= cf
->pop_count
;
2105 if (cf
&& cf
->barrier
)
2106 barrier
[old_stack
] = id
;
2108 for (i
= old_stack
+ 1; i
<= stack
; ++i
)
2109 barrier
[i
] = barrier
[old_stack
];
2112 if (stack
!= 0) /* ensue exports are placed outside of conditional blocks */
2115 for (i
= 0; i
< 128; ++i
) {
2116 if (!export_cf
[i
] || id
< export_remap
[i
])
2119 r600_bc_move_cf(bc
, export_cf
[i
], next_cf
);
2120 replace_export_gprs(export_cf
[i
], usage
, export_remap
[i
], barrier
[stack
]);
2121 if (export_cf
[i
]->barrier
)
2122 barrier
[stack
] = id
- 1;
2123 next_cf
= LIST_ENTRY(struct r600_bc_cf
, export_cf
[i
]->list
.next
, list
);
2124 optimize_export_inst(bc
, export_cf
[i
]);
2125 export_cf
[i
] = NULL
;
2131 for (i
= 0; i
< 128; ++i
) {
2132 free(usage
[i
].ranges
);
2136 int r600_bc_build(struct r600_bc
*bc
)
2138 struct r600_bc_cf
*cf
;
2139 struct r600_bc_alu
*alu
;
2140 struct r600_bc_vtx
*vtx
;
2141 struct r600_bc_tex
*tex
;
2142 struct r600_bc_cf
*exports
[4] = { NULL
};
2143 uint32_t literal
[4];
2148 if (bc
->callstack
[0].max
> 0)
2149 bc
->nstack
= ((bc
->callstack
[0].max
+ 3) >> 2) + 2;
2150 if (bc
->type
== TGSI_PROCESSOR_VERTEX
&& !bc
->nstack
) {
2154 r600_bc_optimize(bc
);
2156 /* first path compute addr of each CF block */
2157 /* addr start after all the CF instructions */
2158 addr
= LIST_ENTRY(struct r600_bc_cf
, bc
->cf
.prev
, list
)->id
+ 2;
2159 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
2160 switch (get_cf_class(cf
)) {
2163 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
2164 r
= r600_bc_alu_nliterals(alu
, literal
, &nliteral
);
2168 cf
->ndw
+= align(nliteral
, 2);
2173 case CF_CLASS_TEXTURE
:
2174 case CF_CLASS_VERTEX
:
2175 /* fetch node need to be 16 bytes aligned*/
2177 addr
&= 0xFFFFFFFCUL
;
2180 case CF_CLASS_EXPORT
:
2181 if (cf
->inst
== BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
))
2182 exports
[cf
->output
.type
] = cf
;
2184 case CF_CLASS_OTHER
:
2187 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
2192 bc
->ndw
= cf
->addr
+ cf
->ndw
;
2195 /* set export done on last export of each type */
2196 for (i
= 0; i
< 4; ++i
) {
2198 exports
[i
]->inst
= BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
);
2203 bc
->bytecode
= calloc(1, bc
->ndw
* 4);
2204 if (bc
->bytecode
== NULL
)
2206 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
2208 if (bc
->chiprev
== CHIPREV_EVERGREEN
)
2209 r
= eg_bc_cf_build(bc
, cf
);
2211 r
= r600_bc_cf_build(bc
, cf
);
2214 switch (get_cf_class(cf
)) {
2217 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
2218 r
= r600_bc_alu_nliterals(alu
, literal
, &nliteral
);
2221 r600_bc_alu_adjust_literals(alu
, literal
, nliteral
);
2222 switch(bc
->chiprev
) {
2224 r
= r600_bc_alu_build(bc
, alu
, addr
);
2227 case CHIPREV_EVERGREEN
: /* eg alu is same encoding as r700 */
2228 r
= r700_bc_alu_build(bc
, alu
, addr
);
2231 R600_ERR("unknown family %d\n", bc
->family
);
2238 for (i
= 0; i
< align(nliteral
, 2); ++i
) {
2239 bc
->bytecode
[addr
++] = literal
[i
];
2245 case CF_CLASS_VERTEX
:
2246 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
2247 r
= r600_bc_vtx_build(bc
, vtx
, addr
);
2253 case CF_CLASS_TEXTURE
:
2254 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
2255 r
= r600_bc_tex_build(bc
, tex
, addr
);
2261 case CF_CLASS_EXPORT
:
2262 case CF_CLASS_OTHER
:
2265 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
2272 void r600_bc_clear(struct r600_bc
*bc
)
2274 struct r600_bc_cf
*cf
= NULL
, *next_cf
;
2277 bc
->bytecode
= NULL
;
2279 LIST_FOR_EACH_ENTRY_SAFE(cf
, next_cf
, &bc
->cf
, list
) {
2280 struct r600_bc_alu
*alu
= NULL
, *next_alu
;
2281 struct r600_bc_tex
*tex
= NULL
, *next_tex
;
2282 struct r600_bc_tex
*vtx
= NULL
, *next_vtx
;
2284 LIST_FOR_EACH_ENTRY_SAFE(alu
, next_alu
, &cf
->alu
, list
) {
2288 LIST_INITHEAD(&cf
->alu
);
2290 LIST_FOR_EACH_ENTRY_SAFE(tex
, next_tex
, &cf
->tex
, list
) {
2294 LIST_INITHEAD(&cf
->tex
);
2296 LIST_FOR_EACH_ENTRY_SAFE(vtx
, next_vtx
, &cf
->vtx
, list
) {
2300 LIST_INITHEAD(&cf
->vtx
);
2305 LIST_INITHEAD(&cf
->list
);
/*
 * r600_bc_dump() - debug helper: pretty-print a compiled r600 bytecode
 * stream to stderr, one pair of lines per CF/ALU/TEX/VTX 64-bit word,
 * followed by any inlined float literal constants.
 *
 * NOTE(review): this extract is incomplete -- the opening brace, the
 * declarations of `id`, `i`, `nliteral` and `chip`, the CF_CLASS_ALU
 * case label, the per-case `id++`/`break` statements, the TEX/VTX
 * print bodies and the closing braces were lost in extraction.  Only
 * comments have been added; the surviving tokens are unchanged.
 */
void r600_bc_dump(struct r600_bc *bc)
	struct r600_bc_cf *cf;
	struct r600_bc_alu *alu;
	struct r600_bc_vtx *vtx;
	struct r600_bc_tex *tex;
	/* scratch for the literal constants attached to an ALU group */
	uint32_t literal[4];
	/* pick a one-character chip tag for the header -- cases elided in extract */
	switch (bc->chiprev) {
	fprintf(stderr, "bytecode %d dw -- %d gprs -----------------------\n", bc->ndw, bc->ngpr);
	fprintf(stderr, " %c\n", chip);
	/* walk every control-flow instruction in emission order */
	LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
	switch (get_cf_class(cf)) {
	/* (CF_CLASS_ALU case label elided in extract) first CF dword: clause address + kcache set 0 */
	fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
	fprintf(stderr, "ADDR:%04d ", cf->addr);
	fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache[0].mode);
	fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache[0].bank);
	fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache[1].bank);
	/* second CF dword: instruction, kcache set 1, barrier, dword count */
	fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
	fprintf(stderr, "INST:%d ", cf->inst);
	fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache[1].mode);
	fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache[0].addr);
	fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache[1].addr);
	fprintf(stderr, "BARRIER:%d ", cf->barrier);
	/* ALU clause length is in 64-bit units: ndw / 2 */
	fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
	case CF_CLASS_TEXTURE:
	case CF_CLASS_VERTEX:
	fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
	fprintf(stderr, "ADDR:%04d\n", cf->addr);
	fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
	fprintf(stderr, "INST:%d ", cf->inst);
	fprintf(stderr, "BARRIER:%d ", cf->barrier);
	/* fetch clause length is in 128-bit units: ndw / 4 */
	fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
	case CF_CLASS_EXPORT:
	fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
	fprintf(stderr, "GPR:%d ", cf->output.gpr);
	fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
	fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
	fprintf(stderr, "TYPE:%X\n", cf->output.type);
	fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
	fprintf(stderr, "SWIZ_X:%X ", cf->output.swizzle_x);
	fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
	fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
	fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
	/* NOTE(review): the next line duplicates the one above -- SWIZ_W is
	 * printed twice; looks like a copy-paste slip (the second print
	 * should probably be dropped) -- confirm against upstream. */
	fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
	fprintf(stderr, "BARRIER:%d ", cf->barrier);
	fprintf(stderr, "INST:%d ", cf->inst);
	fprintf(stderr, "BURST_COUNT:%d\n", cf->output.burst_count);
	case CF_CLASS_OTHER:
	/* flow-control instructions (jump/loop/call/return etc.) */
	fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
	fprintf(stderr, "ADDR:%04d\n", cf->cf_addr);
	fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
	fprintf(stderr, "INST:%d ", cf->inst);
	fprintf(stderr, "COND:%X ", cf->cond);
	fprintf(stderr, "BARRIER:%d ", cf->barrier);
	fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
	/* dump each ALU instruction in this CF's clause */
	LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
	/* count the literal constants this instruction consumes */
	r600_bc_alu_nliterals(alu, literal, &nliteral);
	/* first ALU dword: src0 and src1 operands */
	fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]);
	fprintf(stderr, "SRC0(SEL:%d ", alu->src[0].sel);
	fprintf(stderr, "REL:%d ", alu->src[0].rel);
	fprintf(stderr, "CHAN:%d ", alu->src[0].chan);
	fprintf(stderr, "NEG:%d) ", alu->src[0].neg);
	fprintf(stderr, "SRC1(SEL:%d ", alu->src[1].sel);
	fprintf(stderr, "REL:%d ", alu->src[1].rel);
	fprintf(stderr, "CHAN:%d ", alu->src[1].chan);
	fprintf(stderr, "NEG:%d) ", alu->src[1].neg);
	fprintf(stderr, "LAST:%d)\n", alu->last);
	/* second ALU dword; '*' marks the last instruction of a group */
	fprintf(stderr, "%04d %08X %c ", id, bc->bytecode[id], alu->last ? '*' : ' ');
	fprintf(stderr, "INST:%d ", alu->inst);
	fprintf(stderr, "DST(SEL:%d ", alu->dst.sel);
	fprintf(stderr, "CHAN:%d ", alu->dst.chan);
	fprintf(stderr, "REL:%d ", alu->dst.rel);
	fprintf(stderr, "CLAMP:%d) ", alu->dst.clamp);
	fprintf(stderr, "BANK_SWIZZLE:%d ", alu->bank_swizzle);
	/* OP3 form: third source operand (guard/else branch elided in extract) */
	fprintf(stderr, "SRC2(SEL:%d ", alu->src[2].sel);
	fprintf(stderr, "REL:%d ", alu->src[2].rel);
	fprintf(stderr, "CHAN:%d ", alu->src[2].chan);
	fprintf(stderr, "NEG:%d)\n", alu->src[2].neg);
	/* OP2 form: abs modifiers, write mask, output modifier, predication */
	fprintf(stderr, "SRC0_ABS:%d ", alu->src[0].abs);
	fprintf(stderr, "SRC1_ABS:%d ", alu->src[1].abs);
	fprintf(stderr, "WRITE_MASK:%d ", alu->dst.write);
	fprintf(stderr, "OMOD:%d ", alu->omod);
	/* NOTE(review): both EXECUTE_MASK and UPDATE_PRED print the single
	 * alu->predicate flag -- presumably both encoded bits are driven
	 * from it; confirm against the encoder. */
	fprintf(stderr, "EXECUTE_MASK:%d ", alu->predicate);
	fprintf(stderr, "UPDATE_PRED:%d\n", alu->predicate);
	/* dump the literal constant dwords trailing the ALU group,
	 * reinterpreting each as a float for readability */
	for (i = 0; i < nliteral; i++, id++) {
	float *f = (float*)(bc->bytecode + id);
	fprintf(stderr, "%04d %08X %f\n", id, bc->bytecode[id], *f);
	/* texture fetch instructions (print body elided in extract) */
	LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
	/* vertex fetch instructions (print body elided in extract) */
	LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
	fprintf(stderr, "--------------------------------------\n");
/*
 * r600_cf_vtx() - emit the control-flow program for a fetch shader:
 * one or two VTX clauses covering `count` fetch instructions, followed
 * by a RETURN, then register the FS program state on ve->rstate.
 *
 * NOTE(review): this extract is incomplete -- the opening brace, the
 * declaration/initialization of `i`, and the `if (count > 8)` / `else`
 * scaffolding that selects between the two-clause and one-clause paths
 * were lost in extraction.  Only comments have been added.
 */
void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
	struct r600_pipe_state *rstate;
	/* two-clause path (presumably count > 8 -- TODO confirm):
	 * first clause fetches 8 instructions starting at dword 8;
	 * CF ADDR is encoded in 2-dword units, hence the >> 1 */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
		S_SQ_CF_WORD1_BARRIER(0) |
		S_SQ_CF_WORD1_COUNT(8 - 1);
	/* second clause: the remaining count-8 fetches at dword 40
	 * (8 header dwords + 8 fetches * 4 dwords); COUNT field is
	 * biased by one (holds n-1) */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
		S_SQ_CF_WORD1_BARRIER(0) |
		S_SQ_CF_WORD1_COUNT(count - 8 - 1);
	/* single-clause path: all `count` fetches in one VTX clause */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX) |
		S_SQ_CF_WORD1_BARRIER(0) |
		S_SQ_CF_WORD1_COUNT(count - 1);
	/* terminate the CF program */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
		S_SQ_CF_WORD1_BARRIER(0);
	/* publish the fetch-shader register state */
	rstate = &ve->rstate;
	rstate->id = R600_PIPE_STATE_FETCH_SHADER;
	r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
		0x00000000, 0xFFFFFFFF, NULL);
	r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
		0x00000000, 0xFFFFFFFF, NULL);
	/* program start address: bo offset in 256-byte units, with the bo
	 * attached as the relocation target */
	r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
		r600_bo_offset(ve->fetch_shader) >> 8,
		0xFFFFFFFF, ve->fetch_shader);
/*
 * r600_cf_vtx_tc() - identical to r600_cf_vtx() except the clauses use
 * the VTX_TC (texture-cache path) fetch instruction; emits one or two
 * VTX_TC clauses plus a RETURN, then registers the FS program state.
 *
 * NOTE(review): incomplete extract -- opening brace, declaration of
 * `i`, and the `if`/`else` selecting between the two paths were lost
 * in extraction.  Only comments have been added.
 */
void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
	struct r600_pipe_state *rstate;
	/* two-clause path: 8 fetches at dword 8 (ADDR in 2-dword units) */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
		S_SQ_CF_WORD1_BARRIER(0) |
		S_SQ_CF_WORD1_COUNT(8 - 1);
	/* remaining count-8 fetches start at dword 40; COUNT holds n-1 */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
		S_SQ_CF_WORD1_BARRIER(0) |
		S_SQ_CF_WORD1_COUNT((count - 8) - 1);
	/* single-clause path: all fetches in one VTX_TC clause */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
		S_SQ_CF_WORD1_BARRIER(0) |
		S_SQ_CF_WORD1_COUNT(count - 1);
	/* terminate the CF program */
	bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
	bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
		S_SQ_CF_WORD1_BARRIER(0);
	/* publish the fetch-shader register state */
	rstate = &ve->rstate;
	rstate->id = R600_PIPE_STATE_FETCH_SHADER;
	r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
		0x00000000, 0xFFFFFFFF, NULL);
	r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
		0x00000000, 0xFFFFFFFF, NULL);
	/* program start: bo offset in 256-byte units, bo as reloc target */
	r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
		r600_bo_offset(ve->fetch_shader) >> 8,
		0xFFFFFFFF, ve->fetch_shader);
/*
 * r600_vertex_data_type() - translate a gallium pipe_format into the
 * hardware vertex-fetch encoding: *format (FMT_* data format),
 * *num_format (normalized/scaled/int) and *format_comp (signedness).
 * Only plain-layout formats with a non-VOID channel are supported;
 * anything else falls through to the "unsupported" error path.
 *
 * NOTE(review): incomplete extract -- the opening brace, declaration
 * of `i`, most `case N:` labels inside the size/channel switches, the
 * `break`/`return` statements, the default cases, the assignments to
 * *num_format / *format_comp, and the closing braces were lost in
 * extraction.  Only comments have been added.
 */
static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
	unsigned *num_format, unsigned *format_comp)
	const struct util_format_description *desc;
	desc = util_format_description(pformat);
	/* only plain (array/packed scalar) layouts can be fetched directly;
	 * error/early-exit body elided in extract */
	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) {
	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
	if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
	/* dispatch on the representative channel's type, then bit size,
	 * then the number of channels */
	switch (desc->channel[i].type) {
	/* Half-floats, floats, doubles */
	case UTIL_FORMAT_TYPE_FLOAT:
	switch (desc->channel[i].size) {
	/* (16-bit case label elided) half-float formats */
	switch (desc->nr_channels) {
	*format = FMT_16_FLOAT;
	*format = FMT_16_16_FLOAT;
	*format = FMT_16_16_16_FLOAT;
	*format = FMT_16_16_16_16_FLOAT;
	/* (32-bit case label elided) single-precision formats */
	switch (desc->nr_channels) {
	*format = FMT_32_FLOAT;
	*format = FMT_32_32_FLOAT;
	*format = FMT_32_32_32_FLOAT;
	*format = FMT_32_32_32_32_FLOAT;
	/* Unsigned ints: falls through to the signed handling below
	 * (shared size/count dispatch) -- intervening lines elided */
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
	switch (desc->channel[i].size) {
	/* (8-bit case label elided) */
	switch (desc->nr_channels) {
	// *format = FMT_8_8_8; /* fails piglit draw-vertices test */
	*format = FMT_8_8_8_8;
	/* (16-bit case label elided) */
	switch (desc->nr_channels) {
	*format = FMT_16_16;
	// *format = FMT_16_16_16; /* fails piglit draw-vertices test */
	*format = FMT_16_16_16_16;
	/* (32-bit case label elided) */
	switch (desc->nr_channels) {
	*format = FMT_32_32;
	*format = FMT_32_32_32;
	*format = FMT_32_32_32_32;
	/* signedness and normalization drive *format_comp / *num_format;
	 * the assignments themselves were elided in extraction */
	if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
	if (desc->channel[i].normalized) {
	/* unsupported-format error path (shared by the early exits above) */
	R600_ERR("unsupported vertex format %s\n", util_format_name(pformat));
2661 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context
*rctx
, struct r600_vertex_element
*ve
)
2665 unsigned fetch_resource_start
= 0, format
, num_format
, format_comp
;
2666 struct pipe_vertex_element
*elements
= ve
->elements
;
2667 const struct util_format_description
*desc
;
2669 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
2670 ndw
= 8 + ve
->count
* 4;
2671 ve
->fs_size
= ndw
* 4;
2673 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2674 ve
->fetch_shader
= r600_bo(rctx
->radeon
, ndw
*4, 256, PIPE_BIND_VERTEX_BUFFER
, 0);
2675 if (ve
->fetch_shader
== NULL
) {
2679 bytecode
= r600_bo_map(rctx
->radeon
, ve
->fetch_shader
, 0, NULL
);
2680 if (bytecode
== NULL
) {
2681 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2685 if (rctx
->family
>= CHIP_CEDAR
) {
2686 eg_cf_vtx(ve
, &bytecode
[0], (ndw
- 8) / 4);
2688 r600_cf_vtx(ve
, &bytecode
[0], (ndw
- 8) / 4);
2689 fetch_resource_start
= 160;
2692 /* vertex elements offset need special handling, if offset is bigger
2693 * than what we can put in fetch instruction then we need to alterate
2694 * the vertex resource offset. In such case in order to simplify code
2695 * we will bound one resource per elements. It's a worst case scenario.
2697 for (i
= 0; i
< ve
->count
; i
++) {
2698 ve
->vbuffer_offset
[i
] = C_SQ_VTX_WORD2_OFFSET
& elements
[i
].src_offset
;
2699 if (ve
->vbuffer_offset
[i
]) {
2700 ve
->vbuffer_need_offset
= 1;
2704 for (i
= 0; i
< ve
->count
; i
++) {
2705 unsigned vbuffer_index
;
2706 r600_vertex_data_type(ve
->hw_format
[i
], &format
, &num_format
, &format_comp
);
2707 desc
= util_format_description(ve
->hw_format
[i
]);
2709 R600_ERR("unknown format %d\n", ve
->hw_format
[i
]);
2710 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2714 /* see above for vbuffer_need_offset explanation */
2715 vbuffer_index
= elements
[i
].vertex_buffer_index
;
2716 if (ve
->vbuffer_need_offset
) {
2717 bytecode
[8 + i
* 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i
+ fetch_resource_start
);
2719 bytecode
[8 + i
* 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index
+ fetch_resource_start
);
2721 bytecode
[8 + i
* 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2722 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2723 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2724 bytecode
[8 + i
* 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc
->swizzle
[0]) |
2725 S_SQ_VTX_WORD1_DST_SEL_Y(desc
->swizzle
[1]) |
2726 S_SQ_VTX_WORD1_DST_SEL_Z(desc
->swizzle
[2]) |
2727 S_SQ_VTX_WORD1_DST_SEL_W(desc
->swizzle
[3]) |
2728 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2729 S_SQ_VTX_WORD1_DATA_FORMAT(format
) |
2730 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format
) |
2731 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp
) |
2732 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2733 S_SQ_VTX_WORD1_GPR_DST_GPR(i
+ 1);
2734 bytecode
[8 + i
* 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements
[i
].src_offset
) |
2735 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2736 bytecode
[8 + i
* 4 + 3] = 0;
2738 r600_bo_unmap(rctx
->radeon
, ve
->fetch_shader
);