2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "util/u_format.h"
26 #include "util/u_memory.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "r600_pipe.h"
30 #include "r600_opcodes.h"
32 #include "r600_formats.h"
35 #define NUM_OF_CYCLES 3
36 #define NUM_OF_COMPONENTS 4
38 static inline unsigned int r600_bc_get_num_operands(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
43 switch (bc
->chiprev
) {
47 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
49 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
50 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
:
51 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
52 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
53 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
54 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
55 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
56 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
57 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
58 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
59 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
60 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
61 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
62 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
63 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
64 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
65 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
66 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
67 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
68 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
71 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
72 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
:
73 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
:
74 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
:
75 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
76 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
77 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
78 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
79 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
80 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
81 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
:
82 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
83 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
:
84 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
85 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
86 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
87 case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
90 "Need instruction operand number for 0x%x.\n", alu
->inst
);
93 case CHIPREV_EVERGREEN
:
95 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP
:
97 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD
:
98 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT
:
99 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
:
100 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
:
101 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
:
102 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
:
103 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL
:
104 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX
:
105 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN
:
106 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE
:
107 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE
:
108 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT
:
109 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE
:
110 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
:
111 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
:
112 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
:
113 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
:
114 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
:
115 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
:
116 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
:
117 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY
:
118 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW
:
121 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV
:
122 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
:
123 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT
:
124 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR
:
125 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC
:
126 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
:
127 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
:
128 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
:
129 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
:
130 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
:
131 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
:
132 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
:
133 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
:
134 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR
:
135 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
:
136 case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
:
139 "Need instruction operand number for 0x%x.\n", alu
->inst
);
147 int r700_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
);
149 static struct r600_bc_cf
*r600_bc_cf(void)
151 struct r600_bc_cf
*cf
= CALLOC_STRUCT(r600_bc_cf
);
155 LIST_INITHEAD(&cf
->list
);
156 LIST_INITHEAD(&cf
->alu
);
157 LIST_INITHEAD(&cf
->vtx
);
158 LIST_INITHEAD(&cf
->tex
);
162 static struct r600_bc_alu
*r600_bc_alu(void)
164 struct r600_bc_alu
*alu
= CALLOC_STRUCT(r600_bc_alu
);
168 LIST_INITHEAD(&alu
->list
);
172 static struct r600_bc_vtx
*r600_bc_vtx(void)
174 struct r600_bc_vtx
*vtx
= CALLOC_STRUCT(r600_bc_vtx
);
178 LIST_INITHEAD(&vtx
->list
);
182 static struct r600_bc_tex
*r600_bc_tex(void)
184 struct r600_bc_tex
*tex
= CALLOC_STRUCT(r600_bc_tex
);
188 LIST_INITHEAD(&tex
->list
);
192 int r600_bc_init(struct r600_bc
*bc
, enum radeon_family family
)
194 LIST_INITHEAD(&bc
->cf
);
196 switch (bc
->family
) {
205 bc
->chiprev
= CHIPREV_R600
;
211 bc
->chiprev
= CHIPREV_R700
;
222 bc
->chiprev
= CHIPREV_EVERGREEN
;
225 R600_ERR("unknown family %d\n", bc
->family
);
231 static int r600_bc_add_cf(struct r600_bc
*bc
)
233 struct r600_bc_cf
*cf
= r600_bc_cf();
237 LIST_ADDTAIL(&cf
->list
, &bc
->cf
);
239 cf
->id
= bc
->cf_last
->id
+ 2;
243 bc
->force_add_cf
= 0;
247 int r600_bc_add_output(struct r600_bc
*bc
, const struct r600_bc_output
*output
)
251 if (bc
->cf_last
&& (bc
->cf_last
->inst
== output
->inst
||
252 (bc
->cf_last
->inst
== BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
) &&
253 output
->inst
== BC_INST(bc
, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
))) &&
254 output
->type
== bc
->cf_last
->output
.type
&&
255 output
->elem_size
== bc
->cf_last
->output
.elem_size
&&
256 output
->swizzle_x
== bc
->cf_last
->output
.swizzle_x
&&
257 output
->swizzle_y
== bc
->cf_last
->output
.swizzle_y
&&
258 output
->swizzle_z
== bc
->cf_last
->output
.swizzle_z
&&
259 output
->swizzle_w
== bc
->cf_last
->output
.swizzle_w
&&
260 (output
->burst_count
+ bc
->cf_last
->output
.burst_count
) <= 16) {
262 if ((output
->gpr
+ output
->burst_count
) == bc
->cf_last
->output
.gpr
&&
263 (output
->array_base
+ output
->burst_count
) == bc
->cf_last
->output
.array_base
) {
265 bc
->cf_last
->output
.end_of_program
|= output
->end_of_program
;
266 bc
->cf_last
->output
.inst
= output
->inst
;
267 bc
->cf_last
->output
.gpr
= output
->gpr
;
268 bc
->cf_last
->output
.array_base
= output
->array_base
;
269 bc
->cf_last
->output
.burst_count
+= output
->burst_count
;
272 } else if (output
->gpr
== (bc
->cf_last
->output
.gpr
+ bc
->cf_last
->output
.burst_count
) &&
273 output
->array_base
== (bc
->cf_last
->output
.array_base
+ bc
->cf_last
->output
.burst_count
)) {
275 bc
->cf_last
->output
.end_of_program
|= output
->end_of_program
;
276 bc
->cf_last
->output
.inst
= output
->inst
;
277 bc
->cf_last
->output
.burst_count
+= output
->burst_count
;
282 r
= r600_bc_add_cf(bc
);
285 bc
->cf_last
->inst
= output
->inst
;
286 memcpy(&bc
->cf_last
->output
, output
, sizeof(struct r600_bc_output
));
290 /* alu instructions that can only exist once per group */
291 static int is_alu_once_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
293 switch (bc
->chiprev
) {
296 return !alu
->is_op3
&& (
297 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
298 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
299 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
300 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
301 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
302 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
303 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
304 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
305 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
306 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
||
307 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
308 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
309 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
310 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
311 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
312 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
313 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
314 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
315 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
316 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
317 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
318 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
319 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
320 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
321 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
322 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
323 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
324 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
325 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
326 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
327 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
328 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
329 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
330 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
331 case CHIPREV_EVERGREEN
:
333 return !alu
->is_op3
&& (
334 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE
||
335 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT
||
336 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE
||
337 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE
||
338 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT
||
339 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT
||
340 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT
||
341 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT
||
342 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT
||
343 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT
||
344 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT
||
345 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT
||
346 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE
||
347 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT
||
348 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE
||
349 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE
||
350 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_INV
||
351 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_POP
||
352 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_CLR
||
353 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SET_RESTORE
||
354 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH
||
355 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH
||
356 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH
||
357 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH
||
358 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT
||
359 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_INT
||
360 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_INT
||
361 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT
||
362 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_PUSH_INT
||
363 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_PUSH_INT
||
364 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_PUSH_INT
||
365 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_PUSH_INT
||
366 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLT_PUSH_INT
||
367 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETLE_PUSH_INT
);
371 static int is_alu_reduction_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
373 switch (bc
->chiprev
) {
376 return !alu
->is_op3
&& (
377 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
378 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
379 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
380 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
381 case CHIPREV_EVERGREEN
:
383 return !alu
->is_op3
&& (
384 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
||
385 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4
||
386 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE
||
387 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX4
);
391 static int is_alu_cube_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
393 switch (bc
->chiprev
) {
396 return !alu
->is_op3
&&
397 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
;
398 case CHIPREV_EVERGREEN
:
400 return !alu
->is_op3
&&
401 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE
;
405 static int is_alu_mova_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
407 switch (bc
->chiprev
) {
410 return !alu
->is_op3
&& (
411 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA
||
412 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR
||
413 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
414 case CHIPREV_EVERGREEN
:
416 return !alu
->is_op3
&& (
417 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT
);
421 /* alu instructions that can only execute on the vector unit */
422 static int is_alu_vec_unit_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
424 return is_alu_reduction_inst(bc
, alu
) ||
425 is_alu_mova_inst(bc
, alu
);
428 /* alu instructions that can only execute on the trans unit */
429 static int is_alu_trans_unit_inst(struct r600_bc
*bc
, struct r600_bc_alu
*alu
)
431 switch (bc
->chiprev
) {
435 return alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
436 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT
||
437 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
438 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
439 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
440 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
441 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
442 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
443 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
444 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
445 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
446 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
447 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
448 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
449 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
450 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
451 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
452 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
453 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
454 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
455 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
456 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
457 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
458 alu
->inst
== V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
460 return alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
||
461 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_D2
||
462 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M2
||
463 alu
->inst
== V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT_M4
;
464 case CHIPREV_EVERGREEN
:
467 /* Note that FLT_TO_INT* instructions are vector instructions
468 * on Evergreen, despite what the documentation says. */
469 return alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT
||
470 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT
||
471 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT
||
472 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT
||
473 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_INT
||
474 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT
||
475 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT
||
476 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT
||
477 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_INT
||
478 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT
||
479 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT
||
480 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS
||
481 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE
||
482 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED
||
483 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE
||
484 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_CLAMPED
||
485 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_FF
||
486 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE
||
487 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED
||
488 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_FF
||
489 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE
||
490 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN
||
491 alu
->inst
== EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SQRT_IEEE
;
493 return alu
->inst
== EG_V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT
;
497 /* alu instructions that can execute on any unit */
static int is_alu_any_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
{
	/* An instruction is unit-agnostic exactly when it is pinned to
	 * neither the vector units nor the trans unit. */
	if (is_alu_vec_unit_inst(bc, alu))
		return 0;
	return !is_alu_trans_unit_inst(bc, alu);
}
504 static int assign_alu_units(struct r600_bc
*bc
, struct r600_bc_alu
*alu_first
,
505 struct r600_bc_alu
*assignment
[5])
507 struct r600_bc_alu
*alu
;
508 unsigned i
, chan
, trans
;
510 for (i
= 0; i
< 5; i
++)
511 assignment
[i
] = NULL
;
513 for (alu
= alu_first
; alu
; alu
= LIST_ENTRY(struct r600_bc_alu
, alu
->list
.next
, list
)) {
514 chan
= alu
->dst
.chan
;
515 if (is_alu_trans_unit_inst(bc
, alu
))
517 else if (is_alu_vec_unit_inst(bc
, alu
))
519 else if (assignment
[chan
])
520 trans
= 1; // assume ALU_INST_PREFER_VECTOR
526 assert(0); //ALU.Trans has already been allocated
531 if (assignment
[chan
]) {
532 assert(0); //ALU.chan has already been allocated
535 assignment
[chan
] = alu
;
544 struct alu_bank_swizzle
{
545 int hw_gpr
[NUM_OF_CYCLES
][NUM_OF_COMPONENTS
];
546 int hw_cfile_addr
[4];
547 int hw_cfile_elem
[4];
550 static const unsigned cycle_for_bank_swizzle_vec
[][3] = {
551 [SQ_ALU_VEC_012
] = { 0, 1, 2 },
552 [SQ_ALU_VEC_021
] = { 0, 2, 1 },
553 [SQ_ALU_VEC_120
] = { 1, 2, 0 },
554 [SQ_ALU_VEC_102
] = { 1, 0, 2 },
555 [SQ_ALU_VEC_201
] = { 2, 0, 1 },
556 [SQ_ALU_VEC_210
] = { 2, 1, 0 }
559 static const unsigned cycle_for_bank_swizzle_scl
[][3] = {
560 [SQ_ALU_SCL_210
] = { 2, 1, 0 },
561 [SQ_ALU_SCL_122
] = { 1, 2, 2 },
562 [SQ_ALU_SCL_212
] = { 2, 1, 2 },
563 [SQ_ALU_SCL_221
] = { 2, 2, 1 }
566 static void init_bank_swizzle(struct alu_bank_swizzle
*bs
)
568 int i
, cycle
, component
;
570 for (cycle
= 0; cycle
< NUM_OF_CYCLES
; cycle
++)
571 for (component
= 0; component
< NUM_OF_COMPONENTS
; component
++)
572 bs
->hw_gpr
[cycle
][component
] = -1;
573 for (i
= 0; i
< 4; i
++)
574 bs
->hw_cfile_addr
[i
] = -1;
575 for (i
= 0; i
< 4; i
++)
576 bs
->hw_cfile_elem
[i
] = -1;
579 static int reserve_gpr(struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
, unsigned cycle
)
581 if (bs
->hw_gpr
[cycle
][chan
] == -1)
582 bs
->hw_gpr
[cycle
][chan
] = sel
;
583 else if (bs
->hw_gpr
[cycle
][chan
] != (int)sel
) {
584 // Another scalar operation has already used GPR read port for channel
590 static int reserve_cfile(struct r600_bc
*bc
, struct alu_bank_swizzle
*bs
, unsigned sel
, unsigned chan
)
592 int res
, num_res
= 4;
593 if (bc
->chiprev
>= CHIPREV_R700
) {
597 for (res
= 0; res
< num_res
; ++res
) {
598 if (bs
->hw_cfile_addr
[res
] == -1) {
599 bs
->hw_cfile_addr
[res
] = sel
;
600 bs
->hw_cfile_elem
[res
] = chan
;
602 } else if (bs
->hw_cfile_addr
[res
] == sel
&&
603 bs
->hw_cfile_elem
[res
] == chan
)
604 return 0; // Read for this scalar element already reserved, nothing to do here.
606 // All cfile read ports are used, cannot reference vector element
/* True if 'sel' selects a general-purpose register.
 * GPRs occupy the source-select range [0, 127]. The original code also
 * tested sel >= 0, which is always true for an unsigned operand, so only
 * the upper bound is meaningful. */
static int is_gpr(unsigned sel)
{
	return sel <= 127;
}
615 /* CB constants start at 512, and get translated to a kcache index when ALU
616 * clauses are constructed. Note that we handle kcache constants the same way
617 * as (the now gone) cfile constants, is that really required? */
/* True if 'sel' refers to a constant-file style operand: the legacy
 * cfile window or a kcache selector before/after translation. */
static int is_cfile(unsigned sel)
{
	if (sel > 255 && sel < 512)
		return 1;
	if (sel > 511 && sel < 4607) // Kcache before translate
		return 1;
	if (sel > 127 && sel < 192) // Kcache after translate
		return 1;
	return 0;
}
625 static int is_const(int sel
)
627 return is_cfile(sel
) ||
628 (sel
>= V_SQ_ALU_SRC_0
&&
629 sel
<= V_SQ_ALU_SRC_LITERAL
);
632 static int check_vector(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
633 struct alu_bank_swizzle
*bs
, int bank_swizzle
)
635 int r
, src
, num_src
, sel
, elem
, cycle
;
637 num_src
= r600_bc_get_num_operands(bc
, alu
);
638 for (src
= 0; src
< num_src
; src
++) {
639 sel
= alu
->src
[src
].sel
;
640 elem
= alu
->src
[src
].chan
;
642 cycle
= cycle_for_bank_swizzle_vec
[bank_swizzle
][src
];
643 if (src
== 1 && sel
== alu
->src
[0].sel
&& elem
== alu
->src
[0].chan
)
644 // Nothing to do; special-case optimization,
645 // second source uses first source’s reservation
648 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
652 } else if (is_cfile(sel
)) {
653 r
= reserve_cfile(bc
, bs
, sel
, elem
);
657 // No restrictions on PV, PS, literal or special constants
662 static int check_scalar(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
663 struct alu_bank_swizzle
*bs
, int bank_swizzle
)
665 int r
, src
, num_src
, const_count
, sel
, elem
, cycle
;
667 num_src
= r600_bc_get_num_operands(bc
, alu
);
668 for (const_count
= 0, src
= 0; src
< num_src
; ++src
) {
669 sel
= alu
->src
[src
].sel
;
670 elem
= alu
->src
[src
].chan
;
671 if (is_const(sel
)) { // Any constant, including literal and inline constants
672 if (const_count
>= 2)
673 // More than two references to a constant in
674 // transcendental operation.
680 r
= reserve_cfile(bc
, bs
, sel
, elem
);
685 for (src
= 0; src
< num_src
; ++src
) {
686 sel
= alu
->src
[src
].sel
;
687 elem
= alu
->src
[src
].chan
;
689 cycle
= cycle_for_bank_swizzle_scl
[bank_swizzle
][src
];
690 if (cycle
< const_count
)
691 // Cycle for GPR load conflicts with
692 // constant load in transcendental operation.
694 r
= reserve_gpr(bs
, sel
, elem
, cycle
);
698 // Constants already processed
699 // No restrictions on PV, PS
704 static int check_and_set_bank_swizzle(struct r600_bc
*bc
,
705 struct r600_bc_alu
*slots
[5])
707 struct alu_bank_swizzle bs
;
709 int i
, r
= 0, forced
= 0;
711 for (i
= 0; i
< 5; i
++)
712 if (slots
[i
] && slots
[i
]->bank_swizzle_force
) {
713 slots
[i
]->bank_swizzle
= slots
[i
]->bank_swizzle_force
;
720 // just check every possible combination of bank swizzle
721 // not very efficient, but works on the first try in most of the cases
722 for (i
= 0; i
< 4; i
++)
723 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
724 bank_swizzle
[4] = SQ_ALU_SCL_210
;
725 while(bank_swizzle
[4] <= SQ_ALU_SCL_221
) {
726 init_bank_swizzle(&bs
);
727 for (i
= 0; i
< 4; i
++) {
729 r
= check_vector(bc
, slots
[i
], &bs
, bank_swizzle
[i
]);
734 if (!r
&& slots
[4]) {
735 r
= check_scalar(bc
, slots
[4], &bs
, bank_swizzle
[4]);
738 for (i
= 0; i
< 5; i
++) {
740 slots
[i
]->bank_swizzle
= bank_swizzle
[i
];
745 for (i
= 0; i
< 5; i
++) {
747 if (bank_swizzle
[i
] <= SQ_ALU_VEC_210
)
750 bank_swizzle
[i
] = SQ_ALU_VEC_012
;
754 // couldn't find a working swizzle
758 static int replace_gpr_with_pv_ps(struct r600_bc
*bc
,
759 struct r600_bc_alu
*slots
[5], struct r600_bc_alu
*alu_prev
)
761 struct r600_bc_alu
*prev
[5];
763 int i
, j
, r
, src
, num_src
;
765 r
= assign_alu_units(bc
, alu_prev
, prev
);
769 for (i
= 0; i
< 5; ++i
) {
770 if(prev
[i
] && prev
[i
]->dst
.write
&& !prev
[i
]->dst
.rel
) {
771 gpr
[i
] = prev
[i
]->dst
.sel
;
772 /* cube writes more than PV.X */
773 if (!is_alu_cube_inst(bc
, prev
[i
]) && is_alu_reduction_inst(bc
, prev
[i
]))
776 chan
[i
] = prev
[i
]->dst
.chan
;
781 for (i
= 0; i
< 5; ++i
) {
782 struct r600_bc_alu
*alu
= slots
[i
];
786 num_src
= r600_bc_get_num_operands(bc
, alu
);
787 for (src
= 0; src
< num_src
; ++src
) {
788 if (!is_gpr(alu
->src
[src
].sel
) || alu
->src
[src
].rel
)
791 if (alu
->src
[src
].sel
== gpr
[4] &&
792 alu
->src
[src
].chan
== chan
[4]) {
793 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PS
;
794 alu
->src
[src
].chan
= 0;
798 for (j
= 0; j
< 4; ++j
) {
799 if (alu
->src
[src
].sel
== gpr
[j
] &&
800 alu
->src
[src
].chan
== j
) {
801 alu
->src
[src
].sel
= V_SQ_ALU_SRC_PV
;
802 alu
->src
[src
].chan
= chan
[j
];
812 void r600_bc_special_constants(u32 value
, unsigned *sel
, unsigned *neg
)
816 *sel
= V_SQ_ALU_SRC_0
;
819 *sel
= V_SQ_ALU_SRC_1_INT
;
822 *sel
= V_SQ_ALU_SRC_M_1_INT
;
824 case 0x3F800000: // 1.0f
825 *sel
= V_SQ_ALU_SRC_1
;
827 case 0x3F000000: // 0.5f
828 *sel
= V_SQ_ALU_SRC_0_5
;
830 case 0xBF800000: // -1.0f
831 *sel
= V_SQ_ALU_SRC_1
;
834 case 0xBF000000: // -0.5f
835 *sel
= V_SQ_ALU_SRC_0_5
;
839 *sel
= V_SQ_ALU_SRC_LITERAL
;
844 /* compute how many literal are needed */
845 static int r600_bc_alu_nliterals(struct r600_bc
*bc
, struct r600_bc_alu
*alu
,
846 uint32_t literal
[4], unsigned *nliteral
)
848 unsigned num_src
= r600_bc_get_num_operands(bc
, alu
);
851 for (i
= 0; i
< num_src
; ++i
) {
852 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
853 uint32_t value
= alu
->src
[i
].value
;
855 for (j
= 0; j
< *nliteral
; ++j
) {
856 if (literal
[j
] == value
) {
864 literal
[(*nliteral
)++] = value
;
871 static void r600_bc_alu_adjust_literals(struct r600_bc
*bc
,
872 struct r600_bc_alu
*alu
,
873 uint32_t literal
[4], unsigned nliteral
)
875 unsigned num_src
= r600_bc_get_num_operands(bc
, alu
);
878 for (i
= 0; i
< num_src
; ++i
) {
879 if (alu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
) {
880 uint32_t value
= alu
->src
[i
].value
;
881 for (j
= 0; j
< nliteral
; ++j
) {
882 if (literal
[j
] == value
) {
883 alu
->src
[i
].chan
= j
;
891 static int merge_inst_groups(struct r600_bc
*bc
, struct r600_bc_alu
*slots
[5],
892 struct r600_bc_alu
*alu_prev
)
894 struct r600_bc_alu
*prev
[5];
895 struct r600_bc_alu
*result
[5] = { NULL
};
897 uint32_t literal
[4], prev_literal
[4];
898 unsigned nliteral
= 0, prev_nliteral
= 0;
900 int i
, j
, r
, src
, num_src
;
901 int num_once_inst
= 0;
902 int have_mova
= 0, have_rel
= 0;
904 r
= assign_alu_units(bc
, alu_prev
, prev
);
908 for (i
= 0; i
< 5; ++i
) {
909 struct r600_bc_alu
*alu
;
911 /* check number of literals */
913 if (r600_bc_alu_nliterals(bc
, prev
[i
], literal
, &nliteral
))
915 if (r600_bc_alu_nliterals(bc
, prev
[i
], prev_literal
, &prev_nliteral
))
917 if (is_alu_mova_inst(bc
, prev
[i
])) {
922 num_once_inst
+= is_alu_once_inst(bc
, prev
[i
]);
924 if (slots
[i
] && r600_bc_alu_nliterals(bc
, slots
[i
], literal
, &nliteral
))
927 // let's check used slots
928 if (prev
[i
] && !slots
[i
]) {
931 } else if (prev
[i
] && slots
[i
]) {
932 if (result
[4] == NULL
&& prev
[4] == NULL
&& slots
[4] == NULL
) {
933 // trans unit is still free try to use it
934 if (is_alu_any_unit_inst(bc
, slots
[i
])) {
936 result
[4] = slots
[i
];
937 } else if (is_alu_any_unit_inst(bc
, prev
[i
])) {
938 result
[i
] = slots
[i
];
944 } else if(!slots
[i
]) {
947 result
[i
] = slots
[i
];
949 // let's check source gprs
951 num_once_inst
+= is_alu_once_inst(bc
, alu
);
953 num_src
= r600_bc_get_num_operands(bc
, alu
);
954 for (src
= 0; src
< num_src
; ++src
) {
955 if (alu
->src
[src
].rel
) {
961 // constants don't matter
962 if (!is_gpr(alu
->src
[src
].sel
))
965 for (j
= 0; j
< 5; ++j
) {
966 if (!prev
[j
] || !prev
[j
]->dst
.write
)
969 // if it's relative then we can't determine which gpr is really used
970 if (prev
[j
]->dst
.chan
== alu
->src
[src
].chan
&&
971 (prev
[j
]->dst
.sel
== alu
->src
[src
].sel
||
972 prev
[j
]->dst
.rel
|| alu
->src
[src
].rel
))
978 /* more than one PRED_ or KILL_ ? */
979 if (num_once_inst
> 1)
982 /* check if the result can still be swizzled */
983 r
= check_and_set_bank_swizzle(bc
, result
);
987 /* looks like everything worked out right, apply the changes */
989 /* undo adding previous literals */
990 bc
->cf_last
->ndw
-= align(prev_nliteral
, 2);
992 /* sort instructions */
993 for (i
= 0; i
< 5; ++i
) {
994 slots
[i
] = result
[i
];
996 LIST_DEL(&result
[i
]->list
);
998 LIST_ADDTAIL(&result
[i
]->list
, &bc
->cf_last
->alu
);
1002 /* determine new last instruction */
1003 LIST_ENTRY(struct r600_bc_alu
, bc
->cf_last
->alu
.prev
, list
)->last
= 1;
1005 /* determine new first instruction */
1006 for (i
= 0; i
< 5; ++i
) {
1008 bc
->cf_last
->curr_bs_head
= result
[i
];
1013 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->prev2_bs_head
;
1014 bc
->cf_last
->prev2_bs_head
= NULL
;
1019 /* This code handles kcache lines as single blocks of 32 constants. We could
1020 * probably do slightly better by recognizing that we actually have two
1021 * consecutive lines of 16 constants, but the resulting code would also be
1022 * somewhat more complicated. */
1023 static int r600_bc_alloc_kcache_lines(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, int type
)
1025 struct r600_bc_kcache
*kcache
= bc
->cf_last
->kcache
;
1026 unsigned int required_lines
;
1027 unsigned int free_lines
= 0;
1028 unsigned int cache_line
[3];
1029 unsigned int count
= 0;
1033 /* Collect required cache lines. */
1034 for (i
= 0; i
< 3; ++i
) {
1038 if (alu
->src
[i
].sel
< 512)
1041 line
= ((alu
->src
[i
].sel
- 512) / 32) * 2;
1043 for (j
= 0; j
< count
; ++j
) {
1044 if (cache_line
[j
] == line
) {
1051 cache_line
[count
++] = line
;
1054 /* This should never actually happen. */
1055 if (count
>= 3) return -ENOMEM
;
1057 for (i
= 0; i
< 2; ++i
) {
1058 if (kcache
[i
].mode
== V_SQ_CF_KCACHE_NOP
) {
1063 /* Filter lines pulled in by previous instructions. Note that this is
1064 * only for the required_lines count, we can't remove these from the
1065 * cache_line array since we may have to start a new ALU clause. */
1066 for (i
= 0, required_lines
= count
; i
< count
; ++i
) {
1067 for (j
= 0; j
< 2; ++j
) {
1068 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1069 kcache
[j
].addr
== cache_line
[i
]) {
1076 /* Start a new ALU clause if needed. */
1077 if (required_lines
> free_lines
) {
1078 if ((r
= r600_bc_add_cf(bc
))) {
1081 bc
->cf_last
->inst
= (type
<< 3);
1082 kcache
= bc
->cf_last
->kcache
;
1085 /* Setup the kcache lines. */
1086 for (i
= 0; i
< count
; ++i
) {
1089 for (j
= 0; j
< 2; ++j
) {
1090 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1091 kcache
[j
].addr
== cache_line
[i
]) {
1097 if (found
) continue;
1099 for (j
= 0; j
< 2; ++j
) {
1100 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_NOP
) {
1102 kcache
[j
].addr
= cache_line
[i
];
1103 kcache
[j
].mode
= V_SQ_CF_KCACHE_LOCK_2
;
1109 /* Alter the src operands to refer to the kcache. */
1110 for (i
= 0; i
< 3; ++i
) {
1111 static const unsigned int base
[] = {128, 160, 256, 288};
1114 if (alu
->src
[i
].sel
< 512)
1117 alu
->src
[i
].sel
-= 512;
1118 line
= (alu
->src
[i
].sel
/ 32) * 2;
1120 for (j
= 0; j
< 2; ++j
) {
1121 if (kcache
[j
].mode
== V_SQ_CF_KCACHE_LOCK_2
&&
1122 kcache
[j
].addr
== line
) {
1123 alu
->src
[i
].sel
&= 0x1f;
1124 alu
->src
[i
].sel
+= base
[j
];
1133 int r600_bc_add_alu_type(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
, int type
)
1135 struct r600_bc_alu
*nalu
= r600_bc_alu();
1136 struct r600_bc_alu
*lalu
;
1141 memcpy(nalu
, alu
, sizeof(struct r600_bc_alu
));
1143 if (bc
->cf_last
!= NULL
&& bc
->cf_last
->inst
!= (type
<< 3)) {
1144 /* check if we could add it anyway */
1145 if (bc
->cf_last
->inst
== (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3) &&
1146 type
== V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
) {
1147 LIST_FOR_EACH_ENTRY(lalu
, &bc
->cf_last
->alu
, list
) {
1148 if (lalu
->predicate
) {
1149 bc
->force_add_cf
= 1;
1154 bc
->force_add_cf
= 1;
1157 /* cf can contains only alu or only vtx or only tex */
1158 if (bc
->cf_last
== NULL
|| bc
->force_add_cf
) {
1159 r
= r600_bc_add_cf(bc
);
1165 bc
->cf_last
->inst
= (type
<< 3);
1167 /* Setup the kcache for this ALU instruction. This will start a new
1168 * ALU clause if needed. */
1169 if ((r
= r600_bc_alloc_kcache_lines(bc
, nalu
, type
))) {
1174 if (!bc
->cf_last
->curr_bs_head
) {
1175 bc
->cf_last
->curr_bs_head
= nalu
;
1177 /* number of gpr == the last gpr used in any alu */
1178 for (i
= 0; i
< 3; i
++) {
1179 if (nalu
->src
[i
].sel
>= bc
->ngpr
&& nalu
->src
[i
].sel
< 128) {
1180 bc
->ngpr
= nalu
->src
[i
].sel
+ 1;
1182 if (nalu
->src
[i
].sel
== V_SQ_ALU_SRC_LITERAL
)
1183 r600_bc_special_constants(nalu
->src
[i
].value
,
1184 &nalu
->src
[i
].sel
, &nalu
->src
[i
].neg
);
1186 if (nalu
->dst
.sel
>= bc
->ngpr
) {
1187 bc
->ngpr
= nalu
->dst
.sel
+ 1;
1189 LIST_ADDTAIL(&nalu
->list
, &bc
->cf_last
->alu
);
1190 /* each alu use 2 dwords */
1191 bc
->cf_last
->ndw
+= 2;
1194 /* process cur ALU instructions for bank swizzle */
1196 uint32_t literal
[4];
1198 struct r600_bc_alu
*slots
[5];
1199 r
= assign_alu_units(bc
, bc
->cf_last
->curr_bs_head
, slots
);
1203 if (bc
->cf_last
->prev_bs_head
) {
1204 r
= merge_inst_groups(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1209 if (bc
->cf_last
->prev_bs_head
) {
1210 r
= replace_gpr_with_pv_ps(bc
, slots
, bc
->cf_last
->prev_bs_head
);
1215 r
= check_and_set_bank_swizzle(bc
, slots
);
1219 for (i
= 0, nliteral
= 0; i
< 5; i
++) {
1221 r
= r600_bc_alu_nliterals(bc
, slots
[i
], literal
, &nliteral
);
1226 bc
->cf_last
->ndw
+= align(nliteral
, 2);
1228 /* at most 128 slots, one add alu can add 5 slots + 4 constants(2 slots)
1230 if ((bc
->cf_last
->ndw
>> 1) >= 120) {
1231 bc
->force_add_cf
= 1;
1234 bc
->cf_last
->prev2_bs_head
= bc
->cf_last
->prev_bs_head
;
1235 bc
->cf_last
->prev_bs_head
= bc
->cf_last
->curr_bs_head
;
1236 bc
->cf_last
->curr_bs_head
= NULL
;
1241 int r600_bc_add_alu(struct r600_bc
*bc
, const struct r600_bc_alu
*alu
)
1243 return r600_bc_add_alu_type(bc
, alu
, BC_INST(bc
, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
));
1246 int r600_bc_add_vtx(struct r600_bc
*bc
, const struct r600_bc_vtx
*vtx
)
1248 struct r600_bc_vtx
*nvtx
= r600_bc_vtx();
1253 memcpy(nvtx
, vtx
, sizeof(struct r600_bc_vtx
));
1255 /* cf can contains only alu or only vtx or only tex */
1256 if (bc
->cf_last
== NULL
||
1257 (bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX
&&
1258 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
) ||
1260 r
= r600_bc_add_cf(bc
);
1265 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_VTX
;
1267 LIST_ADDTAIL(&nvtx
->list
, &bc
->cf_last
->vtx
);
1268 /* each fetch use 4 dwords */
1269 bc
->cf_last
->ndw
+= 4;
1271 if ((bc
->cf_last
->ndw
/ 4) > 7)
1272 bc
->force_add_cf
= 1;
1276 int r600_bc_add_tex(struct r600_bc
*bc
, const struct r600_bc_tex
*tex
)
1278 struct r600_bc_tex
*ntex
= r600_bc_tex();
1283 memcpy(ntex
, tex
, sizeof(struct r600_bc_tex
));
1285 /* cf can contains only alu or only vtx or only tex */
1286 if (bc
->cf_last
== NULL
||
1287 bc
->cf_last
->inst
!= V_SQ_CF_WORD1_SQ_CF_INST_TEX
||
1289 r
= r600_bc_add_cf(bc
);
1294 bc
->cf_last
->inst
= V_SQ_CF_WORD1_SQ_CF_INST_TEX
;
1296 if (ntex
->src_gpr
>= bc
->ngpr
) {
1297 bc
->ngpr
= ntex
->src_gpr
+ 1;
1299 if (ntex
->dst_gpr
>= bc
->ngpr
) {
1300 bc
->ngpr
= ntex
->dst_gpr
+ 1;
1302 LIST_ADDTAIL(&ntex
->list
, &bc
->cf_last
->tex
);
1303 /* each texture fetch use 4 dwords */
1304 bc
->cf_last
->ndw
+= 4;
1306 if ((bc
->cf_last
->ndw
/ 4) > 7)
1307 bc
->force_add_cf
= 1;
1311 int r600_bc_add_cfinst(struct r600_bc
*bc
, int inst
)
1314 r
= r600_bc_add_cf(bc
);
1318 bc
->cf_last
->cond
= V_SQ_CF_COND_ACTIVE
;
1319 bc
->cf_last
->inst
= inst
;
1323 /* common to all 3 families */
1324 static int r600_bc_vtx_build(struct r600_bc
*bc
, struct r600_bc_vtx
*vtx
, unsigned id
)
1326 unsigned fetch_resource_start
= 0;
1328 /* check if we are fetch shader */
1329 /* fetch shader can also access vertex resource,
1330 * first fetch shader resource is at 160
1332 if (bc
->type
== -1) {
1333 switch (bc
->chiprev
) {
1338 fetch_resource_start
= 160;
1341 case CHIPREV_EVERGREEN
:
1342 fetch_resource_start
= 0;
1345 fprintf(stderr
, "%s:%s:%d unknown chiprev %d\n",
1346 __FILE__
, __func__
, __LINE__
, bc
->chiprev
);
1350 bc
->bytecode
[id
++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx
->buffer_id
+ fetch_resource_start
) |
1351 S_SQ_VTX_WORD0_FETCH_TYPE(vtx
->fetch_type
) |
1352 S_SQ_VTX_WORD0_SRC_GPR(vtx
->src_gpr
) |
1353 S_SQ_VTX_WORD0_SRC_SEL_X(vtx
->src_sel_x
) |
1354 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx
->mega_fetch_count
);
1355 bc
->bytecode
[id
++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx
->dst_sel_x
) |
1356 S_SQ_VTX_WORD1_DST_SEL_Y(vtx
->dst_sel_y
) |
1357 S_SQ_VTX_WORD1_DST_SEL_Z(vtx
->dst_sel_z
) |
1358 S_SQ_VTX_WORD1_DST_SEL_W(vtx
->dst_sel_w
) |
1359 S_SQ_VTX_WORD1_USE_CONST_FIELDS(vtx
->use_const_fields
) |
1360 S_SQ_VTX_WORD1_DATA_FORMAT(vtx
->data_format
) |
1361 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(vtx
->num_format_all
) |
1362 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(vtx
->format_comp_all
) |
1363 S_SQ_VTX_WORD1_SRF_MODE_ALL(vtx
->srf_mode_all
) |
1364 S_SQ_VTX_WORD1_GPR_DST_GPR(vtx
->dst_gpr
);
1365 bc
->bytecode
[id
++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
1366 bc
->bytecode
[id
++] = 0;
1370 /* common to all 3 families */
1371 static int r600_bc_tex_build(struct r600_bc
*bc
, struct r600_bc_tex
*tex
, unsigned id
)
1373 bc
->bytecode
[id
++] = S_SQ_TEX_WORD0_TEX_INST(tex
->inst
) |
1374 S_SQ_TEX_WORD0_RESOURCE_ID(tex
->resource_id
) |
1375 S_SQ_TEX_WORD0_SRC_GPR(tex
->src_gpr
) |
1376 S_SQ_TEX_WORD0_SRC_REL(tex
->src_rel
);
1377 bc
->bytecode
[id
++] = S_SQ_TEX_WORD1_DST_GPR(tex
->dst_gpr
) |
1378 S_SQ_TEX_WORD1_DST_REL(tex
->dst_rel
) |
1379 S_SQ_TEX_WORD1_DST_SEL_X(tex
->dst_sel_x
) |
1380 S_SQ_TEX_WORD1_DST_SEL_Y(tex
->dst_sel_y
) |
1381 S_SQ_TEX_WORD1_DST_SEL_Z(tex
->dst_sel_z
) |
1382 S_SQ_TEX_WORD1_DST_SEL_W(tex
->dst_sel_w
) |
1383 S_SQ_TEX_WORD1_LOD_BIAS(tex
->lod_bias
) |
1384 S_SQ_TEX_WORD1_COORD_TYPE_X(tex
->coord_type_x
) |
1385 S_SQ_TEX_WORD1_COORD_TYPE_Y(tex
->coord_type_y
) |
1386 S_SQ_TEX_WORD1_COORD_TYPE_Z(tex
->coord_type_z
) |
1387 S_SQ_TEX_WORD1_COORD_TYPE_W(tex
->coord_type_w
);
1388 bc
->bytecode
[id
++] = S_SQ_TEX_WORD2_OFFSET_X(tex
->offset_x
) |
1389 S_SQ_TEX_WORD2_OFFSET_Y(tex
->offset_y
) |
1390 S_SQ_TEX_WORD2_OFFSET_Z(tex
->offset_z
) |
1391 S_SQ_TEX_WORD2_SAMPLER_ID(tex
->sampler_id
) |
1392 S_SQ_TEX_WORD2_SRC_SEL_X(tex
->src_sel_x
) |
1393 S_SQ_TEX_WORD2_SRC_SEL_Y(tex
->src_sel_y
) |
1394 S_SQ_TEX_WORD2_SRC_SEL_Z(tex
->src_sel_z
) |
1395 S_SQ_TEX_WORD2_SRC_SEL_W(tex
->src_sel_w
);
1396 bc
->bytecode
[id
++] = 0;
1400 /* r600 only, r700/eg bits in r700_asm.c */
1401 static int r600_bc_alu_build(struct r600_bc
*bc
, struct r600_bc_alu
*alu
, unsigned id
)
1403 /* don't replace gpr by pv or ps for destination register */
1404 bc
->bytecode
[id
++] = S_SQ_ALU_WORD0_SRC0_SEL(alu
->src
[0].sel
) |
1405 S_SQ_ALU_WORD0_SRC0_REL(alu
->src
[0].rel
) |
1406 S_SQ_ALU_WORD0_SRC0_CHAN(alu
->src
[0].chan
) |
1407 S_SQ_ALU_WORD0_SRC0_NEG(alu
->src
[0].neg
) |
1408 S_SQ_ALU_WORD0_SRC1_SEL(alu
->src
[1].sel
) |
1409 S_SQ_ALU_WORD0_SRC1_REL(alu
->src
[1].rel
) |
1410 S_SQ_ALU_WORD0_SRC1_CHAN(alu
->src
[1].chan
) |
1411 S_SQ_ALU_WORD0_SRC1_NEG(alu
->src
[1].neg
) |
1412 S_SQ_ALU_WORD0_LAST(alu
->last
);
1415 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1416 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1417 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1418 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1419 S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu
->src
[2].sel
) |
1420 S_SQ_ALU_WORD1_OP3_SRC2_REL(alu
->src
[2].rel
) |
1421 S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu
->src
[2].chan
) |
1422 S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu
->src
[2].neg
) |
1423 S_SQ_ALU_WORD1_OP3_ALU_INST(alu
->inst
) |
1424 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
);
1426 bc
->bytecode
[id
++] = S_SQ_ALU_WORD1_DST_GPR(alu
->dst
.sel
) |
1427 S_SQ_ALU_WORD1_DST_CHAN(alu
->dst
.chan
) |
1428 S_SQ_ALU_WORD1_DST_REL(alu
->dst
.rel
) |
1429 S_SQ_ALU_WORD1_CLAMP(alu
->dst
.clamp
) |
1430 S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu
->src
[0].abs
) |
1431 S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu
->src
[1].abs
) |
1432 S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu
->dst
.write
) |
1433 S_SQ_ALU_WORD1_OP2_OMOD(alu
->omod
) |
1434 S_SQ_ALU_WORD1_OP2_ALU_INST(alu
->inst
) |
1435 S_SQ_ALU_WORD1_BANK_SWIZZLE(alu
->bank_swizzle
) |
1436 S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu
->predicate
) |
1437 S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu
->predicate
);
1442 /* common for r600/r700 - eg in eg_asm.c */
1443 static int r600_bc_cf_build(struct r600_bc
*bc
, struct r600_bc_cf
*cf
)
1445 unsigned id
= cf
->id
;
1448 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1449 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1450 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1451 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1452 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD0_ADDR(cf
->addr
>> 1) |
1453 S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf
->kcache
[0].mode
) |
1454 S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf
->kcache
[0].bank
) |
1455 S_SQ_CF_ALU_WORD0_KCACHE_BANK1(cf
->kcache
[1].bank
);
1457 bc
->bytecode
[id
++] = S_SQ_CF_ALU_WORD1_CF_INST(cf
->inst
>> 3) |
1458 S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf
->kcache
[1].mode
) |
1459 S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf
->kcache
[0].addr
) |
1460 S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf
->kcache
[1].addr
) |
1461 S_SQ_CF_ALU_WORD1_BARRIER(1) |
1462 S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc
->chiprev
== CHIPREV_R600
? cf
->r6xx_uses_waterfall
: 0) |
1463 S_SQ_CF_ALU_WORD1_COUNT((cf
->ndw
/ 2) - 1);
1465 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1466 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1467 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1468 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->addr
>> 1);
1469 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1470 S_SQ_CF_WORD1_BARRIER(1) |
1471 S_SQ_CF_WORD1_COUNT((cf
->ndw
/ 4) - 1);
1473 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1474 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1475 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf
->output
.gpr
) |
1476 S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf
->output
.elem_size
) |
1477 S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf
->output
.array_base
) |
1478 S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf
->output
.type
);
1479 bc
->bytecode
[id
++] = S_SQ_CF_ALLOC_EXPORT_WORD1_BURST_COUNT(cf
->output
.burst_count
- 1) |
1480 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf
->output
.swizzle_x
) |
1481 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf
->output
.swizzle_y
) |
1482 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf
->output
.swizzle_z
) |
1483 S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf
->output
.swizzle_w
) |
1484 S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf
->output
.barrier
) |
1485 S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf
->output
.inst
) |
1486 S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf
->output
.end_of_program
);
1488 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1489 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1490 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1491 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1492 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1493 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1494 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1495 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1496 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1497 bc
->bytecode
[id
++] = S_SQ_CF_WORD0_ADDR(cf
->cf_addr
>> 1);
1498 bc
->bytecode
[id
++] = S_SQ_CF_WORD1_CF_INST(cf
->inst
) |
1499 S_SQ_CF_WORD1_BARRIER(1) |
1500 S_SQ_CF_WORD1_COND(cf
->cond
) |
1501 S_SQ_CF_WORD1_POP_COUNT(cf
->pop_count
);
1505 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1511 int r600_bc_build(struct r600_bc
*bc
)
1513 struct r600_bc_cf
*cf
;
1514 struct r600_bc_alu
*alu
;
1515 struct r600_bc_vtx
*vtx
;
1516 struct r600_bc_tex
*tex
;
1517 uint32_t literal
[4];
1522 if (bc
->callstack
[0].max
> 0)
1523 bc
->nstack
= ((bc
->callstack
[0].max
+ 3) >> 2) + 2;
1524 if (bc
->type
== TGSI_PROCESSOR_VERTEX
&& !bc
->nstack
) {
1528 /* first path compute addr of each CF block */
1529 /* addr start after all the CF instructions */
1530 addr
= bc
->cf_last
->id
+ 2;
1531 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1533 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1534 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1535 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1536 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1538 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1539 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1540 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1541 /* fetch node need to be 16 bytes aligned*/
1543 addr
&= 0xFFFFFFFCUL
;
1545 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1546 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1547 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1548 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1550 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1551 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1552 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1553 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1554 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1555 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1556 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1557 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1558 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1561 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1566 bc
->ndw
= cf
->addr
+ cf
->ndw
;
1569 bc
->bytecode
= calloc(1, bc
->ndw
* 4);
1570 if (bc
->bytecode
== NULL
)
1572 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1574 if (bc
->chiprev
== CHIPREV_EVERGREEN
)
1575 r
= eg_bc_cf_build(bc
, cf
);
1577 r
= r600_bc_cf_build(bc
, cf
);
1581 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1582 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1583 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1584 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1586 memset(literal
, 0, sizeof(literal
));
1587 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
1588 r
= r600_bc_alu_nliterals(bc
, alu
, literal
, &nliteral
);
1591 r600_bc_alu_adjust_literals(bc
, alu
, literal
, nliteral
);
1592 switch(bc
->chiprev
) {
1594 r
= r600_bc_alu_build(bc
, alu
, addr
);
1597 case CHIPREV_EVERGREEN
: /* eg alu is same encoding as r700 */
1598 r
= r700_bc_alu_build(bc
, alu
, addr
);
1601 R600_ERR("unknown family %d\n", bc
->family
);
1608 for (i
= 0; i
< align(nliteral
, 2); ++i
) {
1609 bc
->bytecode
[addr
++] = literal
[i
];
1612 memset(literal
, 0, sizeof(literal
));
1616 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1617 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1618 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
1619 r
= r600_bc_vtx_build(bc
, vtx
, addr
);
1625 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1626 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
1627 r
= r600_bc_tex_build(bc
, tex
, addr
);
1633 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1634 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1635 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1636 case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1637 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1638 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1639 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1640 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1641 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1642 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1643 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1644 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1645 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1648 R600_ERR("unsupported CF instruction (0x%X)\n", cf
->inst
);
1655 void r600_bc_clear(struct r600_bc
*bc
)
1657 struct r600_bc_cf
*cf
= NULL
, *next_cf
;
1660 bc
->bytecode
= NULL
;
1662 LIST_FOR_EACH_ENTRY_SAFE(cf
, next_cf
, &bc
->cf
, list
) {
1663 struct r600_bc_alu
*alu
= NULL
, *next_alu
;
1664 struct r600_bc_tex
*tex
= NULL
, *next_tex
;
1665 struct r600_bc_tex
*vtx
= NULL
, *next_vtx
;
1667 LIST_FOR_EACH_ENTRY_SAFE(alu
, next_alu
, &cf
->alu
, list
) {
1671 LIST_INITHEAD(&cf
->alu
);
1673 LIST_FOR_EACH_ENTRY_SAFE(tex
, next_tex
, &cf
->tex
, list
) {
1677 LIST_INITHEAD(&cf
->tex
);
1679 LIST_FOR_EACH_ENTRY_SAFE(vtx
, next_vtx
, &cf
->vtx
, list
) {
1683 LIST_INITHEAD(&cf
->vtx
);
1688 LIST_INITHEAD(&cf
->list
);
1691 void r600_bc_dump(struct r600_bc
*bc
)
1693 struct r600_bc_cf
*cf
= NULL
;
1694 struct r600_bc_alu
*alu
= NULL
;
1695 struct r600_bc_vtx
*vtx
= NULL
;
1696 struct r600_bc_tex
*tex
= NULL
;
1699 uint32_t literal
[4];
1703 switch (bc
->chiprev
) {
1715 fprintf(stderr
, "bytecode %d dw -- %d gprs ---------------------\n", bc
->ndw
, bc
->ngpr
);
1716 fprintf(stderr
, " %c\n", chip
);
1718 LIST_FOR_EACH_ENTRY(cf
, &bc
->cf
, list
) {
1722 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU
<< 3):
1723 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER
<< 3):
1724 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER
<< 3):
1725 case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE
<< 3):
1726 fprintf(stderr
, "%04d %08X ALU ", id
, bc
->bytecode
[id
]);
1727 fprintf(stderr
, "ADDR:%d ", cf
->addr
);
1728 fprintf(stderr
, "KCACHE_MODE0:%X ", cf
->kcache
[0].mode
);
1729 fprintf(stderr
, "KCACHE_BANK0:%X ", cf
->kcache
[0].bank
);
1730 fprintf(stderr
, "KCACHE_BANK1:%X\n", cf
->kcache
[1].bank
);
1732 fprintf(stderr
, "%04d %08X ALU ", id
, bc
->bytecode
[id
]);
1733 fprintf(stderr
, "INST:%d ", cf
->inst
);
1734 fprintf(stderr
, "KCACHE_MODE1:%X ", cf
->kcache
[1].mode
);
1735 fprintf(stderr
, "KCACHE_ADDR0:%X ", cf
->kcache
[0].addr
);
1736 fprintf(stderr
, "KCACHE_ADDR1:%X ", cf
->kcache
[1].addr
);
1737 fprintf(stderr
, "COUNT:%d\n", cf
->ndw
/ 2);
1739 case V_SQ_CF_WORD1_SQ_CF_INST_TEX
:
1740 case V_SQ_CF_WORD1_SQ_CF_INST_VTX
:
1741 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC
:
1742 fprintf(stderr
, "%04d %08X TEX/VTX ", id
, bc
->bytecode
[id
]);
1743 fprintf(stderr
, "ADDR:%d\n", cf
->addr
);
1745 fprintf(stderr
, "%04d %08X TEX/VTX ", id
, bc
->bytecode
[id
]);
1746 fprintf(stderr
, "INST:%d ", cf
->inst
);
1747 fprintf(stderr
, "COUNT:%d\n", cf
->ndw
/ 4);
1749 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT
:
1750 case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE
:
1751 fprintf(stderr
, "%04d %08X EXPORT ", id
, bc
->bytecode
[id
]);
1752 fprintf(stderr
, "GPR:%X ", cf
->output
.gpr
);
1753 fprintf(stderr
, "ELEM_SIZE:%X ", cf
->output
.elem_size
);
1754 fprintf(stderr
, "ARRAY_BASE:%X ", cf
->output
.array_base
);
1755 fprintf(stderr
, "TYPE:%X\n", cf
->output
.type
);
1757 fprintf(stderr
, "%04d %08X EXPORT ", id
, bc
->bytecode
[id
]);
1758 fprintf(stderr
, "SWIZ_X:%X ", cf
->output
.swizzle_x
);
1759 fprintf(stderr
, "SWIZ_Y:%X ", cf
->output
.swizzle_y
);
1760 fprintf(stderr
, "SWIZ_Z:%X ", cf
->output
.swizzle_z
);
1761 fprintf(stderr
, "SWIZ_W:%X ", cf
->output
.swizzle_w
);
1762 fprintf(stderr
, "BARRIER:%X ", cf
->output
.barrier
);
1763 fprintf(stderr
, "INST:%d ", cf
->output
.inst
);
1764 fprintf(stderr
, "BURST_COUNT:%d ", cf
->output
.burst_count
);
1765 fprintf(stderr
, "EOP:%X\n", cf
->output
.end_of_program
);
1767 case V_SQ_CF_WORD1_SQ_CF_INST_JUMP
:
1768 case V_SQ_CF_WORD1_SQ_CF_INST_ELSE
:
1769 case V_SQ_CF_WORD1_SQ_CF_INST_POP
:
1770 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL
:
1771 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END
:
1772 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE
:
1773 case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK
:
1774 case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS
:
1775 case V_SQ_CF_WORD1_SQ_CF_INST_RETURN
:
1776 fprintf(stderr
, "%04d %08X CF ", id
, bc
->bytecode
[id
]);
1777 fprintf(stderr
, "ADDR:%d\n", cf
->cf_addr
);
1779 fprintf(stderr
, "%04d %08X CF ", id
, bc
->bytecode
[id
]);
1780 fprintf(stderr
, "INST:%d ", cf
->inst
);
1781 fprintf(stderr
, "COND:%X ", cf
->cond
);
1782 fprintf(stderr
, "POP_COUNT:%X\n", cf
->pop_count
);
1788 LIST_FOR_EACH_ENTRY(alu
, &cf
->alu
, list
) {
1789 r600_bc_alu_nliterals(bc
, alu
, literal
, &nliteral
);
1791 fprintf(stderr
, "%04d %08X ", id
, bc
->bytecode
[id
]);
1792 fprintf(stderr
, "SRC0(SEL:%d ", alu
->src
[0].sel
);
1793 fprintf(stderr
, "REL:%d ", alu
->src
[0].rel
);
1794 fprintf(stderr
, "CHAN:%d ", alu
->src
[0].chan
);
1795 fprintf(stderr
, "NEG:%d) ", alu
->src
[0].neg
);
1796 fprintf(stderr
, "SRC1(SEL:%d ", alu
->src
[1].sel
);
1797 fprintf(stderr
, "REL:%d ", alu
->src
[1].rel
);
1798 fprintf(stderr
, "CHAN:%d ", alu
->src
[1].chan
);
1799 fprintf(stderr
, "NEG:%d) ", alu
->src
[1].neg
);
1800 fprintf(stderr
, "LAST:%d)\n", alu
->last
);
1802 fprintf(stderr
, "%04d %08X %c ", id
, bc
->bytecode
[id
], alu
->last
? '*' : ' ');
1803 fprintf(stderr
, "INST:%d ", alu
->inst
);
1804 fprintf(stderr
, "DST(SEL:%d ", alu
->dst
.sel
);
1805 fprintf(stderr
, "CHAN:%d ", alu
->dst
.chan
);
1806 fprintf(stderr
, "REL:%d ", alu
->dst
.rel
);
1807 fprintf(stderr
, "CLAMP:%d) ", alu
->dst
.clamp
);
1808 fprintf(stderr
, "BANK_SWIZZLE:%d ", alu
->bank_swizzle
);
1810 fprintf(stderr
, "SRC2(SEL:%d ", alu
->src
[2].sel
);
1811 fprintf(stderr
, "REL:%d ", alu
->src
[2].rel
);
1812 fprintf(stderr
, "CHAN:%d ", alu
->src
[2].chan
);
1813 fprintf(stderr
, "NEG:%d)\n", alu
->src
[2].neg
);
1815 fprintf(stderr
, "SRC0_ABS:%d ", alu
->src
[0].abs
);
1816 fprintf(stderr
, "SRC1_ABS:%d ", alu
->src
[1].abs
);
1817 fprintf(stderr
, "WRITE_MASK:%d ", alu
->dst
.write
);
1818 fprintf(stderr
, "OMOD:%d ", alu
->omod
);
1819 fprintf(stderr
, "EXECUTE_MASK:%d ", alu
->predicate
);
1820 fprintf(stderr
, "UPDATE_PRED:%d\n", alu
->predicate
);
1825 for (i
= 0; i
< nliteral
; i
++, id
++) {
1826 float *f
= (float*)(bc
->bytecode
+ id
);
1827 fprintf(stderr
, "%04d %08X\t%f\n", id
, bc
->bytecode
[id
], *f
);
1834 LIST_FOR_EACH_ENTRY(tex
, &cf
->tex
, list
) {
1838 LIST_FOR_EACH_ENTRY(vtx
, &cf
->vtx
, list
) {
1843 fprintf(stderr
, "--------------------------------------\n");
1846 static void r600_cf_vtx(struct r600_vertex_element
*ve
, u32
*bytecode
, unsigned count
)
1848 struct r600_pipe_state
*rstate
;
1852 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
1853 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX
) |
1854 S_SQ_CF_WORD1_BARRIER(1) |
1855 S_SQ_CF_WORD1_COUNT(8 - 1);
1856 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
1857 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX
) |
1858 S_SQ_CF_WORD1_BARRIER(1) |
1859 S_SQ_CF_WORD1_COUNT(count
- 8 - 1);
1861 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
1862 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX
) |
1863 S_SQ_CF_WORD1_BARRIER(1) |
1864 S_SQ_CF_WORD1_COUNT(count
- 1);
1866 bytecode
[i
++] = S_SQ_CF_WORD0_ADDR(0);
1867 bytecode
[i
++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN
) |
1868 S_SQ_CF_WORD1_BARRIER(1);
1870 rstate
= &ve
->rstate
;
1871 rstate
->id
= R600_PIPE_STATE_FETCH_SHADER
;
1873 r600_pipe_state_add_reg(rstate
, R_0288A4_SQ_PGM_RESOURCES_FS
,
1874 0x00000000, 0xFFFFFFFF, NULL
);
1875 r600_pipe_state_add_reg(rstate
, R_0288DC_SQ_PGM_CF_OFFSET_FS
,
1876 0x00000000, 0xFFFFFFFF, NULL
);
1877 r600_pipe_state_add_reg(rstate
, R_028894_SQ_PGM_START_FS
,
1878 r600_bo_offset(ve
->fetch_shader
) >> 8,
1879 0xFFFFFFFF, ve
->fetch_shader
);
1882 static void r600_vertex_data_type(enum pipe_format pformat
, unsigned *format
,
1883 unsigned *num_format
, unsigned *format_comp
)
1885 const struct util_format_description
*desc
;
1892 desc
= util_format_description(pformat
);
1893 if (desc
->layout
!= UTIL_FORMAT_LAYOUT_PLAIN
) {
1897 /* Find the first non-VOID channel. */
1898 for (i
= 0; i
< 4; i
++) {
1899 if (desc
->channel
[i
].type
!= UTIL_FORMAT_TYPE_VOID
) {
1904 switch (desc
->channel
[i
].type
) {
1905 /* Half-floats, floats, ints */
1906 case UTIL_FORMAT_TYPE_FLOAT
:
1907 switch (desc
->channel
[i
].size
) {
1909 switch (desc
->nr_channels
) {
1911 *format
= FMT_16_FLOAT
;
1914 *format
= FMT_16_16_FLOAT
;
1918 *format
= FMT_16_16_16_16_FLOAT
;
1923 switch (desc
->nr_channels
) {
1925 *format
= FMT_32_FLOAT
;
1928 *format
= FMT_32_32_FLOAT
;
1931 *format
= FMT_32_32_32_FLOAT
;
1934 *format
= FMT_32_32_32_32_FLOAT
;
1943 case UTIL_FORMAT_TYPE_UNSIGNED
:
1945 case UTIL_FORMAT_TYPE_SIGNED
:
1946 switch (desc
->channel
[i
].size
) {
1948 switch (desc
->nr_channels
) {
1957 *format
= FMT_8_8_8_8
;
1962 switch (desc
->nr_channels
) {
1967 *format
= FMT_16_16
;
1971 *format
= FMT_16_16_16_16
;
1976 switch (desc
->nr_channels
) {
1981 *format
= FMT_32_32
;
1984 *format
= FMT_32_32_32
;
1987 *format
= FMT_32_32_32_32
;
1999 if (desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_SIGNED
) {
2002 if (desc
->channel
[i
].normalized
) {
2009 R600_ERR("unsupported vertex format %s\n", util_format_name(pformat
));
2012 int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context
*rctx
, struct r600_vertex_element
*ve
)
2016 unsigned fetch_resource_start
= 0, format
, num_format
, format_comp
;
2017 struct pipe_vertex_element
*elements
= ve
->elements
;
2018 const struct util_format_description
*desc
;
2020 /* 2 dwords for cf aligned to 4 + 4 dwords per input */
2021 ndw
= 8 + ve
->count
* 4;
2022 ve
->fs_size
= ndw
* 4;
2024 /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
2025 ve
->fetch_shader
= r600_bo(rctx
->radeon
, ndw
*4, 256, PIPE_BIND_VERTEX_BUFFER
, 0);
2026 if (ve
->fetch_shader
== NULL
) {
2030 bytecode
= r600_bo_map(rctx
->radeon
, ve
->fetch_shader
, 0, NULL
);
2031 if (bytecode
== NULL
) {
2032 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2036 if (rctx
->family
>= CHIP_CEDAR
) {
2037 eg_cf_vtx(ve
, &bytecode
[0], (ndw
- 8) / 4);
2039 r600_cf_vtx(ve
, &bytecode
[0], (ndw
- 8) / 4);
2040 fetch_resource_start
= 160;
2043 /* vertex elements offset need special handling, if offset is bigger
2044 * than what we can put in fetch instruction then we need to alterate
2045 * the vertex resource offset. In such case in order to simplify code
2046 * we will bound one resource per elements. It's a worst case scenario.
2048 for (i
= 0; i
< ve
->count
; i
++) {
2049 ve
->vbuffer_offset
[i
] = C_SQ_VTX_WORD2_OFFSET
& elements
[i
].src_offset
;
2050 if (ve
->vbuffer_offset
[i
]) {
2051 ve
->vbuffer_need_offset
= 1;
2055 for (i
= 0; i
< ve
->count
; i
++) {
2056 unsigned vbuffer_index
;
2057 r600_vertex_data_type(ve
->elements
[i
].src_format
, &format
, &num_format
, &format_comp
);
2058 desc
= util_format_description(ve
->elements
[i
].src_format
);
2060 R600_ERR("unknown format %d\n", ve
->elements
[i
].src_format
);
2061 r600_bo_reference(rctx
->radeon
, &ve
->fetch_shader
, NULL
);
2065 /* see above for vbuffer_need_offset explanation */
2066 vbuffer_index
= elements
[i
].vertex_buffer_index
;
2067 if (ve
->vbuffer_need_offset
) {
2068 bytecode
[8 + i
* 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(i
+ fetch_resource_start
);
2070 bytecode
[8 + i
* 4 + 0] = S_SQ_VTX_WORD0_BUFFER_ID(vbuffer_index
+ fetch_resource_start
);
2072 bytecode
[8 + i
* 4 + 0] |= S_SQ_VTX_WORD0_SRC_GPR(0) |
2073 S_SQ_VTX_WORD0_SRC_SEL_X(0) |
2074 S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(0x1F);
2075 bytecode
[8 + i
* 4 + 1] = S_SQ_VTX_WORD1_DST_SEL_X(desc
->swizzle
[0]) |
2076 S_SQ_VTX_WORD1_DST_SEL_Y(desc
->swizzle
[1]) |
2077 S_SQ_VTX_WORD1_DST_SEL_Z(desc
->swizzle
[2]) |
2078 S_SQ_VTX_WORD1_DST_SEL_W(desc
->swizzle
[3]) |
2079 S_SQ_VTX_WORD1_USE_CONST_FIELDS(0) |
2080 S_SQ_VTX_WORD1_DATA_FORMAT(format
) |
2081 S_SQ_VTX_WORD1_NUM_FORMAT_ALL(num_format
) |
2082 S_SQ_VTX_WORD1_FORMAT_COMP_ALL(format_comp
) |
2083 S_SQ_VTX_WORD1_SRF_MODE_ALL(1) |
2084 S_SQ_VTX_WORD1_GPR_DST_GPR(i
+ 1);
2085 bytecode
[8 + i
* 4 + 2] = S_SQ_VTX_WORD2_OFFSET(elements
[i
].src_offset
) |
2086 S_SQ_VTX_WORD2_MEGA_FETCH(1);
2087 bytecode
[8 + i
* 4 + 3] = 0;
2089 r600_bo_unmap(rctx
->radeon
, ve
->fetch_shader
);