/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_shader.h"
/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
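/* Illustrative sketch (not part of the driver): the greedy list-scheduling
 * model described above, reduced to a toy DAG.  All names and the clock
 * model are hypothetical simplifications of what the scheduler below does.
 */
#if 0
#include <algorithm>
#include <vector>

struct toy_node {
   int latency;          /* cycles until dependents may start */
   int unblocked_time;   /* earliest clock at which this node is ready */
   int unscheduled_deps; /* remaining unscheduled predecessors */
   std::vector<int> children;
};

static void
toy_list_schedule(std::vector<toy_node> &dag, std::vector<int> &order)
{
   /* Start with the DAG heads: nodes with no unscheduled predecessors. */
   std::vector<int> ready;
   for (unsigned i = 0; i < dag.size(); i++)
      if (dag[i].unscheduled_deps == 0)
         ready.push_back(i);

   int clock = 0;
   while (!ready.empty()) {
      /* Greedy choice: the ready node with the smallest unblocked time. */
      auto it = std::min_element(ready.begin(), ready.end(),
                                 [&](int a, int b) {
                                    return dag[a].unblocked_time <
                                           dag[b].unblocked_time;
                                 });
      int n = *it;
      ready.erase(it);

      clock = std::max(clock, dag[n].unblocked_time) + 1;
      order.push_back(n);

      /* Unblock children; each becomes ready once all parents are done. */
      for (int c : dag[n].children) {
         dag[c].unblocked_time = std::max(dag[c].unblocked_time,
                                          clock + dag[n].latency);
         if (--dag[c].unscheduled_deps == 0)
            ready.push_back(c);
      }
   }
}
#endif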
static bool debug = false;

class instruction_scheduler;
class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, instruction_scheduler *sched);
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;

   /**
    * Preferred exit node among the (direct or indirect) successors of this
    * node.  Among the scheduler nodes blocked by this node, this will be the
    * one that may cause earliest program termination, or NULL if none of the
    * successors is an exit node.
    */
   schedule_node *exit;
};

/**
 * Lower bound of the scheduling time after which one of the instructions
 * blocked by this node may lead to program termination.
 *
 * exit_unblocked_time() determines a strict partial ordering relation '«' on
 * the set of scheduler nodes as follows:
 *
 *   n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
 *
 * which can be used to heuristically order nodes according to how early they
 * can unblock an exit node and lead to program termination.
 */
static inline int
exit_unblocked_time(const schedule_node *n)
{
   return n->exit ? n->exit->unblocked_time : INT_MAX;
}
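/* Worked example (hypothetical numbers): if node A blocks an exit node
 * whose unblocked_time is 40 and node B blocks no exit node at all, then
 * exit_unblocked_time(A) == 40 < INT_MAX == exit_unblocked_time(B), so
 * A « B and the heuristics below prefer A as the node more likely to lead
 * to early program termination.
 */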
void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}
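/* Worked example of the model above (hypothetical instruction): an RSQ
 * runs 2 rounds through the shared math unit, so its modeled latency is
 * 2 * chans * math_latency = 2 * 8 * 22 = 352 cycles.
 */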
void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,5,1>F                     { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       * 16 cycles
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = 14;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* 2 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       *
       * 18 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_POW:
      /* 2 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       *
       * 26 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LZ:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LZ:
      /* 18 cycles:
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles (n=15):
       * mov(8)   g114<1>UD  0D                        { align1 WE_normal 1Q };
       * send(8)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4        { align1 WE_normal 1Q };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1Q };
       *
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16)  g114<1>UD  0D                        { align1 WE_normal 1H };
       * send(16) g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)  g114<1>UD  0D                        { align1 WE_normal 1H };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1H };
       * send(16) g8<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)  g8<1>F     g8<8,8,1>D                { align1 WE_normal 1H };
       * add(16)  g6<1>F     g6<8,8,1>F   g8<8,8,1>F   { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * 16 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       *
       * ~480 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * ~620 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case VEC4_OPCODE_UNTYPED_ATOMIC:
      /* See GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
      latency = 14000;
      break;

   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
      /* See also GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ */
      latency = is_haswell ? 300 : 600;
      break;

   case SHADER_OPCODE_SEND:
      switch (inst->sfid) {
      case BRW_SFID_SAMPLER: {
         unsigned msg_type = (inst->desc >> 12) & 0x1f;
         switch (msg_type) {
         case GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO:
         case GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO:
            /* See also SHADER_OPCODE_TXS */
            latency = 100;
            break;

         default:
            /* See also SHADER_OPCODE_TEX */
            latency = 200;
            break;
         }
         break;
      }

      case GEN6_SFID_DATAPORT_RENDER_CACHE:
         switch ((inst->desc >> 14) & 0x1f) {
         case GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE:
         case GEN7_DATAPORT_RC_TYPED_SURFACE_READ:
            /* See also SHADER_OPCODE_TYPED_SURFACE_READ */
            assert(!is_haswell);
            latency = 600;
            break;

         case GEN7_DATAPORT_RC_TYPED_ATOMIC_OP:
            /* See also SHADER_OPCODE_TYPED_ATOMIC */
            assert(!is_haswell);
            latency = 14000;
            break;

         case GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE:
            /* completely fabricated number */
            latency = 600;
            break;

         default:
            unreachable("Unknown render cache message");
         }
         break;

      case GEN7_SFID_DATAPORT_DATA_CACHE:
         switch ((inst->desc >> 14) & 0x1f) {
         case GEN7_DATAPORT_DC_DWORD_SCATTERED_READ:
         case GEN6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE:
            /* We have no data for this but assume it's roughly the same as
             * untyped surface read/write.
             */
            latency = 300;
            break;

         case GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ:
         case GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE:
            /* Test code:
             *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
             *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
             *   send(8)   g4<1>UD         g112<8,8,1>UD
             *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *   .
             *   . [repeats 8 times]
             *   .
             *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
             *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
             *   send(8)   g4<1>UD         g112<8,8,1>UD
             *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 583 cycles per surface read,
             * standard deviation 0.9%.
             */
            assert(!is_haswell);
            latency = 600;
            break;

         case GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP:
            /* Test code:
             *   mov(8)    g112<1>ud       0x00000000ud       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>ud     g1.7<0,1,0>ud      { align1 WE_all };
             *   mov(8)    g113<1>ud       0x00000000ud       { align1 WE_normal 1Q };
             *   send(8)   g4<1>ud         g112<8,8,1>ud
             *             data (38, 5, 6) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 13867 cycles per atomic op,
             * standard deviation 3%.  Note that this is a rather
             * pessimistic estimate, the actual latency in cases with few
             * collisions between threads and favorable pipelining has been
             * seen to be reduced by a factor of 100.
             */
            assert(!is_haswell);
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      case HSW_SFID_DATAPORT_DATA_CACHE_1:
         switch ((inst->desc >> 14) & 0x1f) {
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE:
         case GEN8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE:
         case GEN8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ:
         case GEN8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE:
         case GEN9_DATAPORT_DC_PORT1_A64_SCATTERED_READ:
            /* See also GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ */
            latency = 300;
            break;

         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP:
         case GEN9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP:
         case GEN8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP:
         case GEN9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP:
            /* See also GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      default:
         unreachable("Unknown SFID");
      }
      break;
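   /* For reference (hypothetical descriptor value): with inst->desc ==
    * 0xa000, (desc >> 12) & 0x1f == 10, which is how the sampler message
    * type is selected above, while (desc >> 14) & 0x1f decodes the message
    * type for the dataport SFIDs.
    */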
   default:
      /* 2 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       *
       * 16 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                      { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
class instruction_scheduler {
public:
   instruction_scheduler(backend_shader *s, int grf_count,
                         unsigned hw_reg_count, int block_count,
                         instruction_scheduler_mode mode):
      bs(s)
   {
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->hw_reg_count = hw_reg_count;
      this->instructions.make_empty();
      this->instructions_to_schedule = 0;
      this->post_reg_alloc = (mode == SCHEDULE_POST);
      this->mode = mode;
      if (!post_reg_alloc) {
         this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);

         this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                            BITSET_WORDS(grf_count));

         this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                             BITSET_WORDS(grf_count));

         this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                                BITSET_WORDS(hw_reg_count));

         this->written = rzalloc_array(mem_ctx, bool, grf_count);

         this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);

         this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
      } else {
         this->reg_pressure_in = NULL;
         this->livein = NULL;
         this->liveout = NULL;
         this->hw_liveout = NULL;
         this->written = NULL;
         this->reads_remaining = NULL;
         this->hw_reads_remaining = NULL;
      }
   }
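   /* Sizing note: BITSET_WORDS() (Mesa's util/bitset.h helper) rounds a bit
    * count up to whole 32-bit words, so e.g. with grf_count == 100 each
    * per-block live set above occupies BITSET_WORDS(100) == 4 words.
    */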

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }

   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(cfg_t *cfg);
   void add_insts_from_block(bblock_t *block);
   void compute_delays();
   void compute_exits();
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;

   virtual void count_reads_remaining(backend_instruction *inst) = 0;
   virtual void setup_liveness(cfg_t *cfg) = 0;
   virtual void update_register_pressure(backend_instruction *inst) = 0;
   virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

   void schedule_instructions(bblock_t *block);

   void *mem_ctx;

   bool post_reg_alloc;
   int instructions_to_schedule;
   int grf_count;
   unsigned hw_reg_count;
   int time;
   int reg_pressure;
   int block_idx;
   exec_list instructions;
   backend_shader *bs;

   instruction_scheduler_mode mode;

   /*
    * The register pressure at the beginning of each basic block.
    */

   int *reg_pressure_in;

   /*
    * The virtual GRF's whose range overlaps the beginning of each basic block.
    */

   BITSET_WORD **livein;

   /*
    * The virtual GRF's whose range overlaps the end of each basic block.
    */

   BITSET_WORD **liveout;

   /*
    * The hardware GRF's whose range overlaps the end of each basic block.
    */

   BITSET_WORD **hw_liveout;

   /*
    * Whether we've scheduled a write for this virtual GRF yet.
    */

   bool *written;

   /*
    * How many reads we haven't scheduled for this virtual GRF yet.
    */

   int *reads_remaining;

   /*
    * How many reads we haven't scheduled for this hardware GRF yet.
    */

   int *hw_reads_remaining;
};
class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count,
                            instruction_scheduler_mode mode);
   void calculate_deps();
   bool is_compressed(fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   fs_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};
fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
                                                   int grf_count, int hw_reg_count,
                                                   int block_count,
                                                   instruction_scheduler_mode mode)
   : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
     v(v)
{
}
static bool
is_src_duplicate(fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}
void
fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]++;
      } else if (inst->src[i].file == FIXED_GRF) {
         if (inst->src[i].nr >= hw_reg_count)
            continue;

         for (unsigned j = 0; j < regs_read(inst, i); j++)
            hw_reads_remaining[inst->src[i].nr + j]++;
      }
   }
}
void
fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
   const fs_live_variables &live = v->live_analysis.require();

   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < live.num_vars; i++) {
         if (BITSET_TEST(live.block_data[block].livein, i)) {
            int vgrf = live.vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(live.block_data[block].liveout, i))
            BITSET_SET(liveout[block], live.vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_mask's.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (live.vgrf_start[i] <= cfg->blocks[block]->end_ip &&
             live.vgrf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
               reg_pressure_in[block + 1] += v->alloc.sizes[i];
               BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (unsigned i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}
void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   if (inst->dst.file == VGRF) {
      written[inst->dst.nr] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]--;
      } else if (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++)
            hw_reads_remaining[inst->src[i].nr + off]--;
      }
   }
}
int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;

   if (inst->dst.file == VGRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
          !written[inst->dst.nr])
         benefit -= v->alloc.sizes[inst->dst.nr];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
          reads_remaining[inst->src[i].nr] == 1)
         benefit += v->alloc.sizes[inst->src[i].nr];

      if (inst->src[i].file == FIXED_GRF &&
          inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++) {
            int reg = inst->src[i].nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}
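/* Worked example (hypothetical allocation): for "add vgrf5, vgrf3, vgrf2"
 * where vgrf5 (size 2) is neither live-in nor written yet, vgrf3 (size 2)
 * has exactly one read left and is not live-out, and vgrf2 stays live, the
 * benefit is -2 (new def) + 2 (last use) = 0 registers of pressure change.
 */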
class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   vec4_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
     v(v)
{
}

void
vec4_instruction_scheduler::count_reads_remaining(backend_instruction *)
{
}

void
vec4_instruction_scheduler::setup_liveness(cfg_t *)
{
}

void
vec4_instruction_scheduler::update_register_pressure(backend_instruction *)
{
}

int
vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *)
{
   return 0;
}
schedule_node::schedule_node(backend_instruction *inst,
                             instruction_scheduler *sched)
{
   const struct gen_device_info *devinfo = sched->bs->devinfo;

   this->inst = inst;
   this->child_array_size = 0;
   this->children = NULL;
   this->child_latency = NULL;
   this->child_count = 0;
   this->parent_count = 0;
   this->unblocked_time = 0;
   this->cand_generation = 0;
   this->delay = 0;
   this->exit = NULL;

   /* We can't measure Gen6 timings directly but expect them to be much
    * closer to Gen7 than Gen4.
    */
   if (!sched->post_reg_alloc)
      this->latency = 1;
   else if (devinfo->gen >= 6)
      set_latency_gen7(devinfo->is_haswell);
   else
      set_latency_gen4();
}
void
instruction_scheduler::add_insts_from_block(bblock_t *block)
{
   foreach_inst_in_block(backend_instruction, inst, block) {
      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

      instructions.push_tail(n);
   }

   this->instructions_to_schedule = block->end_ip - block->start_ip + 1;
}
/** Computation of the delay member of each node. */
void
instruction_scheduler::compute_delays()
{
   foreach_in_list_reverse(schedule_node, n, &instructions) {
      if (!n->child_count) {
         n->delay = issue_time(n->inst);
      } else {
         for (int i = 0; i < n->child_count; i++) {
            assert(n->children[i]->delay);
            n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
         }
      }
   }
}
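/* Worked example (hypothetical latencies): for a dependence chain
 * A -> B -> C where C is a leaf with issue_time 2, B has latency 4 and
 * A has latency 200, the reverse walk computes delay(C) = 2,
 * delay(B) = 4 + 2 = 6, and delay(A) = 200 + 6 = 206, identifying A as
 * the head of the longest remaining path.
 */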
void
instruction_scheduler::compute_exits()
{
   /* Calculate a lower bound of the scheduling time of each node in the
    * graph.  This is analogous to the node's critical path but calculated
    * from the top instead of from the bottom of the block.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      for (int i = 0; i < n->child_count; i++) {
         n->children[i]->unblocked_time =
            MAX2(n->children[i]->unblocked_time,
                 n->unblocked_time + issue_time(n->inst) + n->child_latency[i]);
      }
   }

   /* Calculate the exit of each node by induction based on the exit nodes of
    * its children.  The preferred exit of a node is the one among the exit
    * nodes of its children which can be unblocked first according to the
    * optimistic unblocked time estimate calculated above.
    */
   foreach_in_list_reverse(schedule_node, n, &instructions) {
      n->exit = (n->inst->opcode == FS_OPCODE_DISCARD_JUMP ? n : NULL);

      for (int i = 0; i < n->child_count; i++) {
         if (exit_unblocked_time(n->children[i]) < exit_unblocked_time(n))
            n->exit = n->children[i]->exit;
      }
   }
}
/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}
static bool
is_scheduling_barrier(const backend_instruction *inst)
{
   return inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
          inst->is_control_flow() ||
          inst->has_side_effects();
}
/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         if (is_scheduling_barrier(prev->inst))
            break;
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         if (is_scheduling_barrier(next->inst))
            break;
         next = (schedule_node *)next->next;
      }
   }
}
/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(fs_inst *inst)
{
   return inst->exec_size == 16;
}
void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node **last_grf_write;
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod[8] = {};
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   last_grf_write = (schedule_node **)calloc(sizeof(schedule_node *), grf_count * 16);
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(last_grf_write[inst->src[i].nr * 16 +
                                         inst->src[i].offset / REG_SIZE + r], n);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(last_conditional_mod[i], n);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr + r], n);
               last_grf_write[inst->dst.nr + r] = n;
            }
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr * 16 +
                                      inst->dst.offset / REG_SIZE + r], n);
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;

         add_dep(last_mrf_write[reg], n);
         last_mrf_write[reg] = n;
         if (is_compressed(inst)) {
            if (inst->dst.nr & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            add_dep(last_mrf_write[reg], n);
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written()) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i)) {
               add_dep(last_conditional_mod[i], n, 0);
               last_conditional_mod[i] = n;
            }
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }
   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(schedule_node *) * grf_count * 16);
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(n, last_grf_write[inst->src[i].nr * 16 +
                                            inst->src[i].offset / REG_SIZE + r], 0);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               add_dep(n, last_fixed_grf_write, 0);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write, 0);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(n, last_conditional_mod[i]);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.nr & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written()) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               last_conditional_mod[i] = n;
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }

   free(last_grf_write);
}
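/* Illustrative sketch (not part of the driver): the last-writer
 * dependence-tracking pattern used by calculate_deps() above, reduced to
 * its core.  A forward pass records the last writer of each register to
 * add RAW/WAW edges; a backward pass records writers to add WAR edges.
 * All names here are hypothetical.
 */
#if 0
#include <stdlib.h>

struct toy_inst { int dst; int src[2]; };

static void
toy_calculate_deps(const toy_inst *insts, int count, int reg_count,
                   void (*add_edge)(int before, int after))
{
   int *last_write = (int *)malloc(reg_count * sizeof(int));
   for (int i = 0; i < reg_count; i++)
      last_write[i] = -1;

   /* Forward: RAW (read after write) and WAW (write after write). */
   for (int i = 0; i < count; i++) {
      for (int s = 0; s < 2; s++)
         if (last_write[insts[i].src[s]] != -1)
            add_edge(last_write[insts[i].src[s]], i);
      if (last_write[insts[i].dst] != -1)
         add_edge(last_write[insts[i].dst], i);
      last_write[insts[i].dst] = i;
   }

   /* Backward: WAR (write after read), modeled with zero latency. */
   for (int i = 0; i < reg_count; i++)
      last_write[i] = -1;
   for (int i = count - 1; i >= 0; i--) {
      for (int s = 0; s < 2; s++)
         if (last_write[insts[i].src[s]] != -1)
            add_edge(i, last_write[insts[i].src[s]]);
      last_write[insts[i].dst] = i;
   }

   free(last_write);
}
#endif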
void
vec4_instruction_scheduler::calculate_deps()
{
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod = NULL;
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(last_grf_write[inst->src[i].nr + j], n);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(last_fixed_grf_write, n);
         } else if (inst->src[i].is_accumulator()) {
            assert(last_accumulator_write);
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->reads_g0_implicitly())
         add_dep(last_fixed_grf_write, n);

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         assert(last_conditional_mod);
         add_dep(last_conditional_mod, n);
      }

      if (inst->reads_accumulator_implicitly()) {
         assert(last_accumulator_write);
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j) {
            add_dep(last_grf_write[inst->dst.nr + j], n);
            last_grf_write[inst->dst.nr + j] = n;
         }
      } else if (inst->dst.file == MRF) {
         add_dep(last_mrf_write[inst->dst.nr], n);
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod, n, 0);
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   last_conditional_mod = NULL;
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(n, last_grf_write[inst->src[i].nr + j]);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(n, last_fixed_grf_write);
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j)
            last_grf_write[inst->dst.nr + j] = n;
      } else if (inst->dst.file == MRF) {
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }
}
schedule_node *
fs_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;

   if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
      int chosen_time = 0;

      /* Of the instructions ready to execute or the closest to being ready,
       * choose the one most likely to unblock an early program exit, or
       * otherwise the oldest one.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         if (!chosen ||
             exit_unblocked_time(n) < exit_unblocked_time(chosen) ||
             (exit_unblocked_time(n) == exit_unblocked_time(chosen) &&
              n->unblocked_time < chosen_time)) {
            chosen = n;
            chosen_time = n->unblocked_time;
         }
      }
   } else {
      /* Before register allocation, we don't care about the latencies of
       * instructions.  All we care about is reducing live intervals of
       * variables so that we can avoid register spilling, or get SIMD16
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
            chosen = n;
            continue;
         }

         /* Most important: If we can definitely reduce register pressure, do
          * so immediately.
          */
         int register_pressure_benefit = get_register_pressure_benefit(n->inst);
         int chosen_register_pressure_benefit =
            get_register_pressure_benefit(chosen->inst);

         if (register_pressure_benefit > 0 &&
             register_pressure_benefit > chosen_register_pressure_benefit) {
            chosen = n;
            continue;
         } else if (chosen_register_pressure_benefit > 0 &&
                    (register_pressure_benefit <
                     chosen_register_pressure_benefit)) {
            continue;
         }

         if (mode == SCHEDULE_PRE_LIFO) {
            /* Prefer instructions that recently became available for
             * scheduling.  These are the things that are most likely to
             * (eventually) make a variable dead and reduce register pressure.
             * Typical register pressure estimates don't work for us because
             * most of our pressure comes from texturing, where no single
             * instruction to schedule will make a vec4 value dead.
             */
            if (n->cand_generation > chosen->cand_generation) {
               chosen = n;
               continue;
            } else if (n->cand_generation < chosen->cand_generation) {
               continue;
            }

            /* On MRF-using chips, prefer non-SEND instructions.  If we don't
             * do this, then because we prefer instructions that just became
             * candidates, we'll end up in a pattern of scheduling a SEND,
             * then the MRFs for the next SEND, then the next SEND, then the
             * MRFs, etc., without ever consuming the results of a send.
             */
            if (v->devinfo->gen < 7) {
               fs_inst *chosen_inst = (fs_inst *)chosen->inst;

               /* We use size_written > 4 * exec_size as our test for the kind
                * of send instruction to avoid -- only sends generate many
                * regs, and a single-result send is probably actually reducing
                * register pressure.
                */
               if (inst->size_written <= 4 * inst->exec_size &&
                   chosen_inst->size_written > 4 * chosen_inst->exec_size) {
                  chosen = n;
                  continue;
               } else if (inst->size_written > chosen_inst->size_written) {
                  continue;
               }
            }
         }

         /* For instructions pushed on the cands list at the same time, prefer
          * the one with the highest delay to the end of the program.  This is
          * most likely to have its values able to be consumed first (such as
          * for a large tree of lowered ubo loads, which appear reversed in
          * the instruction stream with respect to when they can be consumed).
          */
         if (n->delay > chosen->delay) {
            chosen = n;
            continue;
         } else if (n->delay < chosen->delay) {
            continue;
         }

         /* Prefer the node most likely to unblock an early program exit.
          */
         if (exit_unblocked_time(n) < exit_unblocked_time(chosen)) {
            chosen = n;
            continue;
         } else if (exit_unblocked_time(n) > exit_unblocked_time(chosen)) {
            continue;
         }

         /* If all other metrics are equal, we prefer the first instruction in
          * the list (program execution).
          */
      }
   }

   return chosen;
}
schedule_node *
vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
      }
   }

   return chosen;
}
int
fs_instruction_scheduler::issue_time(backend_instruction *inst)
{
   const unsigned overhead = v->bank_conflict_cycles((fs_inst *)inst);
   if (is_compressed((fs_inst *)inst))
      return 4 + overhead;
   else
      return 2 + overhead;
}
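/* E.g. (hypothetical instruction) a SIMD16 MOV with a 2-cycle bank-conflict
 * penalty issues in 4 + 2 = 6 cycles, while a conflict-free SIMD8 equivalent
 * issues in 2.
 */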
int
vec4_instruction_scheduler::issue_time(backend_instruction *)
{
   /* We always execute as two vec4s in parallel. */
   return 2;
}
void
instruction_scheduler::schedule_instructions(bblock_t *block)
{
   const struct gen_device_info *devinfo = bs->devinfo;
   time = 0;
   if (!post_reg_alloc)
      reg_pressure = reg_pressure_in[block->num];
   block_idx = block->num;

   /* Remove non-DAG heads from the list. */
   foreach_in_list_safe(schedule_node, n, &instructions) {
      if (n->parent_count != 0)
         n->remove();
   }

   unsigned cand_generation = 1;
   while (!instructions.is_empty()) {
      schedule_node *chosen = choose_instruction_to_schedule();

      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
      chosen->inst->exec_node::remove();
      block->instructions.push_tail(chosen->inst);
      instructions_to_schedule--;

      if (!post_reg_alloc) {
         reg_pressure -= get_register_pressure_benefit(chosen->inst);
         update_register_pressure(chosen->inst);
      }

      /* If we expected a delay for scheduling, then bump the clock to reflect
       * that.  In reality, the hardware will switch to another hyperthread
       * and may not return to dispatching our thread for a while even after
       * we're unblocked.  After this, we have the time when the chosen
       * instruction will start executing.
       */
      time = MAX2(time, chosen->unblocked_time);

      /* Update the clock for how soon an instruction could start after the
       * chosen one.
       */
      time += issue_time(chosen->inst);

      if (debug) {
         fprintf(stderr, "clock %4d, scheduled: ", time);
         bs->dump_instruction(chosen->inst);
         if (!post_reg_alloc)
            fprintf(stderr, "(register pressure %d)\n", reg_pressure);
      }

      /* Now that we've scheduled a new instruction, some of its
       * children can be promoted to the list of instructions ready to
       * be scheduled.  Update the children's unblocked time for this
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
         schedule_node *child = chosen->children[i];

         child->unblocked_time = MAX2(child->unblocked_time,
                                      time + chosen->child_latency[i]);

         if (debug) {
            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
            bs->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
         child->parent_count--;
         if (child->parent_count == 0) {
            if (debug) {
               fprintf(stderr, "\t\tnow available\n");
            }
            instructions.push_head(child);
         }
      }
      cand_generation++;

      /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
       * but it's more limited pre-gen6, so if we send something off to it then
       * the next math instruction isn't going to make progress until the first
       * is done.
       */
      if (devinfo->gen < 6 && chosen->inst->is_math()) {
         foreach_in_list(schedule_node, n, &instructions) {
            if (n->inst->is_math())
               n->unblocked_time = MAX2(n->unblocked_time,
                                        time + chosen->latency);
         }
      }
   }

   assert(instructions_to_schedule == 0);

   block->cycle_count = time;
}
static unsigned get_cycle_count(cfg_t *cfg)
{
   unsigned count = 0, multiplier = 1;

   foreach_block(block, cfg) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         multiplier *= 10; /* assume that loops execute ~10 times */

      count += block->cycle_count * multiplier;

      if (block->end()->opcode == BRW_OPCODE_WHILE)
         multiplier /= 10;
   }

   return count;
}
void
instruction_scheduler::run(cfg_t *cfg)
{
   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   if (!post_reg_alloc)
      setup_liveness(cfg);

   foreach_block(block, cfg) {
      if (reads_remaining) {
         memset(reads_remaining, 0,
                grf_count * sizeof(*reads_remaining));
         memset(hw_reads_remaining, 0,
                hw_reg_count * sizeof(*hw_reads_remaining));
         memset(written, 0, grf_count * sizeof(*written));

         foreach_inst_in_block(fs_inst, inst, block)
            count_reads_remaining(inst);
      }

      add_insts_from_block(block);

      calculate_deps();

      compute_delays();
      compute_exits();

      schedule_instructions(block);
   }

   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   cfg->cycle_count = get_cycle_count(cfg);
}
void
fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
{
   int grf_count;
   if (mode == SCHEDULE_POST)
      grf_count = grf_used;
   else
      grf_count = alloc.count;

   fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
                                  cfg->num_blocks, mode);
   sched.run(cfg);

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
}
void
vec4_visitor::opt_schedule_instructions()
{
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(cfg);

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
}