/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */

#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_shader.h"

using namespace brw;

/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
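
/* As a hedged illustration of the clock heuristic above (invented nodes and
 * times, not real measurements): suppose three DAG heads A, B and C are
 * ready with unblocked times 0, 2 and 4.  We pick A, append it to the
 * schedule, bump the clock by A's issue time, and raise the unblocked time
 * of each of A's children to at least clock + edge latency.  Any child whose
 * parent count drops to zero joins B and C as a candidate, and the next pick
 * is again the earliest-unblocked candidate.
 */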

static bool debug = false;

class instruction_scheduler;

class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, instruction_scheduler *sched);
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;
   bool is_barrier;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;

   /**
    * Preferred exit node among the (direct or indirect) successors of this
    * node.  Among the scheduler nodes blocked by this node, this will be the
    * one that may cause earliest program termination, or NULL if none of the
    * successors is an exit node.
    */
   schedule_node *exit;
};

/**
 * Lower bound of the scheduling time after which one of the instructions
 * blocked by this node may lead to program termination.
 *
 * exit_unblocked_time() determines a strict partial ordering relation '«' on
 * the set of scheduler nodes as follows:
 *
 *   n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
 *
 * which can be used to heuristically order nodes according to how early they
 * can unblock an exit node and lead to program termination.
 */
static inline int
exit_unblocked_time(const schedule_node *n)
{
   return n->exit ? n->exit->unblocked_time : INT_MAX;
}
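
/* Hedged example: a node blocking a discard jump whose earliest exit
 * unblocks at time 40 orders before one whose exit unblocks at 100, while a
 * node with no exit among its successors compares as INT_MAX, i.e. it never
 * wins this tie-break.
 */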

void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}

void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,5,1>F                     { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       * 16 cycles
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* 2 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       *
       * 18 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_POW:
      /* 2 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       *
       * 26 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LZ:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LZ:
      /* 18 cycles:
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles (n=15):
       * mov(8)   g114<1>UD  0D                        { align1 WE_normal 1Q };
       * send(8)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4        { align1 WE_normal 1Q };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16)  g114<1>UD  0D                        { align1 WE_normal 1H };
       * send(16) g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)  g114<1>UD  0D                        { align1 WE_normal 1H };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1H };
       * send(16) g8<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)  g8<1>F     g8<8,8,1>D                { align1 WE_normal 1H };
       * add(16)  g6<1>F     g6<8,8,1>F   g8<8,8,1>F   { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * 16 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       *
       * ~480 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * ~620 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F               { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC:
      /* Test code:
       *   mov(8)    g112<1>ud       0x00000000ud       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>ud     g1.7<0,1,0>ud      { align1 WE_all };
       *   mov(8)    g113<1>ud       0x00000000ud       { align1 WE_normal 1Q };
       *   send(8)   g4<1>ud         g112<8,8,1>ud
       *             data (38, 5, 6) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 13867 cycles per atomic op,
       * standard deviation 3%.  Note that this is a rather
       * pessimistic estimate, the actual latency in cases with few
       * collisions between threads and favorable pipelining has been
       * seen to be reduced by a factor of 100.
       */
      latency = 14000;
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      /* Test code:
       *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
       *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
       *   send(8)   g4<1>UD         g112<8,8,1>UD
       *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *   .
       *   . [repeats 8 times]
       *   .
       *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
       *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
       *   send(8)   g4<1>UD         g112<8,8,1>UD
       *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 583 cycles per surface read,
       * standard deviation 0.9%.
       */
      latency = is_haswell ? 300 : 600;
      break;

   default:
      /* 2 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       *
       * 16 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                      { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
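
/* For intuition (a hedged reading of the values above, not a measurement):
 * a texture fetch contributes a ~200-cycle RAW edge to any consumer of its
 * destination, so the scheduler will try to place on the order of a hundred
 * two-cycle ALU instructions between a send and its first use before the
 * consumer stops being the bottleneck.
 */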

class instruction_scheduler {
public:
   instruction_scheduler(backend_shader *s, int grf_count,
                         int hw_reg_count, int block_count,
                         instruction_scheduler_mode mode)
   {
      this->bs = s;
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->hw_reg_count = hw_reg_count;
      this->instructions.make_empty();
      this->instructions_to_schedule = 0;
      this->post_reg_alloc = (mode == SCHEDULE_POST);
      this->mode = mode;
      if (!post_reg_alloc) {
         this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);

         this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                            BITSET_WORDS(grf_count));

         this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                             BITSET_WORDS(grf_count));

         this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                                BITSET_WORDS(hw_reg_count));

         this->written = rzalloc_array(mem_ctx, bool, grf_count);

         this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);

         this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
      } else {
         this->reg_pressure_in = NULL;
         this->livein = NULL;
         this->liveout = NULL;
         this->hw_liveout = NULL;
         this->written = NULL;
         this->reads_remaining = NULL;
         this->hw_reads_remaining = NULL;
      }
   }

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }

   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(cfg_t *cfg);
   void add_insts_from_block(bblock_t *block);
   void compute_delays();
   void compute_exits();
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;

   virtual void count_reads_remaining(backend_instruction *inst) = 0;
   virtual void setup_liveness(cfg_t *cfg) = 0;
   virtual void update_register_pressure(backend_instruction *inst) = 0;
   virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

   void schedule_instructions(bblock_t *block);

   void *mem_ctx;

   bool post_reg_alloc;
   int instructions_to_schedule;
   int grf_count;
   int hw_reg_count;
   int reg_pressure;
   int block_idx;
   exec_list instructions;
   backend_shader *bs;

   instruction_scheduler_mode mode;

   /*
    * The register pressure at the beginning of each basic block.
    */
   int *reg_pressure_in;

   /*
    * The virtual GRF's whose range overlaps the beginning of each basic block.
    */
   BITSET_WORD **livein;

   /*
    * The virtual GRF's whose range overlaps the end of each basic block.
    */
   BITSET_WORD **liveout;

   /*
    * The hardware GRF's whose range overlaps the end of each basic block.
    */
   BITSET_WORD **hw_liveout;

   /*
    * Whether we've scheduled a write for this virtual GRF yet.
    */
   bool *written;

   /*
    * How many reads we haven't scheduled for this virtual GRF yet.
    */
   int *reads_remaining;

   /*
    * How many reads we haven't scheduled for this hardware GRF yet.
    */
   int *hw_reads_remaining;
};

class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count,
                            instruction_scheduler_mode mode);
   void calculate_deps();
   bool is_compressed(fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   fs_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
                                                   int grf_count, int hw_reg_count,
                                                   int block_count,
                                                   instruction_scheduler_mode mode)
   : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
     v(v)
{
}

static bool
is_src_duplicate(fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}

void
fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]++;
      } else if (inst->src[i].file == FIXED_GRF) {
         if (inst->src[i].nr >= hw_reg_count)
            continue;

         for (unsigned j = 0; j < regs_read(inst, i); j++)
            hw_reads_remaining[inst->src[i].nr + j]++;
      }
   }
}

void
fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < v->live_intervals->num_vars; i++) {
         if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
            int vgrf = v->live_intervals->vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
            BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_mask's.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
             v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
               reg_pressure_in[block + 1] += v->alloc.sizes[i];
               BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (int i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}

void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   if (inst->dst.file == VGRF) {
      written[inst->dst.nr] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]--;
      } else if (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++)
            hw_reads_remaining[inst->src[i].nr + off]--;
      }
   }
}

int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;

   if (inst->dst.file == VGRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
          !written[inst->dst.nr])
         benefit -= v->alloc.sizes[inst->dst.nr];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
          reads_remaining[inst->src[i].nr] == 1)
         benefit += v->alloc.sizes[inst->src[i].nr];

      if (inst->src[i].file == FIXED_GRF &&
          inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++) {
            int reg = inst->src[i].nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}

class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   vec4_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
     v(v)
{
}

void
vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
}

void
vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
}

void
vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
}

int
vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   return 0;
}

schedule_node::schedule_node(backend_instruction *inst,
                             instruction_scheduler *sched)
{
   const struct gen_device_info *devinfo = sched->bs->devinfo;

   this->inst = inst;
   this->child_array_size = 0;
   this->children = NULL;
   this->child_latency = NULL;
   this->child_count = 0;
   this->parent_count = 0;
   this->unblocked_time = 0;
   this->cand_generation = 0;
   this->delay = 0;
   this->exit = NULL;
   this->is_barrier = false;

   /* We can't measure Gen6 timings directly but expect them to be much
    * closer to Gen7 than Gen4.
    */
   if (!sched->post_reg_alloc)
      this->latency = 1;
   else if (devinfo->gen >= 6)
      set_latency_gen7(devinfo->is_haswell);
   else
      set_latency_gen4();
}

void
instruction_scheduler::add_insts_from_block(bblock_t *block)
{
   foreach_inst_in_block(backend_instruction, inst, block) {
      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

      instructions.push_tail(n);
   }

   this->instructions_to_schedule = block->end_ip - block->start_ip + 1;
}

/** Computation of the delay member of each node. */
void
instruction_scheduler::compute_delays()
{
   foreach_in_list_reverse(schedule_node, n, &instructions) {
      if (!n->child_count) {
         n->delay = issue_time(n->inst);
      } else {
         for (int i = 0; i < n->child_count; i++) {
            assert(n->children[i]->delay);
            n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
         }
      }
   }
}
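
/* Hedged worked example (invented numbers): for a chain A -> B -> C where
 * C is a leaf with issue_time 2, B has latency 14 and A has latency 2, the
 * reverse walk sets delay(C) = 2, delay(B) = 14 + 2 = 16, and
 * delay(A) = 2 + 16 = 18, i.e. each node's delay is the length of its
 * critical path to the end of the block.
 */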

void
instruction_scheduler::compute_exits()
{
   /* Calculate a lower bound of the scheduling time of each node in the
    * graph.  This is analogous to the node's critical path but calculated
    * from the top instead of from the bottom of the block.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      for (int i = 0; i < n->child_count; i++) {
         n->children[i]->unblocked_time =
            MAX2(n->children[i]->unblocked_time,
                 n->unblocked_time + issue_time(n->inst) + n->child_latency[i]);
      }
   }

   /* Calculate the exit of each node by induction based on the exit nodes of
    * its children.  The preferred exit of a node is the one among the exit
    * nodes of its children which can be unblocked first according to the
    * optimistic unblocked time estimate calculated above.
    */
   foreach_in_list_reverse(schedule_node, n, &instructions) {
      n->exit = (n->inst->opcode == FS_OPCODE_DISCARD_JUMP ? n : NULL);

      for (int i = 0; i < n->child_count; i++) {
         if (exit_unblocked_time(n->children[i]) < exit_unblocked_time(n))
            n->exit = n->children[i]->exit;
      }
   }
}
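
/* Hedged example: if a discard jump could optimistically unblock at time 30
 * through one parent and 50 through another, both parents record it as their
 * preferred exit, but the first parent compares as earlier under
 * exit_unblocked_time() and wins the scheduling tie-break below.
 */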

/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}

/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   n->is_barrier = true;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         if (prev->is_barrier)
            break;
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         if (next->is_barrier)
            break;
         next = (schedule_node *)next->next;
      }
   }
}

/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(fs_inst *inst)
{
   return inst->exec_size == 16;
}

static bool
is_scheduling_barrier(const fs_inst *inst)
{
   return inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
          inst->is_control_flow() ||
          inst->has_side_effects();
}

void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node *last_grf_write[grf_count * 16];
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod[4] = {};
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(last_grf_write[inst->src[i].nr * 16 +
                                         inst->src[i].offset / REG_SIZE + r], n);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(last_conditional_mod[i], n);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr + r], n);
               last_grf_write[inst->dst.nr + r] = n;
            }
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr * 16 +
                                      inst->dst.offset / REG_SIZE + r], n);
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;

         add_dep(last_mrf_write[reg], n);
         last_mrf_write[reg] = n;
         if (is_compressed(inst)) {
            if (inst->dst.nr & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            add_dep(last_mrf_write[reg], n);
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written()) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i)) {
               add_dep(last_conditional_mod[i], n, 0);
               last_conditional_mod[i] = n;
            }
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(n, last_grf_write[inst->src[i].nr * 16 +
                                            inst->src[i].offset / REG_SIZE + r], 0);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               add_dep(n, last_fixed_grf_write, 0);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write, 0);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(n, last_conditional_mod[i]);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.nr & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;

            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written()) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               last_conditional_mod[i] = n;
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }
}

static bool
is_scheduling_barrier(const vec4_instruction *inst)
{
   return inst->is_control_flow() ||
          inst->has_side_effects();
}

void
vec4_instruction_scheduler::calculate_deps()
{
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod = NULL;
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(last_grf_write[inst->src[i].nr + j], n);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(last_fixed_grf_write, n);
         } else if (inst->src[i].is_accumulator()) {
            assert(last_accumulator_write);
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         assert(last_conditional_mod);
         add_dep(last_conditional_mod, n);
      }

      if (inst->reads_accumulator_implicitly()) {
         assert(last_accumulator_write);
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j) {
            add_dep(last_grf_write[inst->dst.nr + j], n);
            last_grf_write[inst->dst.nr + j] = n;
         }
      } else if (inst->dst.file == MRF) {
         add_dep(last_mrf_write[inst->dst.nr], n);
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod, n, 0);
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   last_conditional_mod = NULL;
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(n, last_grf_write[inst->src[i].nr + j]);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(n, last_fixed_grf_write);
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j)
            last_grf_write[inst->dst.nr + j] = n;
      } else if (inst->dst.file == MRF) {
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }
}

schedule_node *
fs_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;

   if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
      int chosen_time = 0;

      /* Of the instructions ready to execute or the closest to being ready,
       * choose the one most likely to unblock an early program exit, or
       * otherwise the oldest one.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         if (!chosen ||
             exit_unblocked_time(n) < exit_unblocked_time(chosen) ||
             (exit_unblocked_time(n) == exit_unblocked_time(chosen) &&
              n->unblocked_time < chosen_time)) {
            chosen = n;
            chosen_time = n->unblocked_time;
         }
      }
   } else {
      /* Before register allocation, we don't care about the latencies of
       * instructions.  All we care about is reducing live intervals of
       * variables so that we can avoid register spilling, or get SIMD16
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
            chosen = n;
            continue;
         }

         /* Most important: If we can definitely reduce register pressure, do
          * so immediately.
          */
         int register_pressure_benefit = get_register_pressure_benefit(n->inst);
         int chosen_register_pressure_benefit =
            get_register_pressure_benefit(chosen->inst);

         if (register_pressure_benefit > 0 &&
             register_pressure_benefit > chosen_register_pressure_benefit) {
            chosen = n;
            continue;
         } else if (chosen_register_pressure_benefit > 0 &&
                    (register_pressure_benefit <
                     chosen_register_pressure_benefit)) {
            continue;
         }

         if (mode == SCHEDULE_PRE_LIFO) {
            /* Prefer instructions that recently became available for
             * scheduling.  These are the things that are most likely to
             * (eventually) make a variable dead and reduce register pressure.
             * Typical register pressure estimates don't work for us because
             * most of our pressure comes from texturing, where no single
             * instruction to schedule will make a vec4 value dead.
             */
            if (n->cand_generation > chosen->cand_generation) {
               chosen = n;
               continue;
            } else if (n->cand_generation < chosen->cand_generation) {
               continue;
            }

            /* On MRF-using chips, prefer non-SEND instructions.  If we don't
             * do this, then because we prefer instructions that just became
             * candidates, we'll end up in a pattern of scheduling a SEND,
             * then the MRFs for the next SEND, then the next SEND, then the
             * MRFs, etc., without ever consuming the results of a send.
             */
            if (v->devinfo->gen < 7) {
               fs_inst *chosen_inst = (fs_inst *)chosen->inst;

               /* We use size_written > 4 * exec_size as our test for the kind
                * of send instruction to avoid -- only sends generate many
                * regs, and a single-result send is probably actually reducing
                * register pressure.
                */
               if (inst->size_written <= 4 * inst->exec_size &&
                   chosen_inst->size_written > 4 * chosen_inst->exec_size) {
                  chosen = n;
                  continue;
               } else if (inst->size_written > chosen_inst->size_written) {
                  continue;
               }
            }
         }

         /* For instructions pushed on the cands list at the same time, prefer
          * the one with the highest delay to the end of the program.  This is
          * most likely to have its values able to be consumed first (such as
          * for a large tree of lowered ubo loads, which appear reversed in
          * the instruction stream with respect to when they can be consumed).
          */
         if (n->delay > chosen->delay) {
            chosen = n;
            continue;
         } else if (n->delay < chosen->delay) {
            continue;
         }

         /* Prefer the node most likely to unblock an early program exit.
          */
         if (exit_unblocked_time(n) < exit_unblocked_time(chosen)) {
            chosen = n;
            continue;
         } else if (exit_unblocked_time(n) > exit_unblocked_time(chosen)) {
            continue;
         }

         /* If all other metrics are equal, we prefer the first instruction in
          * the list (program execution).
          */
      }
   }

   return chosen;
}

schedule_node *
vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
      }
   }

   return chosen;
}

int
fs_instruction_scheduler::issue_time(backend_instruction *inst)
{
   if (is_compressed((fs_inst *)inst))
      return 4;
   else
      return 2;
}

int
vec4_instruction_scheduler::issue_time(backend_instruction *inst)
{
   /* We always execute as two vec4s in parallel. */
   return 2;
}

void
instruction_scheduler::schedule_instructions(bblock_t *block)
{
   const struct gen_device_info *devinfo = bs->devinfo;
   int time = 0;
   if (!post_reg_alloc)
      reg_pressure = reg_pressure_in[block->num];
   block_idx = block->num;

   /* Remove non-DAG heads from the list. */
   foreach_in_list_safe(schedule_node, n, &instructions) {
      if (n->parent_count != 0)
         n->remove();
   }

   unsigned cand_generation = 1;
   while (!instructions.is_empty()) {
      schedule_node *chosen = choose_instruction_to_schedule();

      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
      chosen->inst->exec_node::remove();
      block->instructions.push_tail(chosen->inst);
      instructions_to_schedule--;

      if (!post_reg_alloc) {
         reg_pressure -= get_register_pressure_benefit(chosen->inst);
         update_register_pressure(chosen->inst);
      }

      /* If we expected a delay for scheduling, then bump the clock to reflect
       * that.  In reality, the hardware will switch to another hyperthread
       * and may not return to dispatching our thread for a while even after
       * we're unblocked.  After this, we have the time when the chosen
       * instruction will start executing.
       */
      time = MAX2(time, chosen->unblocked_time);

      /* Update the clock for how soon an instruction could start after the
       * chosen one.
       */
      time += issue_time(chosen->inst);

      if (debug) {
         fprintf(stderr, "clock %4d, scheduled: ", time);
         bs->dump_instruction(chosen->inst);
         if (!post_reg_alloc)
            fprintf(stderr, "(register pressure %d)\n", reg_pressure);
      }

      /* Now that we've scheduled a new instruction, some of its
       * children can be promoted to the list of instructions ready to
       * be scheduled.  Update the children's unblocked time for this
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
         schedule_node *child = chosen->children[i];

         child->unblocked_time = MAX2(child->unblocked_time,
                                      time + chosen->child_latency[i]);

         if (debug) {
            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
            bs->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
         child->parent_count--;
         if (child->parent_count == 0) {
            if (debug) {
               fprintf(stderr, "\t\tnow available\n");
            }
            instructions.push_head(child);
         }
      }
      cand_generation++;

      /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
       * but it's more limited pre-gen6, so if we send something off to it then
       * the next math instruction isn't going to make progress until the first
       * is done.
       */
      if (devinfo->gen < 6 && chosen->inst->is_math()) {
         foreach_in_list(schedule_node, n, &instructions) {
            if (n->inst->is_math())
               n->unblocked_time = MAX2(n->unblocked_time,
                                        time + chosen->latency);
         }
      }
   }

   assert(instructions_to_schedule == 0);

   block->cycle_count = time;
}

static unsigned get_cycle_count(cfg_t *cfg)
{
   unsigned count = 0, multiplier = 1;
   foreach_block(block, cfg) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         multiplier *= 10; /* assume that loops execute ~10 times */

      count += block->cycle_count * multiplier;

      if (block->end()->opcode == BRW_OPCODE_WHILE)
         multiplier /= 10;
   }

   return count;
}
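
/* Hedged example: with blocks A, loop body B and C costing 10, 20 and 10
 * cycles, the DO bumps the multiplier to 10 before B and the WHILE drops it
 * back, giving 10 + 20 * 10 + 10 = 220 estimated cycles; a nested loop
 * would contribute at weight 100 while inside both.
 */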

void
instruction_scheduler::run(cfg_t *cfg)
{
   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   if (!post_reg_alloc)
      setup_liveness(cfg);

   foreach_block(block, cfg) {
      if (block->end_ip - block->start_ip <= 1)
         continue;

      if (reads_remaining) {
         memset(reads_remaining, 0,
                grf_count * sizeof(*reads_remaining));
         memset(hw_reads_remaining, 0,
                hw_reg_count * sizeof(*hw_reads_remaining));
         memset(written, 0, grf_count * sizeof(*written));

         foreach_inst_in_block(fs_inst, inst, block)
            count_reads_remaining(inst);
      }

      add_insts_from_block(block);

      calculate_deps();

      compute_delays();
      compute_exits();

      schedule_instructions(block);
   }

   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   cfg->cycle_count = get_cycle_count(cfg);
}

void
fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
{
   if (mode != SCHEDULE_POST)
      calculate_live_intervals();

   int grf_count;
   if (mode == SCHEDULE_POST)
      grf_count = grf_used;
   else
      grf_count = alloc.count;

   fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
                                  cfg->num_blocks, mode);
   sched.run(cfg);

   invalidate_live_intervals();
}

void
vec4_visitor::opt_schedule_instructions()
{
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(cfg);

   invalidate_live_intervals();
}