/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_shader.h"
/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
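/* Illustration only -- a minimal, self-contained sketch of the clock-driven
 * greedy loop described above, under assumed simplifications (fixed-size
 * child arrays, no register-pressure heuristics, candidate array assumed
 * large enough to hold every node).  The names sketch_node and
 * sketch_schedule are hypothetical and exist nowhere else in the driver;
 * the real logic lives in instruction_scheduler::schedule_instructions()
 * below.
 */
#if 0
struct sketch_node {
   int unblocked_time;        /* earliest clock at which this node may issue */
   int latency;               /* cycles until its result is usable */
   int num_children;
   struct sketch_node *children[8];
   int parents_outstanding;   /* unscheduled parents; 0 == DAG head */
};

static void
sketch_schedule(struct sketch_node **candidates, int num_candidates)
{
   int clock = 0;

   while (num_candidates > 0) {
      /* Cheap heuristic: pick the earliest-unblocked candidate. */
      int best = 0;
      for (int i = 1; i < num_candidates; i++) {
         if (candidates[i]->unblocked_time < candidates[best]->unblocked_time)
            best = i;
      }
      struct sketch_node *n = candidates[best];
      candidates[best] = candidates[--num_candidates];

      /* Issue the node and advance the clock. */
      if (n->unblocked_time > clock)
         clock = n->unblocked_time;
      clock++;

      /* Children whose last parent just issued become new DAG heads. */
      for (int i = 0; i < n->num_children; i++) {
         struct sketch_node *child = n->children[i];
         if (child->unblocked_time < clock + n->latency)
            child->unblocked_time = clock + n->latency;
         if (--child->parents_outstanding == 0)
            candidates[num_candidates++] = child;
      }
   }
}
#endif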
static bool debug = false;
class instruction_scheduler;

class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, instruction_scheduler *sched);
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;
};
void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}
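/* Illustrative arithmetic only: with chans = 8 and math_latency = 22 as
 * above, an RCP is modeled as 1 * 8 * 22 = 176 cycles and a POW as
 * 8 * 8 * 22 = 1408 cycles, i.e. each additional math "round" costs a full
 * per-channel pass through the math unit in this model.
 */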
void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,5,1>F                   { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                   { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                   { align16 WE_normal 1Q };
       *
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                   { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /*
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       *
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_POW:
      /*
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       *
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
      /*
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41 cycles:
       * mov(16) g114<1>UD  0D                         { align1 WE_normal 1H };
       * send(8) g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4        { align1 WE_normal 1Q };
       * mov(16) g6<1>F     g6<8,8,1>D                 { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16) g114<1>UD  0D                         { align1 WE_normal 1H };
       * send(16) g6<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16) g114<1>UD  0D                         { align1 WE_normal 1H };
       * mov(16) g6<1>F     g6<8,8,1>D                 { align1 WE_normal 1H };
       * send(16) g8<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16) g8<1>F     g8<8,8,1>D                 { align1 WE_normal 1H };
       * add(16) g6<1>F     g6<8,8,1>F   g8<8,8,1>F    { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * mov(8)  g4<1>D     g2.1<0,1,0>F               { align1 WE_normal 1Q };
       * send(8) g4<1>F     g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       *
       * mov(8)  g4<1>D     g2.1<0,1,0>F               { align1 WE_normal 1Q };
       * send(8) g4<1>F     g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * mov(8)  g4<1>D     g2.1<0,1,0>F               { align1 WE_normal 1Q };
       * send(8) g4<1>F     g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>F     g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * toward the hot side.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC:
      /*
       * mov(8)  g112<1>ud  0x00000000ud    { align1 WE_all 1Q };
       * mov(1)  g112.7<1>ud g1.7<0,1,0>ud  { align1 WE_all };
       * mov(8)  g113<1>ud  0x00000000ud    { align1 WE_normal 1Q };
       * send(8) g4<1>ud    g112<8,8,1>ud
       *   data (38, 5, 6) mlen 2 rlen 1    { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 13867 cycles per atomic op,
       * standard deviation 3%.  Note that this is a rather
       * pessimistic estimate, the actual latency in cases with few
       * collisions between threads and favorable pipelining has been
       * seen to be reduced by a factor of 100.
       */
      latency = 14000;
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      /*
       * mov(8)  g112<1>UD  0x00000000UD    { align1 WE_all 1Q };
       * mov(1)  g112.7<1>UD g1.7<0,1,0>UD  { align1 WE_all };
       * mov(8)  g113<1>UD  0x00000000UD    { align1 WE_normal 1Q };
       * send(8) g4<1>UD    g112<8,8,1>UD
       *   data (38, 6, 5) mlen 2 rlen 1    { align1 WE_normal 1Q };
       * .
       * . [repeats 8 times]
       * .
       * mov(8)  g112<1>UD  0x00000000UD    { align1 WE_all 1Q };
       * mov(1)  g112.7<1>UD g1.7<0,1,0>UD  { align1 WE_all };
       * mov(8)  g113<1>UD  0x00000000UD    { align1 WE_normal 1Q };
       * send(8) g4<1>UD    g112<8,8,1>UD
       *   data (38, 6, 5) mlen 2 rlen 1    { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 583 cycles per surface read,
       * standard deviation 0.9%.
       */
      latency = is_haswell ? 300 : 600;
      break;

   default:
      /*
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       *
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                      { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
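/* Note on the measurements quoted above: each case pairs a back-to-back
 * sequence of the instruction (which bounds how often it can issue) with the
 * same sequence followed by a MOV that sources the result (which exposes how
 * long the result takes to become usable).  The latency values chosen in the
 * switch correspond, roughly, to the dependent-read measurements; issue cost
 * is modeled separately by issue_time().
 */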
class instruction_scheduler {
public:
   instruction_scheduler(backend_shader *s, int grf_count,
                         int hw_reg_count, int block_count,
                         instruction_scheduler_mode mode)
   {
      this->bs = s;
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->hw_reg_count = hw_reg_count;
      this->instructions.make_empty();
      this->instructions_to_schedule = 0;
      this->post_reg_alloc = (mode == SCHEDULE_POST);
      this->mode = mode;

      if (!post_reg_alloc) {
         this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);

         this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                            BITSET_WORDS(grf_count));

         this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                             BITSET_WORDS(grf_count));

         this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                                BITSET_WORDS(hw_reg_count));

         this->written = rzalloc_array(mem_ctx, bool, grf_count);

         this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);

         this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
      } else {
         this->reg_pressure_in = NULL;
         this->livein = NULL;
         this->liveout = NULL;
         this->hw_liveout = NULL;
         this->written = NULL;
         this->reads_remaining = NULL;
         this->hw_reads_remaining = NULL;
      }
   }

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }
   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(cfg_t *cfg);
   void add_insts_from_block(bblock_t *block);
   void compute_delay(schedule_node *node);
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;

   virtual void count_reads_remaining(backend_instruction *inst) = 0;
   virtual void setup_liveness(cfg_t *cfg) = 0;
   virtual void update_register_pressure(backend_instruction *inst) = 0;
   virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

   void schedule_instructions(bblock_t *block);
   void *mem_ctx;

   bool post_reg_alloc;
   int instructions_to_schedule;
   int grf_count;
   int hw_reg_count;
   int time;
   int reg_pressure;
   int block_idx;
   exec_list instructions;
   backend_shader *bs;

   instruction_scheduler_mode mode;

   /*
    * The register pressure at the beginning of each basic block.
    */
   int *reg_pressure_in;

   /*
    * The virtual GRF's whose range overlaps the beginning of each basic block.
    */
   BITSET_WORD **livein;

   /*
    * The virtual GRF's whose range overlaps the end of each basic block.
    */
   BITSET_WORD **liveout;

   /*
    * The hardware GRF's whose range overlaps the end of each basic block.
    */
   BITSET_WORD **hw_liveout;

   /*
    * Whether we've scheduled a write for this virtual GRF yet.
    */
   bool *written;

   /*
    * How many reads we haven't scheduled for this virtual GRF yet.
    */
   int *reads_remaining;

   /*
    * How many reads we haven't scheduled for this hardware GRF yet.
    */
   int *hw_reads_remaining;
};
class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count,
                            instruction_scheduler_mode mode);
   void calculate_deps();
   bool is_compressed(fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   fs_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
                                                   int grf_count,
                                                   int hw_reg_count,
                                                   int block_count,
                                                   instruction_scheduler_mode mode)
   : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
     v(v)
{
}
static bool
is_src_duplicate(fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}
void
fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]++;
      } else if (inst->src[i].file == FIXED_GRF) {
         if (inst->src[i].nr >= hw_reg_count)
            continue;

         for (int j = 0; j < inst->regs_read(i); j++)
            hw_reads_remaining[inst->src[i].nr + j]++;
      }
   }
}
void
fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < v->live_intervals->num_vars; i++) {
         if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
            int vgrf = v->live_intervals->vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
            BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_mask's.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
             v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
               reg_pressure_in[block + 1] += v->alloc.sizes[i];
               BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (int i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}
void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   if (inst->dst.file == VGRF) {
      written[inst->dst.nr] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]--;
      } else if (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].nr < hw_reg_count) {
         for (int off = 0; off < inst->regs_read(i); off++)
            hw_reads_remaining[inst->src[i].nr + off]--;
      }
   }
}
int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;

   if (inst->dst.file == VGRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
          !written[inst->dst.nr])
         benefit -= v->alloc.sizes[inst->dst.nr];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
          reads_remaining[inst->src[i].nr] == 1)
         benefit += v->alloc.sizes[inst->src[i].nr];

      if (inst->src[i].file == FIXED_GRF &&
          inst->src[i].nr < hw_reg_count) {
         for (int off = 0; off < inst->regs_read(i); off++) {
            int reg = inst->src[i].nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}
class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   vec4_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
     v(v)
{
}

void
vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
}

void
vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
}

void
vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
}

int
vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   return 0;
}
schedule_node::schedule_node(backend_instruction *inst,
                             instruction_scheduler *sched)
{
   const struct brw_device_info *devinfo = sched->bs->devinfo;

   this->inst = inst;
   this->child_array_size = 0;
   this->children = NULL;
   this->child_latency = NULL;
   this->child_count = 0;
   this->parent_count = 0;
   this->unblocked_time = 0;
   this->cand_generation = 0;
   this->delay = 0;

   /* We can't measure Gen6 timings directly but expect them to be much
    * closer to Gen7 than Gen4.
    */
   if (!sched->post_reg_alloc)
      this->latency = 1;
   else if (devinfo->gen >= 6)
      set_latency_gen7(devinfo->is_haswell);
   else
      set_latency_gen4();
}
void
instruction_scheduler::add_insts_from_block(bblock_t *block)
{
   foreach_inst_in_block(backend_instruction, inst, block) {
      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

      instructions.push_tail(n);
   }

   this->instructions_to_schedule = block->end_ip - block->start_ip + 1;
}
/** Recursive computation of the delay member of a node. */
void
instruction_scheduler::compute_delay(schedule_node *n)
{
   if (!n->child_count) {
      n->delay = issue_time(n->inst);
   } else {
      for (int i = 0; i < n->child_count; i++) {
         if (!n->children[i]->delay)
            compute_delay(n->children[i]);
         n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
      }
   }
}
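/* Worked example (illustrative): for a chain A -> B -> C in which A and B
 * each have latency 2 and C is a leaf whose issue_time() is 1, the recursion
 * above gives delay(C) = 1, delay(B) = 2 + 1 = 3 and delay(A) = 2 + 3 = 5,
 * i.e. each node's distance along the critical path to the end of the block.
 */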
/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}
/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         next = (schedule_node *)next->next;
      }
   }
}
= (schedule_node
*)next
->next
;
886 /* instruction scheduling needs to be aware of when an MRF write
887 * actually writes 2 MRFs.
890 fs_instruction_scheduler::is_compressed(fs_inst
*inst
)
892 return inst
->exec_size
== 16;
896 is_scheduling_barrier(const fs_inst
*inst
)
898 return inst
->opcode
== FS_OPCODE_PLACEHOLDER_HALT
||
899 inst
->is_control_flow() ||
901 (inst
->has_side_effects() && inst
->opcode
!= FS_OPCODE_FB_WRITE
);
905 fs_instruction_scheduler::calculate_deps()
907 /* Pre-register-allocation, this tracks the last write per VGRF offset.
908 * After register allocation, reg_offsets are gone and we track individual
911 schedule_node
*last_grf_write
[grf_count
* 16];
912 schedule_node
*last_mrf_write
[BRW_MAX_MRF(v
->devinfo
->gen
)];
913 schedule_node
*last_conditional_mod
[2] = { NULL
, NULL
};
914 schedule_node
*last_accumulator_write
= NULL
;
915 /* Fixed HW registers are assumed to be separate from the virtual
916 * GRFs, so they can be tracked separately. We don't really write
917 * to fixed GRFs much, so don't bother tracking them on a more
920 schedule_node
*last_fixed_grf_write
= NULL
;
922 memset(last_grf_write
, 0, sizeof(last_grf_write
));
923 memset(last_mrf_write
, 0, sizeof(last_mrf_write
));
925 /* top-to-bottom dependencies: RAW and WAW. */
926 foreach_in_list(schedule_node
, n
, &instructions
) {
927 fs_inst
*inst
= (fs_inst
*)n
->inst
;
929 if (is_scheduling_barrier(inst
))
932 /* read-after-write deps. */
933 for (int i
= 0; i
< inst
->sources
; i
++) {
934 if (inst
->src
[i
].file
== VGRF
) {
935 if (post_reg_alloc
) {
936 for (int r
= 0; r
< inst
->regs_read(i
); r
++)
937 add_dep(last_grf_write
[inst
->src
[i
].nr
+ r
], n
);
939 for (int r
= 0; r
< inst
->regs_read(i
); r
++) {
940 add_dep(last_grf_write
[inst
->src
[i
].nr
* 16 + inst
->src
[i
].reg_offset
+ r
], n
);
943 } else if (inst
->src
[i
].file
== FIXED_GRF
) {
944 if (post_reg_alloc
) {
945 for (int r
= 0; r
< inst
->regs_read(i
); r
++)
946 add_dep(last_grf_write
[inst
->src
[i
].nr
+ r
], n
);
948 add_dep(last_fixed_grf_write
, n
);
950 } else if (inst
->src
[i
].is_accumulator()) {
951 add_dep(last_accumulator_write
, n
);
952 } else if (inst
->src
[i
].file
== ARF
) {
957 if (inst
->base_mrf
!= -1) {
958 for (int i
= 0; i
< inst
->mlen
; i
++) {
959 /* It looks like the MRF regs are released in the send
960 * instruction once it's sent, not when the result comes
963 add_dep(last_mrf_write
[inst
->base_mrf
+ i
], n
);
967 if (inst
->reads_flag()) {
968 add_dep(last_conditional_mod
[inst
->flag_subreg
], n
);
971 if (inst
->reads_accumulator_implicitly()) {
972 add_dep(last_accumulator_write
, n
);
975 /* write-after-write deps. */
976 if (inst
->dst
.file
== VGRF
) {
977 if (post_reg_alloc
) {
978 for (int r
= 0; r
< inst
->regs_written
; r
++) {
979 add_dep(last_grf_write
[inst
->dst
.nr
+ r
], n
);
980 last_grf_write
[inst
->dst
.nr
+ r
] = n
;
983 for (int r
= 0; r
< inst
->regs_written
; r
++) {
984 add_dep(last_grf_write
[inst
->dst
.nr
* 16 + inst
->dst
.reg_offset
+ r
], n
);
985 last_grf_write
[inst
->dst
.nr
* 16 + inst
->dst
.reg_offset
+ r
] = n
;
988 } else if (inst
->dst
.file
== MRF
) {
989 int reg
= inst
->dst
.nr
& ~BRW_MRF_COMPR4
;
991 add_dep(last_mrf_write
[reg
], n
);
992 last_mrf_write
[reg
] = n
;
993 if (is_compressed(inst
)) {
994 if (inst
->dst
.nr
& BRW_MRF_COMPR4
)
998 add_dep(last_mrf_write
[reg
], n
);
999 last_mrf_write
[reg
] = n
;
1001 } else if (inst
->dst
.file
== FIXED_GRF
) {
1002 if (post_reg_alloc
) {
1003 for (int r
= 0; r
< inst
->regs_written
; r
++)
1004 last_grf_write
[inst
->dst
.nr
+ r
] = n
;
1006 last_fixed_grf_write
= n
;
1008 } else if (inst
->dst
.is_accumulator()) {
1009 add_dep(last_accumulator_write
, n
);
1010 last_accumulator_write
= n
;
1011 } else if (inst
->dst
.file
== ARF
&& !inst
->dst
.is_null()) {
1012 add_barrier_deps(n
);
1015 if (inst
->mlen
> 0 && inst
->base_mrf
!= -1) {
1016 for (int i
= 0; i
< v
->implied_mrf_writes(inst
); i
++) {
1017 add_dep(last_mrf_write
[inst
->base_mrf
+ i
], n
);
1018 last_mrf_write
[inst
->base_mrf
+ i
] = n
;
1022 if (inst
->writes_flag()) {
1023 add_dep(last_conditional_mod
[inst
->flag_subreg
], n
, 0);
1024 last_conditional_mod
[inst
->flag_subreg
] = n
;
1027 if (inst
->writes_accumulator_implicitly(v
->devinfo
) &&
1028 !inst
->dst
.is_accumulator()) {
1029 add_dep(last_accumulator_write
, n
);
1030 last_accumulator_write
= n
;
1034 /* bottom-to-top dependencies: WAR */
1035 memset(last_grf_write
, 0, sizeof(last_grf_write
));
1036 memset(last_mrf_write
, 0, sizeof(last_mrf_write
));
1037 memset(last_conditional_mod
, 0, sizeof(last_conditional_mod
));
1038 last_accumulator_write
= NULL
;
1039 last_fixed_grf_write
= NULL
;
1041 foreach_in_list_reverse_safe(schedule_node
, n
, &instructions
) {
1042 fs_inst
*inst
= (fs_inst
*)n
->inst
;
1044 /* write-after-read deps. */
1045 for (int i
= 0; i
< inst
->sources
; i
++) {
1046 if (inst
->src
[i
].file
== VGRF
) {
1047 if (post_reg_alloc
) {
1048 for (int r
= 0; r
< inst
->regs_read(i
); r
++)
1049 add_dep(n
, last_grf_write
[inst
->src
[i
].nr
+ r
], 0);
1051 for (int r
= 0; r
< inst
->regs_read(i
); r
++) {
1052 add_dep(n
, last_grf_write
[inst
->src
[i
].nr
* 16 + inst
->src
[i
].reg_offset
+ r
], 0);
1055 } else if (inst
->src
[i
].file
== FIXED_GRF
) {
1056 if (post_reg_alloc
) {
1057 for (int r
= 0; r
< inst
->regs_read(i
); r
++)
1058 add_dep(n
, last_grf_write
[inst
->src
[i
].nr
+ r
], 0);
1060 add_dep(n
, last_fixed_grf_write
, 0);
1062 } else if (inst
->src
[i
].is_accumulator()) {
1063 add_dep(n
, last_accumulator_write
, 0);
1064 } else if (inst
->src
[i
].file
== ARF
) {
1065 add_barrier_deps(n
);
1069 if (inst
->base_mrf
!= -1) {
1070 for (int i
= 0; i
< inst
->mlen
; i
++) {
1071 /* It looks like the MRF regs are released in the send
1072 * instruction once it's sent, not when the result comes
1075 add_dep(n
, last_mrf_write
[inst
->base_mrf
+ i
], 2);
1079 if (inst
->reads_flag()) {
1080 add_dep(n
, last_conditional_mod
[inst
->flag_subreg
]);
1083 if (inst
->reads_accumulator_implicitly()) {
1084 add_dep(n
, last_accumulator_write
);
1087 /* Update the things this instruction wrote, so earlier reads
1088 * can mark this as WAR dependency.
1090 if (inst
->dst
.file
== VGRF
) {
1091 if (post_reg_alloc
) {
1092 for (int r
= 0; r
< inst
->regs_written
; r
++)
1093 last_grf_write
[inst
->dst
.nr
+ r
] = n
;
1095 for (int r
= 0; r
< inst
->regs_written
; r
++) {
1096 last_grf_write
[inst
->dst
.nr
* 16 + inst
->dst
.reg_offset
+ r
] = n
;
1099 } else if (inst
->dst
.file
== MRF
) {
1100 int reg
= inst
->dst
.nr
& ~BRW_MRF_COMPR4
;
1102 last_mrf_write
[reg
] = n
;
1104 if (is_compressed(inst
)) {
1105 if (inst
->dst
.nr
& BRW_MRF_COMPR4
)
1110 last_mrf_write
[reg
] = n
;
1112 } else if (inst
->dst
.file
== FIXED_GRF
) {
1113 if (post_reg_alloc
) {
1114 for (int r
= 0; r
< inst
->regs_written
; r
++)
1115 last_grf_write
[inst
->dst
.nr
+ r
] = n
;
1117 last_fixed_grf_write
= n
;
1119 } else if (inst
->dst
.is_accumulator()) {
1120 last_accumulator_write
= n
;
1121 } else if (inst
->dst
.file
== ARF
&& !inst
->dst
.is_null()) {
1122 add_barrier_deps(n
);
1125 if (inst
->mlen
> 0 && inst
->base_mrf
!= -1) {
1126 for (int i
= 0; i
< v
->implied_mrf_writes(inst
); i
++) {
1127 last_mrf_write
[inst
->base_mrf
+ i
] = n
;
1131 if (inst
->writes_flag()) {
1132 last_conditional_mod
[inst
->flag_subreg
] = n
;
1135 if (inst
->writes_accumulator_implicitly(v
->devinfo
)) {
1136 last_accumulator_write
= n
;
1142 is_scheduling_barrier(const vec4_instruction
*inst
)
1144 return inst
->is_control_flow() ||
1145 inst
->has_side_effects();
1149 vec4_instruction_scheduler::calculate_deps()
1151 schedule_node
*last_grf_write
[grf_count
];
1152 schedule_node
*last_mrf_write
[BRW_MAX_MRF(v
->devinfo
->gen
)];
1153 schedule_node
*last_conditional_mod
= NULL
;
1154 schedule_node
*last_accumulator_write
= NULL
;
1155 /* Fixed HW registers are assumed to be separate from the virtual
1156 * GRFs, so they can be tracked separately. We don't really write
1157 * to fixed GRFs much, so don't bother tracking them on a more
1160 schedule_node
*last_fixed_grf_write
= NULL
;
1162 memset(last_grf_write
, 0, sizeof(last_grf_write
));
1163 memset(last_mrf_write
, 0, sizeof(last_mrf_write
));
1165 /* top-to-bottom dependencies: RAW and WAW. */
1166 foreach_in_list(schedule_node
, n
, &instructions
) {
1167 vec4_instruction
*inst
= (vec4_instruction
*)n
->inst
;
1169 if (is_scheduling_barrier(inst
))
1170 add_barrier_deps(n
);
1172 /* read-after-write deps. */
1173 for (int i
= 0; i
< 3; i
++) {
1174 if (inst
->src
[i
].file
== VGRF
) {
1175 for (unsigned j
= 0; j
< inst
->regs_read(i
); ++j
)
1176 add_dep(last_grf_write
[inst
->src
[i
].nr
+ j
], n
);
1177 } else if (inst
->src
[i
].file
== FIXED_GRF
) {
1178 add_dep(last_fixed_grf_write
, n
);
1179 } else if (inst
->src
[i
].is_accumulator()) {
1180 assert(last_accumulator_write
);
1181 add_dep(last_accumulator_write
, n
);
1182 } else if (inst
->src
[i
].file
== ARF
) {
1183 add_barrier_deps(n
);
1187 if (!inst
->is_send_from_grf()) {
1188 for (int i
= 0; i
< inst
->mlen
; i
++) {
1189 /* It looks like the MRF regs are released in the send
1190 * instruction once it's sent, not when the result comes
1193 add_dep(last_mrf_write
[inst
->base_mrf
+ i
], n
);
1197 if (inst
->reads_flag()) {
1198 assert(last_conditional_mod
);
1199 add_dep(last_conditional_mod
, n
);
1202 if (inst
->reads_accumulator_implicitly()) {
1203 assert(last_accumulator_write
);
1204 add_dep(last_accumulator_write
, n
);
1207 /* write-after-write deps. */
1208 if (inst
->dst
.file
== VGRF
) {
1209 for (unsigned j
= 0; j
< inst
->regs_written
; ++j
) {
1210 add_dep(last_grf_write
[inst
->dst
.nr
+ j
], n
);
1211 last_grf_write
[inst
->dst
.nr
+ j
] = n
;
1213 } else if (inst
->dst
.file
== MRF
) {
1214 add_dep(last_mrf_write
[inst
->dst
.nr
], n
);
1215 last_mrf_write
[inst
->dst
.nr
] = n
;
1216 } else if (inst
->dst
.file
== FIXED_GRF
) {
1217 last_fixed_grf_write
= n
;
1218 } else if (inst
->dst
.is_accumulator()) {
1219 add_dep(last_accumulator_write
, n
);
1220 last_accumulator_write
= n
;
1221 } else if (inst
->dst
.file
== ARF
&& !inst
->dst
.is_null()) {
1222 add_barrier_deps(n
);
1225 if (inst
->mlen
> 0 && !inst
->is_send_from_grf()) {
1226 for (int i
= 0; i
< v
->implied_mrf_writes(inst
); i
++) {
1227 add_dep(last_mrf_write
[inst
->base_mrf
+ i
], n
);
1228 last_mrf_write
[inst
->base_mrf
+ i
] = n
;
1232 if (inst
->writes_flag()) {
1233 add_dep(last_conditional_mod
, n
, 0);
1234 last_conditional_mod
= n
;
1237 if (inst
->writes_accumulator_implicitly(v
->devinfo
) &&
1238 !inst
->dst
.is_accumulator()) {
1239 add_dep(last_accumulator_write
, n
);
1240 last_accumulator_write
= n
;
1244 /* bottom-to-top dependencies: WAR */
1245 memset(last_grf_write
, 0, sizeof(last_grf_write
));
1246 memset(last_mrf_write
, 0, sizeof(last_mrf_write
));
1247 last_conditional_mod
= NULL
;
1248 last_accumulator_write
= NULL
;
1249 last_fixed_grf_write
= NULL
;
1251 foreach_in_list_reverse_safe(schedule_node
, n
, &instructions
) {
1252 vec4_instruction
*inst
= (vec4_instruction
*)n
->inst
;
1254 /* write-after-read deps. */
1255 for (int i
= 0; i
< 3; i
++) {
1256 if (inst
->src
[i
].file
== VGRF
) {
1257 for (unsigned j
= 0; j
< inst
->regs_read(i
); ++j
)
1258 add_dep(n
, last_grf_write
[inst
->src
[i
].nr
+ j
]);
1259 } else if (inst
->src
[i
].file
== FIXED_GRF
) {
1260 add_dep(n
, last_fixed_grf_write
);
1261 } else if (inst
->src
[i
].is_accumulator()) {
1262 add_dep(n
, last_accumulator_write
);
1263 } else if (inst
->src
[i
].file
== ARF
) {
1264 add_barrier_deps(n
);
1268 if (!inst
->is_send_from_grf()) {
1269 for (int i
= 0; i
< inst
->mlen
; i
++) {
1270 /* It looks like the MRF regs are released in the send
1271 * instruction once it's sent, not when the result comes
1274 add_dep(n
, last_mrf_write
[inst
->base_mrf
+ i
], 2);
1278 if (inst
->reads_flag()) {
1279 add_dep(n
, last_conditional_mod
);
1282 if (inst
->reads_accumulator_implicitly()) {
1283 add_dep(n
, last_accumulator_write
);
1286 /* Update the things this instruction wrote, so earlier reads
1287 * can mark this as WAR dependency.
1289 if (inst
->dst
.file
== VGRF
) {
1290 for (unsigned j
= 0; j
< inst
->regs_written
; ++j
)
1291 last_grf_write
[inst
->dst
.nr
+ j
] = n
;
1292 } else if (inst
->dst
.file
== MRF
) {
1293 last_mrf_write
[inst
->dst
.nr
] = n
;
1294 } else if (inst
->dst
.file
== FIXED_GRF
) {
1295 last_fixed_grf_write
= n
;
1296 } else if (inst
->dst
.is_accumulator()) {
1297 last_accumulator_write
= n
;
1298 } else if (inst
->dst
.file
== ARF
&& !inst
->dst
.is_null()) {
1299 add_barrier_deps(n
);
1302 if (inst
->mlen
> 0 && !inst
->is_send_from_grf()) {
1303 for (int i
= 0; i
< v
->implied_mrf_writes(inst
); i
++) {
1304 last_mrf_write
[inst
->base_mrf
+ i
] = n
;
1308 if (inst
->writes_flag()) {
1309 last_conditional_mod
= n
;
1312 if (inst
->writes_accumulator_implicitly(v
->devinfo
)) {
1313 last_accumulator_write
= n
;
schedule_node *
fs_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;

   if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
      int chosen_time = 0;

      /* Of the instructions ready to execute or the closest to
       * being ready, choose the oldest one.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         if (!chosen || n->unblocked_time < chosen_time) {
            chosen = n;
            chosen_time = n->unblocked_time;
         }
      }
   } else {
      /* Before register allocation, we don't care about the latencies of
       * instructions.  All we care about is reducing live intervals of
       * variables so that we can avoid register spilling, or get SIMD16
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
            chosen = n;
            continue;
         }

         /* Most important: If we can definitely reduce register pressure, do
          * so immediately.
          */
         int register_pressure_benefit = get_register_pressure_benefit(n->inst);
         int chosen_register_pressure_benefit =
            get_register_pressure_benefit(chosen->inst);

         if (register_pressure_benefit > 0 &&
             register_pressure_benefit > chosen_register_pressure_benefit) {
            chosen = n;
            continue;
         } else if (chosen_register_pressure_benefit > 0 &&
                    (register_pressure_benefit <
                     chosen_register_pressure_benefit)) {
            continue;
         }

         if (mode == SCHEDULE_PRE_LIFO) {
            /* Prefer instructions that recently became available for
             * scheduling.  These are the things that are most likely to
             * (eventually) make a variable dead and reduce register pressure.
             * Typical register pressure estimates don't work for us because
             * most of our pressure comes from texturing, where no single
             * instruction to schedule will make a vec4 value dead.
             */
            if (n->cand_generation > chosen->cand_generation) {
               chosen = n;
               continue;
            } else if (n->cand_generation < chosen->cand_generation) {
               continue;
            }

            /* On MRF-using chips, prefer non-SEND instructions.  If we don't
             * do this, then because we prefer instructions that just became
             * candidates, we'll end up in a pattern of scheduling a SEND,
             * then the MRFs for the next SEND, then the next SEND, then the
             * MRFs, etc., without ever consuming the results of a send.
             */
            if (v->devinfo->gen < 7) {
               fs_inst *chosen_inst = (fs_inst *)chosen->inst;

               /* We use regs_written > 1 as our test for the kind of send
                * instruction to avoid -- only sends generate many regs, and a
                * single-result send is probably actually reducing register
                * pressure.
                */
               if (inst->regs_written <= inst->exec_size / 8 &&
                   chosen_inst->regs_written > chosen_inst->exec_size / 8) {
                  chosen = n;
                  continue;
               } else if (inst->regs_written > chosen_inst->regs_written) {
                  continue;
               }
            }
         }

         /* For instructions pushed on the cands list at the same time, prefer
          * the one with the highest delay to the end of the program.  This is
          * most likely to have its values able to be consumed first (such as
          * for a large tree of lowered ubo loads, which appear reversed in
          * the instruction stream with respect to when they can be consumed).
          */
         if (n->delay > chosen->delay) {
            chosen = n;
            continue;
         } else if (n->delay < chosen->delay) {
            continue;
         }

         /* If all other metrics are equal, we prefer the first instruction in
          * the list (program execution).
          */
      }
   }

   return chosen;
}

schedule_node *
vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
      }
   }

   return chosen;
}

int
fs_instruction_scheduler::issue_time(backend_instruction *inst)
{
   if (is_compressed((fs_inst *)inst))
      return 4;
   else
      return 2;
}

int
vec4_instruction_scheduler::issue_time(backend_instruction *inst)
{
   /* We always execute as two vec4s in parallel. */
   return 2;
}

void
instruction_scheduler::schedule_instructions(bblock_t *block)
{
   const struct brw_device_info *devinfo = bs->devinfo;
   time = 0;
   if (!post_reg_alloc)
      reg_pressure = reg_pressure_in[block->num];
   block_idx = block->num;

   /* Remove non-DAG heads from the list. */
   foreach_in_list_safe(schedule_node, n, &instructions) {
      if (n->parent_count != 0)
         n->remove();
   }

   unsigned cand_generation = 1;
   while (!instructions.is_empty()) {
      schedule_node *chosen = choose_instruction_to_schedule();

      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
      chosen->inst->exec_node::remove();
      block->instructions.push_tail(chosen->inst);
      instructions_to_schedule--;

      if (!post_reg_alloc) {
         reg_pressure -= get_register_pressure_benefit(chosen->inst);
         update_register_pressure(chosen->inst);
      }

      /* If we expected a delay for scheduling, then bump the clock to reflect
       * that.  In reality, the hardware will switch to another hyperthread
       * and may not return to dispatching our thread for a while even after
       * we're unblocked.  After this, we have the time when the chosen
       * instruction will start executing.
       */
      time = MAX2(time, chosen->unblocked_time);

      /* Update the clock for how soon an instruction could start after the
       * chosen one.
       */
      time += issue_time(chosen->inst);

      if (debug) {
         fprintf(stderr, "clock %4d, scheduled: ", time);
         bs->dump_instruction(chosen->inst);
         if (!post_reg_alloc)
            fprintf(stderr, "(register pressure %d)\n", reg_pressure);
      }

      /* Now that we've scheduled a new instruction, some of its
       * children can be promoted to the list of instructions ready to
       * be scheduled.  Update the children's unblocked time for this
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
         schedule_node *child = chosen->children[i];

         child->unblocked_time = MAX2(child->unblocked_time,
                                      time + chosen->child_latency[i]);

         if (debug) {
            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
            bs->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
         child->parent_count--;
         if (child->parent_count == 0) {
            if (debug) {
               fprintf(stderr, "\t\tnow available\n");
            }
            instructions.push_head(child);
         }
      }
      cand_generation++;

      /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
       * but it's more limited pre-gen6, so if we send something off to it then
       * the next math instruction isn't going to make progress until the first
       * is done.
       */
      if (devinfo->gen < 6 && chosen->inst->is_math()) {
         foreach_in_list(schedule_node, n, &instructions) {
            if (n->inst->is_math())
               n->unblocked_time = MAX2(n->unblocked_time,
                                        time + chosen->latency);
         }
      }
   }

   assert(instructions_to_schedule == 0);

   block->cycle_count = time;
}

static unsigned get_cycle_count(cfg_t *cfg)
{
   unsigned count = 0, multiplier = 1;
   foreach_block(block, cfg) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         multiplier *= 10; /* assume that loops execute ~10 times */

      count += block->cycle_count * multiplier;

      if (block->end()->opcode == BRW_OPCODE_WHILE)
         multiplier /= 10;
   }

   return count;
}

void
instruction_scheduler::run(cfg_t *cfg)
{
   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   if (!post_reg_alloc)
      setup_liveness(cfg);

   foreach_block(block, cfg) {
      if (block->end_ip - block->start_ip <= 1)
         continue;

      if (reads_remaining) {
         memset(reads_remaining, 0,
                grf_count * sizeof(*reads_remaining));
         memset(hw_reads_remaining, 0,
                hw_reg_count * sizeof(*hw_reads_remaining));
         memset(written, 0, grf_count * sizeof(*written));

         foreach_inst_in_block(fs_inst, inst, block)
            count_reads_remaining(inst);
      }

      add_insts_from_block(block);

      calculate_deps();

      foreach_in_list(schedule_node, n, &instructions) {
         compute_delay(n);
      }

      schedule_instructions(block);
   }

   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   cfg->cycle_count = get_cycle_count(cfg);
}

void
fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
{
   if (mode != SCHEDULE_POST)
      calculate_live_intervals();

   int grf_count;
   if (mode == SCHEDULE_POST)
      grf_count = grf_used;
   else
      grf_count = alloc.count;

   fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
                                  cfg->num_blocks, mode);
   sched.run(cfg);

   invalidate_live_intervals();
}

void
vec4_visitor::opt_schedule_instructions()
{
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(cfg);

   invalidate_live_intervals();
}