/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_shader.h"
#include "glsl/nir/glsl_types.h"
#include "glsl/ir_optimization.h"
/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
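/* A minimal, self-contained sketch of the clock-driven loop described above
 * (hypothetical "node" type and helper names, using <vector>/<algorithm>;
 * the real scheduler below additionally tracks per-edge latencies, register
 * pressure and candidate generations):
 *
 *    struct node {
 *       int unblocked_time = 0;        // earliest clock at which this may issue
 *       int latency = 1;               // cycles until its result is ready
 *       int parent_count = 0;          // unscheduled predecessors in the DAG
 *       std::vector<node *> children;  // successors that depend on this node
 *    };
 *
 *    void list_schedule(std::vector<node *> ready, std::vector<node *> &out)
 *    {
 *       int clock = 0;
 *       while (!ready.empty()) {
 *          // Cheap heuristic: pick the earliest-unblocked candidate.
 *          auto it = std::min_element(ready.begin(), ready.end(),
 *                                     [](const node *a, const node *b) {
 *                                        return a->unblocked_time < b->unblocked_time;
 *                                     });
 *          node *chosen = *it;
 *          ready.erase(it);
 *          out.push_back(chosen);
 *
 *          clock = std::max(clock, chosen->unblocked_time) + 1;
 *
 *          // Unblock children; new DAG heads become candidates.
 *          for (node *child : chosen->children) {
 *             child->unblocked_time = std::max(child->unblocked_time,
 *                                              clock + chosen->latency);
 *             if (--child->parent_count == 0)
 *                ready.push_back(child);
 *          }
 *       }
 *    }
 */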
static bool debug = false;

class instruction_scheduler;
class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, instruction_scheduler *sched);
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;
};
void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}
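/* Worked example for the Gen4 model above (illustrative): SHADER_OPCODE_POW
 * uses 8 rounds, so its latency is 8 * 8 (chans) * 22 (math_latency) = 1408
 * cycles, reflecting a math unit that works through the channels one at a
 * time on pre-Gen6 hardware.
 */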
void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,5,1>F { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F { align16 WE_normal 1Q };
       *
       * 16 cycles
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = 14;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* 2 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       *
       * 18 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_POW:
      /* 2 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       *
       * 26 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
      /* 18 cycles:
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles:
       * mov(8)   g114<1>UD  0D                        { align1 WE_normal 1Q };
       * send(8)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4        { align1 WE_normal 1Q };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16)   g114<1>UD  0D                       { align1 WE_normal 1H };
       * send(16)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)   g114<1>UD  0D                       { align1 WE_normal 1H };
       * mov(16)   g6<1>F     g6<8,8,1>D               { align1 WE_normal 1H };
       * send(16)  g8<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)   g8<1>F     g8<8,8,1>D               { align1 WE_normal 1H };
       * add(16)   g6<1>F     g6<8,8,1>F   g8<8,8,1>F  { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * 16 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       *
       * ~480 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * ~620 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC:
      /* Test code:
       *   mov(8)    g112<1>ud       0x00000000ud       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>ud     g1.7<0,1,0>ud      { align1 WE_all };
       *   mov(8)    g113<1>ud       0x00000000ud       { align1 WE_normal 1Q };
       *   send(8)   g4<1>ud         g112<8,8,1>ud
       *             data (38, 5, 6) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 13867 cycles per atomic op,
       * standard deviation 3%.  Note that this is a rather
       * pessimistic estimate, the actual latency in cases with few
       * collisions between threads and favorable pipelining has been
       * seen to be reduced by a factor of 100.
       */
      latency = 14000;
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      /* Test code:
       *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
       *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
       *   send(8)   g4<1>UD         g112<8,8,1>UD
       *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *   .
       *   . [repeats 8 times]
       *   .
       *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
       *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
       *   send(8)   g4<1>UD         g112<8,8,1>UD
       *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 583 cycles per surface read,
       * standard deviation 0.9%.
       */
      latency = is_haswell ? 300 : 600;
      break;

   default:
      /* 2 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       *
       * 16 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                      { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
class instruction_scheduler {
public:
   instruction_scheduler(backend_shader *s, int grf_count,
                         int hw_reg_count, int block_count,
                         instruction_scheduler_mode mode)
   {
      this->bs = s;
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->hw_reg_count = hw_reg_count;
      this->instructions.make_empty();
      this->instructions_to_schedule = 0;
      this->post_reg_alloc = (mode == SCHEDULE_POST);
      this->mode = mode;
      this->time = 0;
      if (!post_reg_alloc) {
         this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);

         this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                            BITSET_WORDS(grf_count));

         this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                             BITSET_WORDS(grf_count));

         this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                                BITSET_WORDS(hw_reg_count));

         this->written = rzalloc_array(mem_ctx, bool, grf_count);

         this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);

         this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
      } else {
         this->reg_pressure_in = NULL;
         this->livein = NULL;
         this->liveout = NULL;
         this->hw_liveout = NULL;
         this->written = NULL;
         this->reads_remaining = NULL;
         this->hw_reads_remaining = NULL;
      }
   }

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }

   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(cfg_t *cfg);
   void add_insts_from_block(bblock_t *block);
   void compute_delay(schedule_node *node);
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;

   virtual void count_reads_remaining(backend_instruction *inst) = 0;
   virtual void setup_liveness(cfg_t *cfg) = 0;
   virtual void update_register_pressure(backend_instruction *inst) = 0;
   virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

   void schedule_instructions(bblock_t *block);

   void *mem_ctx;

   bool post_reg_alloc;
   int instructions_to_schedule;
   int grf_count;
   int hw_reg_count;
   int time;
   int reg_pressure;
   int block_idx;
   exec_list instructions;
   backend_shader *bs;

   instruction_scheduler_mode mode;

   /**
    * The register pressure at the beginning of each basic block.
    */
   int *reg_pressure_in;

   /**
    * The virtual GRF's whose range overlaps the beginning of each basic block.
    */
   BITSET_WORD **livein;

   /**
    * The virtual GRF's whose range overlaps the end of each basic block.
    */
   BITSET_WORD **liveout;

   /**
    * The hardware GRF's whose range overlaps the end of each basic block.
    */
   BITSET_WORD **hw_liveout;

   /**
    * Whether we've scheduled a write for this virtual GRF yet.
    */
   bool *written;

   /**
    * How many reads we haven't scheduled for this virtual GRF yet.
    */
   int *reads_remaining;

   /**
    * How many reads we haven't scheduled for this hardware GRF yet.
    */
   int *hw_reads_remaining;
};
class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count,
                            instruction_scheduler_mode mode);
   void calculate_deps();
   bool is_compressed(fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   fs_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
                                                   int grf_count, int hw_reg_count,
                                                   int block_count,
                                                   instruction_scheduler_mode mode)
   : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
     v(v)
{
}
static bool
is_src_duplicate(fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}
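/* Example (illustrative instruction): for "mul vgrf8, vgrf20, vgrf20", which
 * reads vgrf20 twice, is_src_duplicate(inst, 1) is true, so the read
 * tracking below counts only one outstanding read of vgrf20 for it.
 */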
void
fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == GRF) {
         reads_remaining[inst->src[i].reg]++;
      } else if (inst->src[i].file == HW_REG &&
                 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (inst->src[i].fixed_hw_reg.nr >= hw_reg_count)
            continue;

         for (int j = 0; j < inst->regs_read(i); j++)
            hw_reads_remaining[inst->src[i].fixed_hw_reg.nr + j]++;
      }
   }
}
void
fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < v->live_intervals->num_vars; i++) {
         if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
            int vgrf = v->live_intervals->vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
            BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_mask's.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
             v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
               reg_pressure_in[block + 1] += v->alloc.sizes[i];
               BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (int i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}
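/* Example (illustrative, hypothetical numbers): if vgrf 5 occupies two
 * registers (v->alloc.sizes[5] == 2) and its live range reaches the start of
 * block 3, then vgrf 5 is set in livein[3] and reg_pressure_in[3] grows by 2.
 * Payload registers still in use past a block's start each add one.
 */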
void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   if (inst->dst.file == GRF) {
      written[inst->dst.reg] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == GRF) {
         reads_remaining[inst->src[i].reg]--;
      } else if (inst->src[i].file == HW_REG &&
                 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE &&
                 inst->src[i].fixed_hw_reg.nr < hw_reg_count) {
         for (int off = 0; off < inst->regs_read(i); off++)
            hw_reads_remaining[inst->src[i].fixed_hw_reg.nr + off]--;
      }
   }
}
int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;

   if (inst->dst.file == GRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.reg) &&
          !written[inst->dst.reg])
         benefit -= v->alloc.sizes[inst->dst.reg];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == GRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].reg) &&
          reads_remaining[inst->src[i].reg] == 1)
         benefit += v->alloc.sizes[inst->src[i].reg];

      if (inst->src[i].file == HW_REG &&
          inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE &&
          inst->src[i].fixed_hw_reg.nr < hw_reg_count) {
         for (int off = 0; off < inst->regs_read(i); off++) {
            int reg = inst->src[i].fixed_hw_reg.nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}
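/* Illustrative example (hypothetical registers and sizes): for
 * "add vgrf7, vgrf3, vgrf4" where vgrf7 has not been written yet and is not
 * live into the block, benefit starts at -alloc.sizes[7]; if this is the
 * last remaining read of vgrf3 and vgrf3 is not live out of the block,
 * benefit gains alloc.sizes[3].  A positive total means scheduling the
 * instruction now should shrink register pressure.
 */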
class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   vec4_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
     v(v)
{
}

void
vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
}

void
vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
}

void
vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
}

int
vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   return 0;
}
schedule_node::schedule_node(backend_instruction *inst,
                             instruction_scheduler *sched)
{
   const struct brw_device_info *devinfo = sched->bs->devinfo;

   this->inst = inst;
   this->child_array_size = 0;
   this->children = NULL;
   this->child_latency = NULL;
   this->child_count = 0;
   this->parent_count = 0;
   this->unblocked_time = 0;
   this->cand_generation = 0;
   this->delay = 0;

   /* We can't measure Gen6 timings directly but expect them to be much
    * closer to Gen7 than Gen4.
    */
   if (!sched->post_reg_alloc)
      this->latency = 1;
   else if (devinfo->gen >= 6)
      set_latency_gen7(devinfo->is_haswell);
   else
      set_latency_gen4();
}
void
instruction_scheduler::add_insts_from_block(bblock_t *block)
{
   /* Removing the last instruction from a basic block removes the block as
    * well, so put a NOP at the end to keep it alive.
    */
   if (!block->end()->is_control_flow()) {
      backend_instruction *nop = new(mem_ctx) backend_instruction();
      nop->opcode = BRW_OPCODE_NOP;
      block->end()->insert_after(block, nop);
   }

   foreach_inst_in_block_safe(backend_instruction, inst, block) {
      if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
         continue;

      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

      this->instructions_to_schedule++;

      inst->remove(block);
      instructions.push_tail(n);
   }
}
/** Recursive computation of the delay member of a node. */
void
instruction_scheduler::compute_delay(schedule_node *n)
{
   if (!n->child_count) {
      n->delay = issue_time(n->inst);
   } else {
      for (int i = 0; i < n->child_count; i++) {
         if (!n->children[i]->delay)
            compute_delay(n->children[i]);
         n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
      }
   }
}
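/* Example: for a dependency chain A -> B -> C where A and B each have a
 * latency of 2 and leaf C issues in 1 cycle, delay(C) = 1,
 * delay(B) = 2 + 1 = 3 and delay(A) = 2 + 3 = 5.  The scheduler later
 * prefers candidates with the larger delay to the end of the program.
 */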
/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}
/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         next = (schedule_node *)next->next;
      }
   }
}
/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(fs_inst *inst)
{
   return inst->exec_size == 16;
}
void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node *last_grf_write[grf_count * 16];
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod[2] = { NULL, NULL };
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;
   int reg_width = v->dispatch_width / 8;

   /* The last instruction always needs to still be the last
    * instruction.  Either it's flow control (IF, ELSE, ENDIF, DO,
    * WHILE) and scheduling other things after it would disturb the
    * basic block, or it's FB_WRITE and we should do a better job at
    * dead code elimination anyway.
    */
   schedule_node *last = (schedule_node *)instructions.get_tail();
   add_barrier_deps(last);

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
          inst->has_side_effects())
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
               for (int r = 0; r < inst->regs_read(i); r++)
                  add_dep(last_grf_write[inst->src[i].reg + r], n);
            } else {
               for (int r = 0; r < inst->regs_read(i); r++) {
                  add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
               }
            }
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
               for (int r = 0; r < size; r++)
                  add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
            assert(inst->src[i].file != MRF);
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         add_dep(last_conditional_mod[inst->flag_subreg], n);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == GRF) {
         if (post_reg_alloc) {
            for (int r = 0; r < inst->regs_written; r++) {
               add_dep(last_grf_write[inst->dst.reg + r], n);
               last_grf_write[inst->dst.reg + r] = n;
            }
         } else {
            for (int r = 0; r < inst->regs_written; r++) {
               add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

         add_dep(last_mrf_write[reg], n);
         last_mrf_write[reg] = n;
         if (is_compressed(inst)) {
            if (inst->dst.reg & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            add_dep(last_mrf_write[reg], n);
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE &&
                 !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
         last_conditional_mod[inst->flag_subreg] = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   exec_node *node;
   exec_node *prev;
   for (node = instructions.get_tail(), prev = node->prev;
        !node->is_head_sentinel();
        node = prev, prev = node->prev) {
      schedule_node *n = (schedule_node *)node;
      fs_inst *inst = (fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
               for (int r = 0; r < inst->regs_read(i); r++)
                  add_dep(n, last_grf_write[inst->src[i].reg + r], 0);
            } else {
               for (int r = 0; r < inst->regs_read(i); r++) {
                  add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], 0);
               }
            }
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
               for (int r = 0; r < size; r++)
                  add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r], 0);
            } else {
               add_dep(n, last_fixed_grf_write, 0);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write, 0);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
            assert(inst->src[i].file != MRF);
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod[inst->flag_subreg]);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == GRF) {
         if (post_reg_alloc) {
            for (int r = 0; r < inst->regs_written; r++)
               last_grf_write[inst->dst.reg + r] = n;
         } else {
            for (int r = 0; r < inst->regs_written; r++) {
               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.reg & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;

            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE &&
                 !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod[inst->flag_subreg] = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }
}
void
vec4_instruction_scheduler::calculate_deps()
{
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod = NULL;
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   /* The last instruction always needs to still be the last instruction.
    * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
    * other things after it would disturb the basic block, or it's the EOT
    * URB_WRITE and we should do a better job at dead code eliminating
    * anything that could have been scheduled after it.
    */
   schedule_node *last = (schedule_node *)instructions.get_tail();
   add_barrier_deps(last);

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (inst->has_side_effects())
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            for (unsigned j = 0; j < inst->regs_read(i); ++j)
               add_dep(last_grf_write[inst->src[i].reg + j], n);
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            add_dep(last_fixed_grf_write, n);
         } else if (inst->src[i].is_accumulator()) {
            assert(last_accumulator_write);
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
            /* No reads from MRF, and ATTR is already translated away */
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
         }
      }

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         assert(last_conditional_mod);
         add_dep(last_conditional_mod, n);
      }

      if (inst->reads_accumulator_implicitly()) {
         assert(last_accumulator_write);
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == GRF) {
         for (unsigned j = 0; j < inst->regs_written; ++j) {
            add_dep(last_grf_write[inst->dst.reg + j], n);
            last_grf_write[inst->dst.reg + j] = n;
         }
      } else if (inst->dst.file == MRF) {
         add_dep(last_mrf_write[inst->dst.reg], n);
         last_mrf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE &&
                 !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod, n, 0);
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   last_conditional_mod = NULL;
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   exec_node *node;
   exec_node *prev;
   for (node = instructions.get_tail(), prev = node->prev;
        !node->is_head_sentinel();
        node = prev, prev = node->prev) {
      schedule_node *n = (schedule_node *)node;
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            for (unsigned j = 0; j < inst->regs_read(i); ++j)
               add_dep(n, last_grf_write[inst->src[i].reg + j]);
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            add_dep(n, last_fixed_grf_write);
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
         }
      }

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == GRF) {
         for (unsigned j = 0; j < inst->regs_written; ++j)
            last_grf_write[inst->dst.reg + j] = n;
      } else if (inst->dst.file == MRF) {
         last_mrf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE &&
                 !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }
}
1398 schedule_node
*chosen
= NULL
;
1400 if (mode
== SCHEDULE_PRE
|| mode
== SCHEDULE_POST
) {
1401 int chosen_time
= 0;
1403 /* Of the instructions ready to execute or the closest to
1404 * being ready, choose the oldest one.
1406 foreach_in_list(schedule_node
, n
, &instructions
) {
1407 if (!chosen
|| n
->unblocked_time
< chosen_time
) {
1409 chosen_time
= n
->unblocked_time
;
1413 /* Before register allocation, we don't care about the latencies of
1414 * instructions. All we care about is reducing live intervals of
1415 * variables so that we can avoid register spilling, or get SIMD16
1416 * shaders which naturally do a better job of hiding instruction
1419 foreach_in_list(schedule_node
, n
, &instructions
) {
1420 fs_inst
*inst
= (fs_inst
*)n
->inst
;
1427 /* Most important: If we can definitely reduce register pressure, do
1430 int register_pressure_benefit
= get_register_pressure_benefit(n
->inst
);
1431 int chosen_register_pressure_benefit
=
1432 get_register_pressure_benefit(chosen
->inst
);
1434 if (register_pressure_benefit
> 0 &&
1435 register_pressure_benefit
> chosen_register_pressure_benefit
) {
1438 } else if (chosen_register_pressure_benefit
> 0 &&
1439 (register_pressure_benefit
<
1440 chosen_register_pressure_benefit
)) {
1444 if (mode
== SCHEDULE_PRE_LIFO
) {
1445 /* Prefer instructions that recently became available for
1446 * scheduling. These are the things that are most likely to
1447 * (eventually) make a variable dead and reduce register pressure.
1448 * Typical register pressure estimates don't work for us because
1449 * most of our pressure comes from texturing, where no single
1450 * instruction to schedule will make a vec4 value dead.
1452 if (n
->cand_generation
> chosen
->cand_generation
) {
1455 } else if (n
->cand_generation
< chosen
->cand_generation
) {
1459 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1460 * do this, then because we prefer instructions that just became
1461 * candidates, we'll end up in a pattern of scheduling a SEND,
1462 * then the MRFs for the next SEND, then the next SEND, then the
1463 * MRFs, etc., without ever consuming the results of a send.
1465 if (v
->devinfo
->gen
< 7) {
1466 fs_inst
*chosen_inst
= (fs_inst
*)chosen
->inst
;
1468 /* We use regs_written > 1 as our test for the kind of send
1469 * instruction to avoid -- only sends generate many regs, and a
1470 * single-result send is probably actually reducing register
1473 if (inst
->regs_written
<= inst
->exec_size
/ 8 &&
1474 chosen_inst
->regs_written
> chosen_inst
->exec_size
/ 8) {
1477 } else if (inst
->regs_written
> chosen_inst
->regs_written
) {
1483 /* For instructions pushed on the cands list at the same time, prefer
1484 * the one with the highest delay to the end of the program. This is
1485 * most likely to have its values able to be consumed first (such as
1486 * for a large tree of lowered ubo loads, which appear reversed in
1487 * the instruction stream with respect to when they can be consumed).
1489 if (n
->delay
> chosen
->delay
) {
1492 } else if (n
->delay
< chosen
->delay
) {
1496 /* If all other metrics are equal, we prefer the first instruction in
1497 * the list (program execution).
schedule_node *
vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
      }
   }

   return chosen;
}

int
fs_instruction_scheduler::issue_time(backend_instruction *inst)
{
   if (is_compressed((fs_inst *)inst))
      return 4;
   else
      return 2;
}

int
vec4_instruction_scheduler::issue_time(backend_instruction *inst)
{
   /* We always execute as two vec4s in parallel. */
   return 2;
}
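/* Example: a SIMD8 fs_inst (exec_size 8, not compressed) issues in 2 cycles,
 * while a SIMD16 fs_inst (exec_size 16, is_compressed()) issues in 4,
 * matching the one-cycle-per-SIMD4-vector dispatch described above.
 */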
void
instruction_scheduler::schedule_instructions(bblock_t *block)
{
   const struct brw_device_info *devinfo = bs->devinfo;
   backend_instruction *inst = block->end();

   if (!post_reg_alloc)
      reg_pressure = reg_pressure_in[block->num];
   block_idx = block->num;

   /* Remove non-DAG heads from the list. */
   foreach_in_list_safe(schedule_node, n, &instructions) {
      if (n->parent_count != 0)
         n->remove();
   }

   unsigned cand_generation = 1;
   while (!instructions.is_empty()) {
      schedule_node *chosen = choose_instruction_to_schedule();

      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
      inst->insert_before(block, chosen->inst);
      instructions_to_schedule--;

      if (!post_reg_alloc) {
         reg_pressure -= get_register_pressure_benefit(chosen->inst);
         update_register_pressure(chosen->inst);
      }

      /* If we expected a delay for scheduling, then bump the clock to reflect
       * that.  In reality, the hardware will switch to another hyperthread
       * and may not return to dispatching our thread for a while even after
       * we're unblocked.  After this, we have the time when the chosen
       * instruction will start executing.
       */
      time = MAX2(time, chosen->unblocked_time);

      /* Update the clock for how soon an instruction could start after the
       * chosen one.
       */
      time += issue_time(chosen->inst);

      if (debug) {
         fprintf(stderr, "clock %4d, scheduled: ", time);
         bs->dump_instruction(chosen->inst);
         if (!post_reg_alloc)
            fprintf(stderr, "(register pressure %d)\n", reg_pressure);
      }

      /* Now that we've scheduled a new instruction, some of its
       * children can be promoted to the list of instructions ready to
       * be scheduled.  Update the children's unblocked time for this
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
         schedule_node *child = chosen->children[i];

         child->unblocked_time = MAX2(child->unblocked_time,
                                      time + chosen->child_latency[i]);

         if (debug) {
            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
            bs->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
         child->parent_count--;
         if (child->parent_count == 0) {
            if (debug) {
               fprintf(stderr, "\t\tnow available\n");
            }
            instructions.push_head(child);
         }
      }
      cand_generation++;

      /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
       * but it's more limited pre-gen6, so if we send something off to it then
       * the next math instruction isn't going to make progress until the first
       * is done.
       */
      if (devinfo->gen < 6 && chosen->inst->is_math()) {
         foreach_in_list(schedule_node, n, &instructions) {
            if (n->inst->is_math())
               n->unblocked_time = MAX2(n->unblocked_time,
                                        time + chosen->latency);
         }
      }
   }

   if (block->end()->opcode == BRW_OPCODE_NOP)
      block->end()->remove(block);
   assert(instructions_to_schedule == 0);

   block->cycle_count = time;
}
static unsigned get_cycle_count(cfg_t *cfg)
{
   unsigned count = 0, multiplier = 1;
   foreach_block(block, cfg) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         multiplier *= 10; /* assume that loops execute ~10 times */

      count += block->cycle_count * multiplier;

      if (block->end()->opcode == BRW_OPCODE_WHILE)
         multiplier /= 10;
   }

   return count;
}
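/* Example: a block nested inside two loops contributes cycle_count * 100 to
 * the estimate (10 * 10), and the multiplier drops back at each WHILE, so
 * blocks after the loops are counted once again.
 */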
void
instruction_scheduler::run(cfg_t *cfg)
{
   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   if (!post_reg_alloc)
      setup_liveness(cfg);

   foreach_block(block, cfg) {
      if (block->end_ip - block->start_ip <= 1)
         continue;

      if (reads_remaining) {
         memset(reads_remaining, 0,
                grf_count * sizeof(*reads_remaining));
         memset(hw_reads_remaining, 0,
                hw_reg_count * sizeof(*hw_reads_remaining));
         memset(written, 0, grf_count * sizeof(*written));

         foreach_inst_in_block(fs_inst, inst, block)
            count_reads_remaining(inst);
      }

      add_insts_from_block(block);

      calculate_deps();

      foreach_in_list(schedule_node, n, &instructions) {
         compute_delay(n);
      }

      schedule_instructions(block);
   }

   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   cfg->cycle_count = get_cycle_count(cfg);
}
void
fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
{
   calculate_live_intervals();

   int grf_count;
   if (mode == SCHEDULE_POST)
      grf_count = grf_used;
   else
      grf_count = alloc.count;

   fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
                                  cfg->num_blocks, mode);
   sched.run(cfg);

   if (unlikely(debug_enabled) && mode == SCHEDULE_POST) {
      fprintf(stderr, "%s%d estimated execution time: %d cycles\n",
              stage_abbrev, dispatch_width, sched.time);
   }

   invalidate_live_intervals();
}

void
vec4_visitor::opt_schedule_instructions()
{
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(cfg);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "%s estimated execution time: %d cycles\n",
              stage_abbrev, sched.time);
   }

   invalidate_live_intervals();
}