/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_fs.h"
#include "brw_vec4.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
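
/* To make the picking step concrete, here is a minimal sketch (illustration
 * only, not driver code) of the clock-driven greedy loop described above,
 * assuming a hypothetical ready list holding the current DAG heads:
 *
 *    int clock = 0;
 *    while (!ready.is_empty()) {
 *       schedule_node *best = pick_earliest_unblocked(ready);
 *       emit(best->inst);
 *       clock += issue_time(best->inst);
 *       for (int i = 0; i < best->child_count; i++) {
 *          schedule_node *child = best->children[i];
 *          child->unblocked_time = MAX2(child->unblocked_time,
 *                                       clock + best->child_latency[i]);
 *          if (--child->parent_count == 0)
 *             ready.push_tail(child);
 *       }
 *    }
 *
 * The real implementation below layers register-pressure and candidate-age
 * heuristics on top of this basic loop.
 */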
static bool debug = false;

class instruction_scheduler;

class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, instruction_scheduler *sched);
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;
};
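
/* Worked example (illustrative numbers only): a texture fetch with latency
 * 200 whose only child is a MOV with issue_time 2 and no children of its own
 * gets delay = 200 + 2, while the leaf MOV's delay is just 2.
 */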
void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}
void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
       *
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* With no dependent read of the result:
       * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
       *
       * With a dependent read of the result:
       * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_POW:
      /* With no dependent read of the result:
       * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
       *
       * With a dependent read of the result:
       * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
      /* With no dependent read of the result:
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * send(8) g50<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles:
       * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
       * send(8) g6<1>UW g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
       * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
       * send(16) g6<1>UW g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
       * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
       * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
       * send(16) g8<1>UW g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
       * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
       * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       *
       * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      /* Test code:
       *   mov(8)    g112<1>ud       0x00000000ud       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>ud     g1.7<0,1,0>ud      { align1 WE_all };
       *   mov(8)    g113<1>ud       0x00000000ud       { align1 WE_normal 1Q };
       *   send(8)   g4<1>ud         g112<8,8,1>ud
       *             data (38, 5, 6) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 13867 cycles per atomic op,
       * standard deviation 3%.  Note that this is a rather
       * pessimistic estimate, the actual latency in cases with few
       * collisions between threads and favorable pipelining has been
       * seen to be reduced by a factor of 100.
       */
      latency = 14000;
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      /* Test code:
       *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
       *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
       *   send(8)   g4<1>UD         g112<8,8,1>UD
       *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *   .
       *   . [repeats 8 times]
       *   .
       *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
       *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
       *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
       *   send(8)   g4<1>UD         g112<8,8,1>UD
       *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
       *
       * Running it 100 times as fragment shader on a 128x128 quad
       * gives an average latency of 583 cycles per surface read,
       * standard deviation 0.9%.
       */
      latency = is_haswell ? 300 : 600;
      break;

   default:
      /* With no dependent read of the result:
       * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
       *
       * With a dependent read of the result:
       * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
class instruction_scheduler {
public:
   instruction_scheduler(backend_visitor *v, int grf_count,
                         instruction_scheduler_mode mode)
   {
      this->bv = v;
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->instructions.make_empty();
      this->instructions_to_schedule = 0;
      this->post_reg_alloc = (mode == SCHEDULE_POST);
      this->mode = mode;
      this->time = 0;
      if (!post_reg_alloc) {
         this->remaining_grf_uses = rzalloc_array(mem_ctx, int, grf_count);
         this->grf_active = rzalloc_array(mem_ctx, bool, grf_count);
      } else {
         this->remaining_grf_uses = NULL;
         this->grf_active = NULL;
      }
   }

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }

   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(exec_list *instructions);
   void add_inst(backend_instruction *inst);
   void compute_delay(schedule_node *node);
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;
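
   /* Illustrative arithmetic only: under this model a SIMD8 ALU instruction
    * contributes 2 cycles of issue time to the block clock and a SIMD16
    * (compressed) one contributes 4, before any dependency latency is added
    * on top via unblocked_time.
    */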
   virtual void count_remaining_grf_uses(backend_instruction *inst) = 0;
   virtual void update_register_pressure(backend_instruction *inst) = 0;
   virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

   void schedule_instructions(backend_instruction *next_block_header);

   void *mem_ctx;

   bool post_reg_alloc;
   int instructions_to_schedule;
   int grf_count;
   int time;
   exec_list instructions;
   backend_visitor *bv;

   instruction_scheduler_mode mode;

   /**
    * Number of instructions left to schedule that reference each vgrf.
    *
    * Used so that we can prefer scheduling instructions that will end the
    * live intervals of multiple variables, to reduce register pressure.
    */
   int *remaining_grf_uses;

   /**
    * Tracks whether each VGRF has had an instruction scheduled that uses it.
    *
    * This is used to estimate whether scheduling a new instruction will
    * increase register pressure.
    */
   bool *grf_active;
};
class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(fs_visitor *v, int grf_count,
                            instruction_scheduler_mode mode);
   void calculate_deps();
   bool is_compressed(fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   fs_visitor *v;

   void count_remaining_grf_uses(backend_instruction *inst);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
                                                   int grf_count,
                                                   instruction_scheduler_mode mode)
   : instruction_scheduler(v, grf_count, mode),
     v(v)
{
}
void
fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!remaining_grf_uses)
      return;

   if (inst->dst.file == GRF)
      remaining_grf_uses[inst->dst.reg]++;

   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file != GRF)
         continue;

      remaining_grf_uses[inst->src[i].reg]++;
   }
}
void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!remaining_grf_uses)
      return;

   if (inst->dst.file == GRF) {
      remaining_grf_uses[inst->dst.reg]--;
      grf_active[inst->dst.reg] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file == GRF) {
         remaining_grf_uses[inst->src[i].reg]--;
         grf_active[inst->src[i].reg] = true;
      }
   }
}
int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;

   if (inst->dst.file == GRF) {
      if (remaining_grf_uses[inst->dst.reg] == 1)
         benefit += v->virtual_grf_sizes[inst->dst.reg];
      if (!grf_active[inst->dst.reg])
         benefit -= v->virtual_grf_sizes[inst->dst.reg];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file != GRF)
         continue;

      if (remaining_grf_uses[inst->src[i].reg] == 1)
         benefit += v->virtual_grf_sizes[inst->src[i].reg];
      if (!grf_active[inst->src[i].reg])
         benefit -= v->virtual_grf_sizes[inst->src[i].reg];
   }

   return benefit;
}
class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   vec4_visitor *v;

   void count_remaining_grf_uses(backend_instruction *inst);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, SCHEDULE_POST),
     v(v)
{
}

void
vec4_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
{
}

void
vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
}

int
vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   return 0;
}
schedule_node::schedule_node(backend_instruction *inst,
                             instruction_scheduler *sched)
{
   struct brw_context *brw = sched->bv->brw;

   this->inst = inst;
   this->child_array_size = 0;
   this->children = NULL;
   this->child_latency = NULL;
   this->child_count = 0;
   this->parent_count = 0;
   this->unblocked_time = 0;
   this->cand_generation = 0;
   this->delay = 0;

   /* We can't measure Gen6 timings directly but expect them to be much
    * closer to Gen7 than Gen4.
    */
   if (!sched->post_reg_alloc)
      this->latency = 1;
   else if (brw->gen >= 6)
      set_latency_gen7(brw->is_haswell);
   else
      set_latency_gen4();
}
void
instruction_scheduler::add_inst(backend_instruction *inst)
{
   schedule_node *n = new(mem_ctx) schedule_node(inst, this);

   assert(!inst->is_head_sentinel());
   assert(!inst->is_tail_sentinel());

   this->instructions_to_schedule++;

   inst->remove();
   instructions.push_tail(n);
}
/** Recursive computation of the delay member of a node. */
void
instruction_scheduler::compute_delay(schedule_node *n)
{
   if (!n->child_count) {
      n->delay = issue_time(n->inst);
   } else {
      for (int i = 0; i < n->child_count; i++) {
         if (!n->children[i]->delay)
            compute_delay(n->children[i]);
         n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
      }
   }
}
/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}
/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         next = (schedule_node *)next->next;
      }
   }
}
/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(fs_inst *inst)
{
   return (v->dispatch_width == 16 &&
           !inst->force_uncompressed &&
           !inst->force_sechalf);
}
void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node *last_grf_write[grf_count * 16];
   schedule_node *last_mrf_write[BRW_MAX_MRF];
   schedule_node *last_conditional_mod[2] = { NULL, NULL };
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;
   int reg_width = v->dispatch_width / 8;
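
   /* A worked example of the two indexing schemes above (illustrative
    * register numbers only): before register allocation, a write to
    * reg_offset 1 of vgrf 5 is tracked in last_grf_write[5 * 16 + 1]; after
    * register allocation, a SIMD16 write to hardware registers g20..g21 is
    * tracked in last_grf_write[20] and last_grf_write[21].
    */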
   /* The last instruction always needs to still be the last
    * instruction.  Either it's flow control (IF, ELSE, ENDIF, DO,
    * WHILE) and scheduling other things after it would disturb the
    * basic block, or it's FB_WRITE and we should do a better job at
    * dead code elimination anyway.
    */
   schedule_node *last = (schedule_node *)instructions.get_tail();
   add_barrier_deps(last);

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
          inst->has_side_effects())
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
                  add_dep(last_grf_write[inst->src[i].reg + r], n);
            } else {
               for (int r = 0; r < inst->regs_read(v, i); r++) {
                  add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
               }
            }
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
               for (int r = 0; r < size; r++)
                  add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != IMM)) {
            assert(inst->src[i].file != MRF);
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         add_dep(last_conditional_mod[inst->flag_subreg], n);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == GRF) {
         if (post_reg_alloc) {
            for (int r = 0; r < inst->regs_written * reg_width; r++) {
               add_dep(last_grf_write[inst->dst.reg + r], n);
               last_grf_write[inst->dst.reg + r] = n;
            }
         } else {
            for (int r = 0; r < inst->regs_written; r++) {
               add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

         add_dep(last_mrf_write[reg], n);
         last_mrf_write[reg] = n;
         if (is_compressed(inst)) {
            if (inst->dst.reg & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            add_dep(last_mrf_write[reg], n);
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
         last_conditional_mod[inst->flag_subreg] = n;
      }

      if (inst->writes_accumulator_implicitly(v->brw) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   exec_node *node;
   exec_node *prev;
   for (node = instructions.get_tail(), prev = node->prev;
        !node->is_head_sentinel();
        node = prev, prev = node->prev) {
      schedule_node *n = (schedule_node *)node;
      fs_inst *inst = (fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].reg + r]);
            } else {
               for (int r = 0; r < inst->regs_read(v, i); r++) {
                  add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]);
               }
            }
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
               for (int r = 0; r < size; r++)
                  add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r]);
            } else {
               add_dep(n, last_fixed_grf_write);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != IMM)) {
            assert(inst->src[i].file != MRF);
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod[inst->flag_subreg]);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == GRF) {
         if (post_reg_alloc) {
            for (int r = 0; r < inst->regs_written * reg_width; r++)
               last_grf_write[inst->dst.reg + r] = n;
         } else {
            for (int r = 0; r < inst->regs_written; r++) {
               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.reg & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;

            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod[inst->flag_subreg] = n;
      }

      if (inst->writes_accumulator_implicitly(v->brw)) {
         last_accumulator_write = n;
      }
   }
}
void
vec4_instruction_scheduler::calculate_deps()
{
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF];
   schedule_node *last_conditional_mod = NULL;
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   /* The last instruction always needs to still be the last instruction.
    * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
    * other things after it would disturb the basic block, or it's the EOT
    * URB_WRITE and we should do a better job at dead code eliminating
    * anything that could have been scheduled after it.
    */
   schedule_node *last = (schedule_node *)instructions.get_tail();
   add_barrier_deps(last);

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (inst->has_side_effects())
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            add_dep(last_grf_write[inst->src[i].reg], n);
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            add_dep(last_fixed_grf_write, n);
         } else if (inst->src[i].is_accumulator()) {
            assert(last_accumulator_write);
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != IMM)) {
            /* No reads from MRF, and ATTR is already translated away */
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
         }
      }

      for (int i = 0; i < inst->mlen; i++) {
         /* It looks like the MRF regs are released in the send
          * instruction once it's sent, not when the result comes
          * back.
          */
         add_dep(last_mrf_write[inst->base_mrf + i], n);
      }

      if (inst->reads_flag()) {
         assert(last_conditional_mod);
         add_dep(last_conditional_mod, n);
      }

      if (inst->reads_accumulator_implicitly()) {
         assert(last_accumulator_write);
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == GRF) {
         add_dep(last_grf_write[inst->dst.reg], n);
         last_grf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == MRF) {
         add_dep(last_mrf_write[inst->dst.reg], n);
         last_mrf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod, n, 0);
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->brw) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   last_conditional_mod = NULL;
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   exec_node *node;
   exec_node *prev;
   for (node = instructions.get_tail(), prev = node->prev;
        !node->is_head_sentinel();
        node = prev, prev = node->prev) {
      schedule_node *n = (schedule_node *)node;
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            add_dep(n, last_grf_write[inst->src[i].reg]);
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            add_dep(n, last_fixed_grf_write);
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM &&
                    (inst->src[i].file != HW_REG ||
                     inst->src[i].fixed_hw_reg.file != IMM)) {
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
         }
      }

      for (int i = 0; i < inst->mlen; i++) {
         /* It looks like the MRF regs are released in the send
          * instruction once it's sent, not when the result comes
          * back.
          */
         add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == GRF) {
         last_grf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == MRF) {
         last_mrf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->brw)) {
         last_accumulator_write = n;
      }
   }
}
schedule_node *
fs_instruction_scheduler::choose_instruction_to_schedule()
{
   struct brw_context *brw = v->brw;
   schedule_node *chosen = NULL;

   if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
      int chosen_time = 0;

      /* Of the instructions ready to execute or the closest to
       * being ready, choose the oldest one.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         if (!chosen || n->unblocked_time < chosen_time) {
            chosen = n;
            chosen_time = n->unblocked_time;
         }
      }
   } else {
      /* Before register allocation, we don't care about the latencies of
       * instructions.  All we care about is reducing live intervals of
       * variables so that we can avoid register spilling, or get SIMD16
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
      foreach_in_list(schedule_node, n, &instructions) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
            chosen = n;
            continue;
         }

         /* Most important: If we can definitely reduce register pressure, do
          * so immediately.
          */
         int register_pressure_benefit = get_register_pressure_benefit(n->inst);
         int chosen_register_pressure_benefit =
            get_register_pressure_benefit(chosen->inst);

         if (register_pressure_benefit > 0 &&
             register_pressure_benefit > chosen_register_pressure_benefit) {
            chosen = n;
            continue;
         } else if (chosen_register_pressure_benefit > 0 &&
                    (register_pressure_benefit <
                     chosen_register_pressure_benefit)) {
            continue;
         }

         if (mode == SCHEDULE_PRE_LIFO) {
            /* Prefer instructions that recently became available for
             * scheduling.  These are the things that are most likely to
             * (eventually) make a variable dead and reduce register pressure.
             * Typical register pressure estimates don't work for us because
             * most of our pressure comes from texturing, where no single
             * instruction to schedule will make a vec4 value dead.
             */
            if (n->cand_generation > chosen->cand_generation) {
               chosen = n;
               continue;
            } else if (n->cand_generation < chosen->cand_generation) {
               continue;
            }

            /* On MRF-using chips, prefer non-SEND instructions.  If we don't
             * do this, then because we prefer instructions that just became
             * candidates, we'll end up in a pattern of scheduling a SEND,
             * then the MRFs for the next SEND, then the next SEND, then the
             * MRFs, etc., without ever consuming the results of a send.
             */
            if (brw->gen < 7) {
               fs_inst *chosen_inst = (fs_inst *)chosen->inst;

               /* We use regs_written > 1 as our test for the kind of send
                * instruction to avoid -- only sends generate many regs, and a
                * single-result send is probably actually reducing register
                * pressure.
                */
               if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) {
                  chosen = n;
                  continue;
               } else if (inst->regs_written > chosen_inst->regs_written) {
                  continue;
               }
            }
         }

         /* For instructions pushed on the cands list at the same time, prefer
          * the one with the highest delay to the end of the program.  This is
          * most likely to have its values able to be consumed first (such as
          * for a large tree of lowered ubo loads, which appear reversed in
          * the instruction stream with respect to when they can be consumed).
          */
         if (n->delay > chosen->delay) {
            chosen = n;
            continue;
         } else if (n->delay < chosen->delay) {
            continue;
         }

         /* If all other metrics are equal, we prefer the first instruction in
          * the list (program execution).
          */
      }
   }

   return chosen;
}
schedule_node *
vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
      }
   }

   return chosen;
}

int
fs_instruction_scheduler::issue_time(backend_instruction *inst)
{
   if (is_compressed((fs_inst *)inst))
      return 4;
   else
      return 2;
}

int
vec4_instruction_scheduler::issue_time(backend_instruction *inst)
{
   /* We always execute as two vec4s in parallel. */
   return 2;
}
void
instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
{
   struct brw_context *brw = bv->brw;

   /* Remove non-DAG heads from the list. */
   foreach_in_list_safe(schedule_node, n, &instructions) {
      if (n->parent_count != 0)
         n->remove();
   }

   unsigned cand_generation = 1;
   while (!instructions.is_empty()) {
      schedule_node *chosen = choose_instruction_to_schedule();

      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
      next_block_header->insert_before(chosen->inst);
      instructions_to_schedule--;
      update_register_pressure(chosen->inst);

      /* Update the clock for how soon an instruction could start after the
       * chosen one.
       */
      time += issue_time(chosen->inst);

      /* If we expected a delay for scheduling, then bump the clock to reflect
       * that as well.  In reality, the hardware will switch to another
       * hyperthread and may not return to dispatching our thread for a while
       * even after we're unblocked.
       */
      time = MAX2(time, chosen->unblocked_time);

      if (debug) {
         fprintf(stderr, "clock %4d, scheduled: ", time);
         bv->dump_instruction(chosen->inst);
      }

      /* Now that we've scheduled a new instruction, some of its
       * children can be promoted to the list of instructions ready to
       * be scheduled.  Update the children's unblocked time for this
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
         schedule_node *child = chosen->children[i];

         child->unblocked_time = MAX2(child->unblocked_time,
                                      time + chosen->child_latency[i]);

         if (debug) {
            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
            bv->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
         child->parent_count--;
         if (child->parent_count == 0) {
            if (debug) {
               fprintf(stderr, "\t\tnow available\n");
            }
            instructions.push_head(child);
         }
      }
      cand_generation++;

      /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
       * but it's more limited pre-gen6, so if we send something off to it then
       * the next math instruction isn't going to make progress until the first
       * one's done.
       */
      if (brw->gen < 6 && chosen->inst->is_math()) {
         foreach_in_list(schedule_node, n, &instructions) {
            if (n->inst->is_math())
               n->unblocked_time = MAX2(n->unblocked_time,
                                        time + chosen->latency);
         }
      }
   }

   assert(instructions_to_schedule == 0);
}
void
instruction_scheduler::run(exec_list *all_instructions)
{
   backend_instruction *next_block_header =
      (backend_instruction *)all_instructions->head;

   if (debug) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bv->dump_instructions();
   }

   /* Populate the remaining GRF uses array to improve the pre-regalloc
    * scheduling.
    */
   if (remaining_grf_uses) {
      foreach_in_list(schedule_node, node, all_instructions) {
         count_remaining_grf_uses((backend_instruction *)node);
      }
   }

   while (!next_block_header->is_tail_sentinel()) {
      /* Add things to be scheduled until we get to a new BB. */
      while (!next_block_header->is_tail_sentinel()) {
         backend_instruction *inst = next_block_header;
         next_block_header = (backend_instruction *)next_block_header->next;

         add_inst(inst);
         if (inst->is_control_flow())
            break;
      }
      calculate_deps();

      foreach_in_list(schedule_node, n, &instructions) {
         compute_delay(n);
      }

      schedule_instructions(next_block_header);
   }

   if (debug) {
      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bv->dump_instructions();
   }
}
void
fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
{
   int grf_count;
   if (mode == SCHEDULE_POST)
      grf_count = grf_used;
   else
      grf_count = virtual_grf_count;

   fs_instruction_scheduler sched(this, grf_count, mode);
   sched.run(&instructions);

   if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) {
      fprintf(stderr, "fs%d estimated execution time: %d cycles\n",
              dispatch_width, sched.time);
   }

   invalidate_live_intervals();
}

void
vec4_visitor::opt_schedule_instructions()
{
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(&instructions);

   if (unlikely(debug_flag)) {
      fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time);
   }

   invalidate_live_intervals();
}