/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include "brw_fs.h"
#include "brw_vec4.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
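
/* Roughly, the per-basic-block flow implemented below is:
 *
 *    calculate_deps();                     // build RAW/WAW/WAR edges
 *    compute_delay() for each node;        // critical-path estimate
 *    while (!instructions.is_empty()) {
 *       chosen = choose_instruction_to_schedule();
 *       emit chosen; time += issue_time(chosen->inst);
 *       for each child edge (child, latency) of chosen:
 *          child->unblocked_time = MAX2(child->unblocked_time, time + latency);
 *          if (--child->parent_count == 0)
 *             instructions.push_head(child);   // child is now a DAG head
 *    }
 *
 * This is only a sketch of the code below; the real loop also models the
 * shared math unit and tracks candidate generations for the heuristics.
 */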
static bool debug = false;
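
/* The debug flag above makes the scheduler dump each block before and after
 * scheduling and log every node as it is picked (see the printf calls in
 * schedule_instructions() and run() below).
 */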
class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, const struct brw_context *brw)
   {
      this->inst = inst;
      this->child_array_size = 0;
      this->children = NULL;
      this->child_latency = NULL;
      this->child_count = 0;
      this->parent_count = 0;
      this->unblocked_time = 0;
      this->cand_generation = 0;
      this->delay = 0;

      /* We can't measure Gen6 timings directly but expect them to be much
       * closer to Gen7 than Gen4.
       */
      if (brw->gen >= 6)
         set_latency_gen7(brw->is_haswell);
      else
         set_latency_gen4();
   }
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;
};
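
/* The gen4 estimates below are modeled as rounds * chans * math_latency:
 * the per-case comments give the number of internal rounds, and the cost is
 * scaled by the channel count as well.  This is just a reading of the
 * constants that follow, not an additional measurement.
 */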
void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}
void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /*
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,5,1>F                     { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;
   case BRW_OPCODE_LRP:
      /*
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,1,1>F.x  g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 14 : 16;
      break;
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /*
       * math inv(8) g4<1>F g2<0,1,0>F      null      { align1 WE_normal 1Q };
       *
       * math inv(8) g4<1>F g2<0,1,0>F      null      { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;
   case SHADER_OPCODE_POW:
      /*
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F { align1 WE_normal 1Q };
       *
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
      /*
       * mov(8)  g115<1>F   0F                        { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                        { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4        { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                        { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                        { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4        { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                        { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                        { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4        { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4        { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                        { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                        { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4        { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4        { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;
   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41 cycles:
       * mov(8)   g114<1>UD  0D                       { align1 WE_normal 1Q };
       * send(8)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4       { align1 WE_normal 1Q };
       * mov(16)  g6<1>F     g6<8,8,1>D               { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16)  g114<1>UD  0D                       { align1 WE_normal 1H };
       * send(16) g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8       { align1 WE_normal 1H };
       * mov(16)  g114<1>UD  0D                       { align1 WE_normal 1H };
       * mov(16)  g6<1>F     g6<8,8,1>D               { align1 WE_normal 1H };
       * send(16) g8<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8       { align1 WE_normal 1H };
       * mov(16)  g8<1>F     g8<8,8,1>D               { align1 WE_normal 1H };
       * add(16)  g6<1>F     g6<8,8,1>F  g8<8,8,1>F   { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* Testing using varying-index pull constants:
       *
       * mov(8)  g4<1>D  g2.1<0,1,0>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1               { align1 WE_normal 1Q };
       *
       * mov(8)  g4<1>D  g2.1<0,1,0>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1               { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                   { align1 WE_normal 1Q };
       *
       * mov(8)  g4<1>D  g2.1<0,1,0>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1               { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                   { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1               { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                   { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F               { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;
   default:
      /*
       * mul(8) g4<1>F g2<0,1,0>F      0.5F           { align1 WE_normal 1Q };
       *
       * mul(8) g4<1>F g2<0,1,0>F      0.5F           { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                     { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
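
/* These latency estimates feed the dependence graph below: the two-argument
 * add_dep() defaults a RAW/WAW edge's latency to the producing node's
 * latency, and schedule_instructions() uses the per-edge latencies to compute
 * each child's unblocked_time.
 */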
class instruction_scheduler {
public:
   instruction_scheduler(backend_visitor *v, int grf_count, bool post_reg_alloc)
   {
      this->bv = v;
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->instructions.make_empty();
      this->instructions_to_schedule = 0;
      this->post_reg_alloc = post_reg_alloc;
      this->time = 0;
   }

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }

   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(exec_list *instructions);
   void add_inst(backend_instruction *inst);
   void compute_delay(schedule_node *node);
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus 8-wide pixel shaders take 2
    * cycles to dispatch and 16-wide (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;

   void schedule_instructions(backend_instruction *next_block_header);

   void *mem_ctx;

   bool post_reg_alloc;
   int instructions_to_schedule;
   int grf_count;
   int time;
   exec_list instructions;
   backend_visitor *bv;
};
class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(fs_visitor *v, int grf_count, bool post_reg_alloc);
   void calculate_deps();
   bool is_compressed(fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   fs_visitor *v;
};

fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
                                                   int grf_count,
                                                   bool post_reg_alloc)
   : instruction_scheduler(v, grf_count, post_reg_alloc),
     v(v)
{
}
class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   vec4_visitor *v;
};

vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, true),
     v(v)
{
}
void
instruction_scheduler::add_inst(backend_instruction *inst)
{
   schedule_node *n = new(mem_ctx) schedule_node(inst, bv->brw);

   assert(!inst->is_head_sentinel());
   assert(!inst->is_tail_sentinel());

   this->instructions_to_schedule++;

   inst->remove();
   instructions.push_tail(n);
}
/** Recursive computation of the delay member of a node. */
void
instruction_scheduler::compute_delay(schedule_node *n)
{
   if (!n->child_count) {
      n->delay = issue_time(n->inst);
   } else {
      for (int i = 0; i < n->child_count; i++) {
         if (!n->children[i]->delay)
            compute_delay(n->children[i]);
         n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
      }
   }
}
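
/* run() invokes compute_delay() on every node of a block before scheduling
 * it.  Because a node's result is cached in its delay field, the recursion
 * above never recomputes a child that has already been visited.
 */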
/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}
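
/* Note that add_dep() quietly ignores a NULL before/after node, so the
 * calculate_deps() functions below can pass their last_*_write trackers
 * unconditionally even when nothing has written a register yet, and a
 * duplicate edge simply keeps the larger of the two latencies.
 */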
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}
/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         next = (schedule_node *)next->next;
      }
   }
}
/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(fs_inst *inst)
{
   return (v->dispatch_width == 16 &&
           !inst->force_uncompressed &&
           !inst->force_sechalf);
}
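
/* calculate_deps() uses is_compressed() to mark both MRF registers touched by
 * a compressed (SIMD16) MRF write: the second register is reg + 1, or reg + 4
 * when the COMPR4 addressing mode is in use.
 */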
void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF (so
    * different reg_offsets within it can interfere when they shouldn't).
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF];
   schedule_node *last_conditional_mod[2] = { NULL, NULL };
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;
   int reg_width = v->dispatch_width / 8;

   /* The last instruction always needs to still be the last
    * instruction.  Either it's flow control (IF, ELSE, ENDIF, DO,
    * WHILE) and scheduling other things after it would disturb the
    * basic block, or it's FB_WRITE and we should do a better job at
    * dead code elimination anyway.
    */
   schedule_node *last = (schedule_node *)instructions.get_tail();
   add_barrier_deps(last);

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
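
   /* The dependencies are built in two passes over the block.  This first,
    * top-to-bottom walk gives each instruction RAW and WAW edges against the
    * most recent writer of everything it reads or writes.  The second,
    * bottom-to-top walk further down then adds the WAR edges, pointing from
    * each reader to the writes that follow it in program order.
    */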
   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_list(node, &instructions) {
      schedule_node *n = (schedule_node *)node;
      fs_inst *inst = (fs_inst *)n->inst;

      if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
          inst->has_side_effects())
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
                  add_dep(last_grf_write[inst->src[i].reg + r], n);
            } else {
               add_dep(last_grf_write[inst->src[i].reg], n);
            }
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
               for (int r = 0; r < size; r++)
                  add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM) {
            assert(inst->src[i].file != MRF);
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         add_dep(last_conditional_mod[inst->flag_subreg], n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == GRF) {
         if (post_reg_alloc) {
            for (int r = 0; r < inst->regs_written * reg_width; r++) {
               add_dep(last_grf_write[inst->dst.reg + r], n);
               last_grf_write[inst->dst.reg + r] = n;
            }
         } else {
            add_dep(last_grf_write[inst->dst.reg], n);
            last_grf_write[inst->dst.reg] = n;
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

         add_dep(last_mrf_write[reg], n);
         last_mrf_write[reg] = n;
         if (is_compressed(inst)) {
            if (inst->dst.reg & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            add_dep(last_mrf_write[reg], n);
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
         last_conditional_mod[inst->flag_subreg] = n;
      }
   }
   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_fixed_grf_write = NULL;

   exec_node *node;
   exec_node *prev;
   for (node = instructions.get_tail(), prev = node->prev;
        !node->is_head_sentinel();
        node = prev, prev = node->prev) {
      schedule_node *n = (schedule_node *)node;
      fs_inst *inst = (fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].reg + r]);
            } else {
               add_dep(n, last_grf_write[inst->src[i].reg]);
            }
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
               for (int r = 0; r < size; r++)
                  add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r]);
            } else {
               add_dep(n, last_fixed_grf_write);
            }
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM) {
            assert(inst->src[i].file != MRF);
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod[inst->flag_subreg]);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == GRF) {
         if (post_reg_alloc) {
            for (int r = 0; r < inst->regs_written * reg_width; r++)
               last_grf_write[inst->dst.reg + r] = n;
         } else {
            last_grf_write[inst->dst.reg] = n;
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.reg & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;

            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         last_conditional_mod[inst->flag_subreg] = n;
      }
   }
}
void
vec4_instruction_scheduler::calculate_deps()
{
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF];
   schedule_node *last_conditional_mod = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   /* The last instruction always needs to still be the last instruction.
    * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
    * other things after it would disturb the basic block, or it's the EOT
    * URB_WRITE and we should do a better job at dead code eliminating
    * anything that could have been scheduled after it.
    */
   schedule_node *last = (schedule_node *)instructions.get_tail();
   add_barrier_deps(last);

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
832 foreach_list(node
, &instructions
) {
833 schedule_node
*n
= (schedule_node
*)node
;
834 vec4_instruction
*inst
= (vec4_instruction
*)n
->inst
;
836 if (inst
->has_side_effects())
839 /* read-after-write deps. */
840 for (int i
= 0; i
< 3; i
++) {
841 if (inst
->src
[i
].file
== GRF
) {
842 add_dep(last_grf_write
[inst
->src
[i
].reg
], n
);
843 } else if (inst
->src
[i
].file
== HW_REG
&&
844 (inst
->src
[i
].fixed_hw_reg
.file
==
845 BRW_GENERAL_REGISTER_FILE
)) {
846 add_dep(last_fixed_grf_write
, n
);
847 } else if (inst
->src
[i
].file
!= BAD_FILE
&&
848 inst
->src
[i
].file
!= IMM
&&
849 inst
->src
[i
].file
!= UNIFORM
) {
850 /* No reads from MRF, and ATTR is already translated away */
851 assert(inst
->src
[i
].file
!= MRF
&&
852 inst
->src
[i
].file
!= ATTR
);
857 for (int i
= 0; i
< inst
->mlen
; i
++) {
858 /* It looks like the MRF regs are released in the send
859 * instruction once it's sent, not when the result comes
862 add_dep(last_mrf_write
[inst
->base_mrf
+ i
], n
);
865 if (inst
->depends_on_flags()) {
866 assert(last_conditional_mod
);
867 add_dep(last_conditional_mod
, n
);
870 /* write-after-write deps. */
871 if (inst
->dst
.file
== GRF
) {
872 add_dep(last_grf_write
[inst
->dst
.reg
], n
);
873 last_grf_write
[inst
->dst
.reg
] = n
;
874 } else if (inst
->dst
.file
== MRF
) {
875 add_dep(last_mrf_write
[inst
->dst
.reg
], n
);
876 last_mrf_write
[inst
->dst
.reg
] = n
;
877 } else if (inst
->dst
.file
== HW_REG
&&
878 inst
->dst
.fixed_hw_reg
.file
== BRW_GENERAL_REGISTER_FILE
) {
879 last_fixed_grf_write
= n
;
880 } else if (inst
->dst
.file
!= BAD_FILE
) {
884 if (inst
->mlen
> 0) {
885 for (int i
= 0; i
< v
->implied_mrf_writes(inst
); i
++) {
886 add_dep(last_mrf_write
[inst
->base_mrf
+ i
], n
);
887 last_mrf_write
[inst
->base_mrf
+ i
] = n
;
891 if (inst
->conditional_mod
) {
892 add_dep(last_conditional_mod
, n
, 0);
893 last_conditional_mod
= n
;
   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   last_conditional_mod = NULL;
   last_fixed_grf_write = NULL;

   exec_node *node;
   exec_node *prev;
   for (node = instructions.get_tail(), prev = node->prev;
        !node->is_head_sentinel();
        node = prev, prev = node->prev) {
      schedule_node *n = (schedule_node *)node;
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            add_dep(n, last_grf_write[inst->src[i].reg]);
         } else if (inst->src[i].file == HW_REG &&
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            add_dep(n, last_fixed_grf_write);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
                    inst->src[i].file != UNIFORM) {
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
         }
      }

      for (int i = 0; i < inst->mlen; i++) {
         /* It looks like the MRF regs are released in the send
          * instruction once it's sent, not when the result comes
          * back.
          */
         add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
      }

      if (inst->depends_on_flags()) {
         add_dep(n, last_conditional_mod);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == GRF) {
         last_grf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == MRF) {
         last_mrf_write[inst->dst.reg] = n;
      } else if (inst->dst.file == HW_REG &&
                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         last_fixed_grf_write = n;
      } else if (inst->dst.file != BAD_FILE) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->conditional_mod) {
         last_conditional_mod = n;
      }
   }
}
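
/* The vec4 scheduler is constructed with post_reg_alloc = true, so the
 * dependence tracking above stays at whole-register granularity and does not
 * need the per-reg_width loops of the FS version.
 */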
schedule_node *
fs_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;

   if (post_reg_alloc) {
      int chosen_time = 0;

      /* Of the instructions ready to execute or the closest to being ready,
       * choose the oldest one.
       */
      foreach_list(node, &instructions) {
         schedule_node *n = (schedule_node *)node;

         if (!chosen || n->unblocked_time < chosen_time) {
            chosen = n;
            chosen_time = n->unblocked_time;
         }
      }
   } else {
      /* Before register allocation, we don't care about the latencies of
       * instructions.  All we care about is reducing live intervals of
       * variables so that we can avoid register spilling, or get 16-wide
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
      foreach_list(node, &instructions) {
         schedule_node *n = (schedule_node *)node;
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
            chosen = n;
            continue;
         }

         /* Prefer instructions that recently became available for scheduling.
          * These are the things that are most likely to (eventually) make a
          * variable dead and reduce register pressure.  Typical register
          * pressure estimates don't work for us because most of our pressure
          * comes from texturing, where no single instruction to schedule will
          * make a vec4 value dead.
          */
         if (n->cand_generation > chosen->cand_generation) {
            chosen = n;
            continue;
         } else if (n->cand_generation < chosen->cand_generation) {
            continue;
         }

         /* On MRF-using chips, prefer non-SEND instructions.  If we don't do
          * this, then because we prefer instructions that just became
          * candidates, we'll end up in a pattern of scheduling a SEND, then
          * the MRFs for the next SEND, then the next SEND, then the MRFs,
          * etc., without ever consuming the results of a send.
          */
         if (v->brw->gen < 7) {
            fs_inst *chosen_inst = (fs_inst *)chosen->inst;

            /* We use regs_written > 1 as our test for the kind of send
             * instruction to avoid -- only sends generate many regs, and a
             * single-result send is probably actually reducing register
             * pressure.
             */
            if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) {
               chosen = n;
               continue;
            } else if (inst->regs_written > chosen_inst->regs_written) {
               continue;
            }
         }

         /* For instructions pushed on the cands list at the same time, prefer
          * the one with the highest delay to the end of the program.  This is
          * most likely to have its values able to be consumed first (such as
          * for a large tree of lowered ubo loads, which appear reversed in
          * the instruction stream with respect to when they can be consumed).
          */
         if (n->delay > chosen->delay) {
            chosen = n;
            continue;
         } else if (n->delay < chosen->delay) {
            continue;
         }

         /* If all other metrics are equal, we prefer the first instruction in
          * the list (program execution).
          */
      }
   }

   return chosen;
}
schedule_node *
vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_list(node, &instructions) {
      schedule_node *n = (schedule_node *)node;

      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
      }
   }

   return chosen;
}
int
fs_instruction_scheduler::issue_time(backend_instruction *inst)
{
   if (is_compressed((fs_inst *)inst))
      return 4;
   else
      return 2;
}
int
vec4_instruction_scheduler::issue_time(backend_instruction *inst)
{
   /* We always execute as two vec4s in parallel. */
   return 2;
}
void
instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
{
   /* Remove non-DAG heads from the list. */
   foreach_list_safe(node, &instructions) {
      schedule_node *n = (schedule_node *)node;
      if (n->parent_count != 0)
         n->remove();
   }

   unsigned cand_generation = 1;
   while (!instructions.is_empty()) {
      schedule_node *chosen = choose_instruction_to_schedule();

      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
      next_block_header->insert_before(chosen->inst);
      instructions_to_schedule--;

      /* Update the clock for how soon an instruction could start after the
       * chosen one.
       */
      time += issue_time(chosen->inst);

      /* If we expected a delay for scheduling, then bump the clock to reflect
       * that as well.  In reality, the hardware will switch to another
       * hyperthread and may not return to dispatching our thread for a while
       * even after we're unblocked.
       */
      time = MAX2(time, chosen->unblocked_time);

      if (debug) {
         printf("clock %4d, scheduled: ", time);
         bv->dump_instruction(chosen->inst);
      }

      /* Now that we've scheduled a new instruction, some of its
       * children can be promoted to the list of instructions ready to
       * be scheduled.  Update the children's unblocked time for this
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
         schedule_node *child = chosen->children[i];

         child->unblocked_time = MAX2(child->unblocked_time,
                                      time + chosen->child_latency[i]);

         if (debug) {
            printf("\tchild %d, %d parents: ", i, child->parent_count);
            bv->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
         child->parent_count--;
         if (child->parent_count == 0) {
            if (debug) {
               printf("\t\tnow available\n");
            }
            instructions.push_head(child);
         }
      }
      cand_generation++;

      /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
       * but it's more limited pre-gen6, so if we send something off to it then
       * the next math instruction isn't going to make progress until the first
       * one's done.
       */
      if (chosen->inst->is_math()) {
         foreach_list(node, &instructions) {
            schedule_node *n = (schedule_node *)node;

            if (n->inst->is_math())
               n->unblocked_time = MAX2(n->unblocked_time,
                                        time + chosen->latency);
         }
      }
   }

   assert(instructions_to_schedule == 0);
}
void
instruction_scheduler::run(exec_list *all_instructions)
{
   backend_instruction *next_block_header =
      (backend_instruction *)all_instructions->head;

   if (debug) {
      printf("\nInstructions before scheduling (reg_alloc %d)\n", post_reg_alloc);
      bv->dump_instructions();
   }

   while (!next_block_header->is_tail_sentinel()) {
      /* Add things to be scheduled until we get to a new BB. */
      while (!next_block_header->is_tail_sentinel()) {
         backend_instruction *inst = next_block_header;
         next_block_header = (backend_instruction *)next_block_header->next;

         add_inst(inst);
         if (inst->is_control_flow())
            break;
      }
      calculate_deps();

      foreach_list(node, &instructions) {
         schedule_node *n = (schedule_node *)node;
         compute_delay(n);
      }

      schedule_instructions(next_block_header);
   }

   if (debug) {
      printf("\nInstructions after scheduling (reg_alloc %d)\n", post_reg_alloc);
      bv->dump_instructions();
   }
}
void
fs_visitor::schedule_instructions(bool post_reg_alloc)
{
   int grf_count;
   if (post_reg_alloc)
      grf_count = grf_used;
   else
      grf_count = virtual_grf_count;

   fs_instruction_scheduler sched(this, grf_count, post_reg_alloc);
   sched.run(&instructions);

   if (unlikely(INTEL_DEBUG & DEBUG_WM) && post_reg_alloc) {
      printf("fs%d estimated execution time: %d cycles\n",
             dispatch_width, sched.time);
   }

   invalidate_live_intervals();
}
void
vec4_visitor::opt_schedule_instructions()
{
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(&instructions);

   if (unlikely(debug_flag)) {
      printf("vec4 estimated execution time: %d cycles\n", sched.time);
   }

   this->live_intervals_valid = false;
}