src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_vec4.h"
30 #include "glsl/glsl_types.h"
31 #include "glsl/ir_optimization.h"
32
33 using namespace brw;
34
35 /** @file brw_schedule_instructions.cpp
36  *
37  * List scheduling of FS and vec4 instructions.
38 *
39 * The basic model of the list scheduler is to take a basic block,
40 * compute a DAG of the dependencies (RAW ordering with latency, WAW
41 * ordering with latency, WAR ordering), and make a list of the DAG heads.
42 * Heuristically pick a DAG head, then put all the children that are
43 * now DAG heads into the list of things to schedule.
44 *
45 * The heuristic is the important part. We're trying to be cheap,
46  * since actually computing the optimal scheduling is NP-complete.
47 * What we do is track a "current clock". When we schedule a node, we
48 * update the earliest-unblocked clock time of its children, and
49 * increment the clock. Then, when trying to schedule, we just pick
50 * the earliest-unblocked instruction to schedule.
51 *
52 * Note that often there will be many things which could execute
53 * immediately, and there are a range of heuristic options to choose
54 * from in picking among those.
55 */
56
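/*
 * The scheduler runs in two modes: before register allocation, per-node
 * latencies are forced to 1 and the heuristic concentrates on reducing
 * register pressure; after register allocation, instructions are scheduled
 * purely against the latency estimates below.
 */
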
57 static bool debug = false;
58
59 class instruction_scheduler;
60
61 class schedule_node : public exec_node
62 {
63 public:
64 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
65 void set_latency_gen4();
66 void set_latency_gen7(bool is_haswell);
67
68 backend_instruction *inst;
69 schedule_node **children;
70 int *child_latency;
71 int child_count;
72 int parent_count;
73 int child_array_size;
74 int unblocked_time;
75 int latency;
76
77 /**
78 * Which iteration of pushing groups of children onto the candidates list
79 * this node was a part of.
80 */
81 unsigned cand_generation;
82
83 /**
84 * This is the sum of the instruction's latency plus the maximum delay of
85 * its children, or just the issue_time if it's a leaf node.
86 */
87 int delay;
88 };
89
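/* Rough Gen4 latency model: each math-box function costs some number of
 * rounds, and every round is charged math_latency cycles for each of the
 * eight channels; all non-math instructions get a nominal latency of 2.
 */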
90 void
91 schedule_node::set_latency_gen4()
92 {
93 int chans = 8;
94 int math_latency = 22;
95
96 switch (inst->opcode) {
97 case SHADER_OPCODE_RCP:
98 this->latency = 1 * chans * math_latency;
99 break;
100 case SHADER_OPCODE_RSQ:
101 this->latency = 2 * chans * math_latency;
102 break;
103 case SHADER_OPCODE_INT_QUOTIENT:
104 case SHADER_OPCODE_SQRT:
105 case SHADER_OPCODE_LOG2:
106 /* full precision log. partial is 2. */
107 this->latency = 3 * chans * math_latency;
108 break;
109 case SHADER_OPCODE_INT_REMAINDER:
110 case SHADER_OPCODE_EXP2:
111 /* full precision. partial is 3, same throughput. */
112 this->latency = 4 * chans * math_latency;
113 break;
114 case SHADER_OPCODE_POW:
115 this->latency = 8 * chans * math_latency;
116 break;
117 case SHADER_OPCODE_SIN:
118 case SHADER_OPCODE_COS:
119 /* minimum latency, max is 12 rounds. */
120 this->latency = 5 * chans * math_latency;
121 break;
122 default:
123 this->latency = 2;
124 break;
125 }
126 }
127
128 void
129 schedule_node::set_latency_gen7(bool is_haswell)
130 {
131 switch (inst->opcode) {
132 case BRW_OPCODE_MAD:
133 /* 2 cycles
134 * (since the last two src operands are in different register banks):
135 * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
136 *
137 * 3 cycles on IVB, 4 on HSW
138 * (since the last two src operands are in the same register bank):
139 * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
140 *
141 * 18 cycles on IVB, 16 on HSW
142 * (since the last two src operands are in different register banks):
143 * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
144 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
145 *
146 * 20 cycles on IVB, 18 on HSW
147 * (since the last two src operands are in the same register bank):
148 * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
149 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
150 */
151
152 /* Our register allocator doesn't know about register banks, so use the
153 * higher latency.
154 */
155 latency = is_haswell ? 16 : 18;
156 break;
157
158 case BRW_OPCODE_LRP:
159 /* 2 cycles
160 * (since the last two src operands are in different register banks):
161 * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
162 *
163 * 3 cycles on IVB, 4 on HSW
164 * (since the last two src operands are in the same register bank):
165 * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
166 *
167 * 16 cycles on IVB, 14 on HSW
168 * (since the last two src operands are in different register banks):
169 * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
170 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
171 *
172 * 16 cycles
173 * (since the last two src operands are in the same register bank):
174 * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
175 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
176 */
177
178 /* Our register allocator doesn't know about register banks, so use the
179 * higher latency.
180 */
181 latency = 14;
182 break;
183
184 case SHADER_OPCODE_RCP:
185 case SHADER_OPCODE_RSQ:
186 case SHADER_OPCODE_SQRT:
187 case SHADER_OPCODE_LOG2:
188 case SHADER_OPCODE_EXP2:
189 case SHADER_OPCODE_SIN:
190 case SHADER_OPCODE_COS:
191 /* 2 cycles:
192 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
193 *
194 * 18 cycles:
195 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
196 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
197 *
198 * Same for exp2, log2, rsq, sqrt, sin, cos.
199 */
200 latency = is_haswell ? 14 : 16;
201 break;
202
203 case SHADER_OPCODE_POW:
204 /* 2 cycles:
205 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
206 *
207 * 26 cycles:
208 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
209 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
210 */
211 latency = is_haswell ? 22 : 24;
212 break;
213
214 case SHADER_OPCODE_TEX:
215 case SHADER_OPCODE_TXD:
216 case SHADER_OPCODE_TXF:
217 case SHADER_OPCODE_TXL:
218 /* 18 cycles:
219 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
220 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
221 * send(8) g4<1>UW g114<8,8,1>F
222 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
223 *
224 * 697 +/-49 cycles (min 610, n=26):
225 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
226 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
227 * send(8) g4<1>UW g114<8,8,1>F
228 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
229 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
230 *
231        * So the first texture load of the batchbuffer takes
232 * ~700 cycles, since the caches are cold at that point.
233 *
234 * 840 +/- 92 cycles (min 720, n=25):
235 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
236 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
237 * send(8) g4<1>UW g114<8,8,1>F
238 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
239 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
240 * send(8) g4<1>UW g114<8,8,1>F
241 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
242 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
243 *
244 * On the second load, it takes just an extra ~140 cycles, and after
245 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
246 *
247 * 683 +/- 49 cycles (min = 602, n=47):
248 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
249 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
250 * send(8) g4<1>UW g114<8,8,1>F
251 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
252 * send(8) g50<1>UW g114<8,8,1>F
253 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
254 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
255 *
256 * The unit appears to be pipelined, since this matches up with the
257 * cache-cold case, despite there being two loads here. If you replace
258 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
259 *
260 * So, take some number between the cache-hot 140 cycles and the
261 * cache-cold 700 cycles. No particular tuning was done on this.
262 *
263 * I haven't done significant testing of the non-TEX opcodes. TXL at
264 * least looked about the same as TEX.
265 */
266 latency = 200;
267 break;
268
269 case SHADER_OPCODE_TXS:
270 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
271 * cycles (n=15):
272 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
273 * send(8) g6<1>UW g114<8,8,1>F
274 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
275 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
276 *
277 *
278 * Two loads was 535 +/- 30 cycles (n=19):
279 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
280 * send(16) g6<1>UW g114<8,8,1>F
281 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
282 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
283 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
284 * send(16) g8<1>UW g114<8,8,1>F
285 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
286 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
287 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
288 *
289        * Since the only cache that should matter is the
290 * instruction/state cache containing the surface state, assume that we
291 * always have hot caches.
292 */
293 latency = 100;
294 break;
295
296 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
297 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
298 case VS_OPCODE_PULL_CONSTANT_LOAD:
299 /* testing using varying-index pull constants:
300 *
301 * 16 cycles:
302 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
303 * send(8) g4<1>F g4<8,8,1>D
304 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
305 *
306 * ~480 cycles:
307 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
308 * send(8) g4<1>F g4<8,8,1>D
309 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
310 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
311 *
312 * ~620 cycles:
313 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
314 * send(8) g4<1>F g4<8,8,1>D
315 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
316 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
317 * send(8) g4<1>F g4<8,8,1>D
318 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
319 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
320 *
321 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
322 * about 460. We expect to mostly be cache hot, so pick something more
323 * in that direction.
324 */
325 latency = 200;
326 break;
327
328 case SHADER_OPCODE_GEN7_SCRATCH_READ:
329 /* Testing a load from offset 0, that had been previously written:
330 *
331 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
332 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
333 *
334 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
335 * then around 140. Presumably this is cache hit vs miss.
336 */
337          latency = 50;
             break;
338 case SHADER_OPCODE_UNTYPED_ATOMIC:
339 /* Test code:
340 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
341 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
342 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
343 * send(8) g4<1>ud g112<8,8,1>ud
344 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
345 *
346 * Running it 100 times as fragment shader on a 128x128 quad
347 * gives an average latency of 13867 cycles per atomic op,
348 * standard deviation 3%. Note that this is a rather
349        * pessimistic estimate; the actual latency in cases with few
350 * collisions between threads and favorable pipelining has been
351 * seen to be reduced by a factor of 100.
352 */
353 latency = 14000;
354 break;
355
356 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
357 /* Test code:
358 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
359 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
360 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
361 * send(8) g4<1>UD g112<8,8,1>UD
362 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
363 * .
364 * . [repeats 8 times]
365 * .
366 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
367 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
368 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
369 * send(8) g4<1>UD g112<8,8,1>UD
370 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
371 *
372 * Running it 100 times as fragment shader on a 128x128 quad
373 * gives an average latency of 583 cycles per surface read,
374 * standard deviation 0.9%.
375 */
376 latency = is_haswell ? 300 : 600;
377 break;
378
379 default:
380 /* 2 cycles:
381 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
382 *
383 * 16 cycles:
384 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
385 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
386 */
387 latency = 14;
388 break;
389 }
390 }
391
392 class instruction_scheduler {
393 public:
394 instruction_scheduler(backend_visitor *v, int grf_count, bool post_reg_alloc)
395 {
396 this->bv = v;
397 this->mem_ctx = ralloc_context(NULL);
398 this->grf_count = grf_count;
399 this->instructions.make_empty();
400 this->instructions_to_schedule = 0;
401 this->post_reg_alloc = post_reg_alloc;
402 this->time = 0;
403 if (!post_reg_alloc) {
404 this->remaining_grf_uses = rzalloc_array(mem_ctx, int, grf_count);
405 this->grf_active = rzalloc_array(mem_ctx, bool, grf_count);
406 } else {
407 this->remaining_grf_uses = NULL;
408 this->grf_active = NULL;
409 }
410 }
411
412 ~instruction_scheduler()
413 {
414 ralloc_free(this->mem_ctx);
415 }
416 void add_barrier_deps(schedule_node *n);
417 void add_dep(schedule_node *before, schedule_node *after, int latency);
418 void add_dep(schedule_node *before, schedule_node *after);
419
420 void run(exec_list *instructions);
421 void add_inst(backend_instruction *inst);
422 void compute_delay(schedule_node *node);
423 virtual void calculate_deps() = 0;
424 virtual schedule_node *choose_instruction_to_schedule() = 0;
425
426 /**
427 * Returns how many cycles it takes the instruction to issue.
428 *
429 * Instructions in gen hardware are handled one simd4 vector at a time,
430 * with 1 cycle per vector dispatched. Thus 8-wide pixel shaders take 2
431 * cycles to dispatch and 16-wide (compressed) instructions take 4.
432 */
433 virtual int issue_time(backend_instruction *inst) = 0;
434
435 virtual void count_remaining_grf_uses(backend_instruction *inst) = 0;
436 virtual void update_register_pressure(backend_instruction *inst) = 0;
437 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
438
439 void schedule_instructions(backend_instruction *next_block_header);
440
441 void *mem_ctx;
442
443 bool post_reg_alloc;
444 int instructions_to_schedule;
445 int grf_count;
446 int time;
447 exec_list instructions;
448 backend_visitor *bv;
449
450 /**
451 * Number of instructions left to schedule that reference each vgrf.
452 *
453 * Used so that we can prefer scheduling instructions that will end the
454 * live intervals of multiple variables, to reduce register pressure.
455 */
456 int *remaining_grf_uses;
457
458 /**
459 * Tracks whether each VGRF has had an instruction scheduled that uses it.
460 *
461 * This is used to estimate whether scheduling a new instruction will
462 * increase register pressure.
463 */
464 bool *grf_active;
465 };
466
467 class fs_instruction_scheduler : public instruction_scheduler
468 {
469 public:
470 fs_instruction_scheduler(fs_visitor *v, int grf_count, bool post_reg_alloc);
471 void calculate_deps();
472 bool is_compressed(fs_inst *inst);
473 schedule_node *choose_instruction_to_schedule();
474 int issue_time(backend_instruction *inst);
475 fs_visitor *v;
476
477 void count_remaining_grf_uses(backend_instruction *inst);
478 void update_register_pressure(backend_instruction *inst);
479 int get_register_pressure_benefit(backend_instruction *inst);
480 };
481
482 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
483 int grf_count,
484 bool post_reg_alloc)
485 : instruction_scheduler(v, grf_count, post_reg_alloc),
486 v(v)
487 {
488 }
489
490 void
491 fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
492 {
493 fs_inst *inst = (fs_inst *)be;
494
495 if (!remaining_grf_uses)
496 return;
497
498 if (inst->dst.file == GRF)
499 remaining_grf_uses[inst->dst.reg]++;
500
501 for (int i = 0; i < 3; i++) {
502 if (inst->src[i].file != GRF)
503 continue;
504
505 remaining_grf_uses[inst->src[i].reg]++;
506 }
507 }
508
509 void
510 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
511 {
512 fs_inst *inst = (fs_inst *)be;
513
514 if (!remaining_grf_uses)
515 return;
516
517 if (inst->dst.file == GRF) {
518 remaining_grf_uses[inst->dst.reg]--;
519 grf_active[inst->dst.reg] = true;
520 }
521
522 for (int i = 0; i < 3; i++) {
523 if (inst->src[i].file == GRF) {
524 remaining_grf_uses[inst->src[i].reg]--;
525 grf_active[inst->src[i].reg] = true;
526 }
527 }
528 }
529
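/* Estimate how much scheduling this instruction next would change register
 * pressure: VGRFs for which this is the last remaining use count as freed
 * (positive benefit), while VGRFs that no scheduled instruction has touched
 * yet count as newly live (negative benefit).
 */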
530 int
531 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
532 {
533 fs_inst *inst = (fs_inst *)be;
534 int benefit = 0;
535
536 if (inst->dst.file == GRF) {
537 if (remaining_grf_uses[inst->dst.reg] == 1)
538 benefit += v->virtual_grf_sizes[inst->dst.reg];
539 if (!grf_active[inst->dst.reg])
540 benefit -= v->virtual_grf_sizes[inst->dst.reg];
541 }
542
543 for (int i = 0; i < 3; i++) {
544 if (inst->src[i].file != GRF)
545 continue;
546
547 if (remaining_grf_uses[inst->src[i].reg] == 1)
548 benefit += v->virtual_grf_sizes[inst->src[i].reg];
549 if (!grf_active[inst->src[i].reg])
550 benefit -= v->virtual_grf_sizes[inst->src[i].reg];
551 }
552
553 return benefit;
554 }
555
556 class vec4_instruction_scheduler : public instruction_scheduler
557 {
558 public:
559 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
560 void calculate_deps();
561 schedule_node *choose_instruction_to_schedule();
562 int issue_time(backend_instruction *inst);
563 vec4_visitor *v;
564
565 void count_remaining_grf_uses(backend_instruction *inst);
566 void update_register_pressure(backend_instruction *inst);
567 int get_register_pressure_benefit(backend_instruction *inst);
568 };
569
570 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
571 int grf_count)
572 : instruction_scheduler(v, grf_count, true),
573 v(v)
574 {
575 }
576
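/* The vec4 scheduler only runs post-register-allocation (the constructor
 * above passes post_reg_alloc = true), so the register-pressure hooks don't
 * need to track anything.
 */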
577 void
578 vec4_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
579 {
580 }
581
582 void
583 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
584 {
585 }
586
587 int
588 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
589 {
590 return 0;
591 }
592
593 schedule_node::schedule_node(backend_instruction *inst,
594 instruction_scheduler *sched)
595 {
596 struct brw_context *brw = sched->bv->brw;
597
598 this->inst = inst;
599 this->child_array_size = 0;
600 this->children = NULL;
601 this->child_latency = NULL;
602 this->child_count = 0;
603 this->parent_count = 0;
604 this->unblocked_time = 0;
605    this->cand_generation = 0;
       this->delay = 0;
606
607 /* We can't measure Gen6 timings directly but expect them to be much
608     * closer to Gen7 than to Gen4.
609 */
610 if (!sched->post_reg_alloc)
611 this->latency = 1;
612 else if (brw->gen >= 6)
613 set_latency_gen7(brw->is_haswell);
614 else
615 set_latency_gen4();
616 }
617
618 void
619 instruction_scheduler::add_inst(backend_instruction *inst)
620 {
621 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
622
623 assert(!inst->is_head_sentinel());
624 assert(!inst->is_tail_sentinel());
625
626 this->instructions_to_schedule++;
627
628 inst->remove();
629 instructions.push_tail(n);
630 }
631
632 /** Recursive computation of the delay member of a node (a delay of zero means "not yet computed"). */
633 void
634 instruction_scheduler::compute_delay(schedule_node *n)
635 {
636 if (!n->child_count) {
637 n->delay = issue_time(n->inst);
638 } else {
639 for (int i = 0; i < n->child_count; i++) {
640 if (!n->children[i]->delay)
641 compute_delay(n->children[i]);
642 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
643 }
644 }
645 }
646
647 /**
648 * Add a dependency between two instruction nodes.
649 *
650 * The @after node will be scheduled after @before. We will try to
651 * schedule it @latency cycles after @before, but no guarantees there.
652 */
653 void
654 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
655 int latency)
656 {
657 if (!before || !after)
658 return;
659
660 assert(before != after);
661
662 for (int i = 0; i < before->child_count; i++) {
663 if (before->children[i] == after) {
664 before->child_latency[i] = MAX2(before->child_latency[i], latency);
665 return;
666 }
667 }
668
669 if (before->child_array_size <= before->child_count) {
670 if (before->child_array_size < 16)
671 before->child_array_size = 16;
672 else
673 before->child_array_size *= 2;
674
675 before->children = reralloc(mem_ctx, before->children,
676 schedule_node *,
677 before->child_array_size);
678 before->child_latency = reralloc(mem_ctx, before->child_latency,
679 int, before->child_array_size);
680 }
681
682 before->children[before->child_count] = after;
683 before->child_latency[before->child_count] = latency;
684 before->child_count++;
685 after->parent_count++;
686 }
687
688 void
689 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
690 {
691 if (!before)
692 return;
693
694 add_dep(before, after, before->latency);
695 }
696
697 /**
698 * Sometimes we really want this node to execute after everything that
699  * came before it and before everything that follows it. This adds
700 * the deps to do so.
701 */
702 void
703 instruction_scheduler::add_barrier_deps(schedule_node *n)
704 {
705 schedule_node *prev = (schedule_node *)n->prev;
706 schedule_node *next = (schedule_node *)n->next;
707
708 if (prev) {
709 while (!prev->is_head_sentinel()) {
710 add_dep(prev, n, 0);
711 prev = (schedule_node *)prev->prev;
712 }
713 }
714
715 if (next) {
716 while (!next->is_tail_sentinel()) {
717 add_dep(n, next, 0);
718 next = (schedule_node *)next->next;
719 }
720 }
721 }
722
723 /* Instruction scheduling needs to be aware of when an MRF write
724 * actually writes 2 MRFs.
725 */
726 bool
727 fs_instruction_scheduler::is_compressed(fs_inst *inst)
728 {
729 return (v->dispatch_width == 16 &&
730 !inst->force_uncompressed &&
731 !inst->force_sechalf);
732 }
733
734 void
735 fs_instruction_scheduler::calculate_deps()
736 {
737 /* Pre-register-allocation, this tracks the last write per VGRF (so
738 * different reg_offsets within it can interfere when they shouldn't).
739 * After register allocation, reg_offsets are gone and we track individual
740 * GRF registers.
741 */
742 schedule_node *last_grf_write[grf_count];
743 schedule_node *last_mrf_write[BRW_MAX_MRF];
744 schedule_node *last_conditional_mod[2] = { NULL, NULL };
745 /* Fixed HW registers are assumed to be separate from the virtual
746 * GRFs, so they can be tracked separately. We don't really write
747 * to fixed GRFs much, so don't bother tracking them on a more
748 * granular level.
749 */
750 schedule_node *last_fixed_grf_write = NULL;
751 int reg_width = v->dispatch_width / 8;
752
753 /* The last instruction always needs to still be the last
754 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
755 * WHILE) and scheduling other things after it would disturb the
756 * basic block, or it's FB_WRITE and we should do a better job at
757 * dead code elimination anyway.
758 */
759 schedule_node *last = (schedule_node *)instructions.get_tail();
760 add_barrier_deps(last);
761
762 memset(last_grf_write, 0, sizeof(last_grf_write));
763 memset(last_mrf_write, 0, sizeof(last_mrf_write));
764
765 /* top-to-bottom dependencies: RAW and WAW. */
766 foreach_list(node, &instructions) {
767 schedule_node *n = (schedule_node *)node;
768 fs_inst *inst = (fs_inst *)n->inst;
769
770 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
771 inst->has_side_effects())
772 add_barrier_deps(n);
773
774 /* read-after-write deps. */
775 for (int i = 0; i < 3; i++) {
776 if (inst->src[i].file == GRF) {
777 if (post_reg_alloc) {
778 for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
779 add_dep(last_grf_write[inst->src[i].reg + r], n);
780 } else {
781 add_dep(last_grf_write[inst->src[i].reg], n);
782 }
783 } else if (inst->src[i].file == HW_REG &&
784 (inst->src[i].fixed_hw_reg.file ==
785 BRW_GENERAL_REGISTER_FILE)) {
786 if (post_reg_alloc) {
787 int size = reg_width;
788 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
789 size = 1;
790 for (int r = 0; r < size; r++)
791 add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
792 } else {
793 add_dep(last_fixed_grf_write, n);
794 }
795 } else if (inst->src[i].file != BAD_FILE &&
796 inst->src[i].file != IMM &&
797 inst->src[i].file != UNIFORM) {
798 assert(inst->src[i].file != MRF);
799 add_barrier_deps(n);
800 }
801 }
802
803 if (inst->base_mrf != -1) {
804 for (int i = 0; i < inst->mlen; i++) {
805 /* It looks like the MRF regs are released in the send
806 * instruction once it's sent, not when the result comes
807 * back.
808 */
809 add_dep(last_mrf_write[inst->base_mrf + i], n);
810 }
811 }
812
813 if (inst->reads_flag()) {
814 add_dep(last_conditional_mod[inst->flag_subreg], n);
815 }
816
817 /* write-after-write deps. */
818 if (inst->dst.file == GRF) {
819 if (post_reg_alloc) {
820 for (int r = 0; r < inst->regs_written * reg_width; r++) {
821 add_dep(last_grf_write[inst->dst.reg + r], n);
822 last_grf_write[inst->dst.reg + r] = n;
823 }
824 } else {
825 add_dep(last_grf_write[inst->dst.reg], n);
826 last_grf_write[inst->dst.reg] = n;
827 }
828 } else if (inst->dst.file == MRF) {
829 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
830
831 add_dep(last_mrf_write[reg], n);
832 last_mrf_write[reg] = n;
833 if (is_compressed(inst)) {
834 if (inst->dst.reg & BRW_MRF_COMPR4)
835 reg += 4;
836 else
837 reg++;
838 add_dep(last_mrf_write[reg], n);
839 last_mrf_write[reg] = n;
840 }
841 } else if (inst->dst.file == HW_REG &&
842 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
843 if (post_reg_alloc) {
844 for (int r = 0; r < reg_width; r++)
845 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
846 } else {
847 last_fixed_grf_write = n;
848 }
849 } else if (inst->dst.file != BAD_FILE) {
850 add_barrier_deps(n);
851 }
852
853 if (inst->mlen > 0 && inst->base_mrf != -1) {
854 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
855 add_dep(last_mrf_write[inst->base_mrf + i], n);
856 last_mrf_write[inst->base_mrf + i] = n;
857 }
858 }
859
860 if (inst->writes_flag()) {
861 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
862 last_conditional_mod[inst->flag_subreg] = n;
863 }
864 }
865
866 /* bottom-to-top dependencies: WAR */
867 memset(last_grf_write, 0, sizeof(last_grf_write));
868 memset(last_mrf_write, 0, sizeof(last_mrf_write));
869 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
870 last_fixed_grf_write = NULL;
871
872 exec_node *node;
873 exec_node *prev;
874 for (node = instructions.get_tail(), prev = node->prev;
875 !node->is_head_sentinel();
876 node = prev, prev = node->prev) {
877 schedule_node *n = (schedule_node *)node;
878 fs_inst *inst = (fs_inst *)n->inst;
879
880 /* write-after-read deps. */
881 for (int i = 0; i < 3; i++) {
882 if (inst->src[i].file == GRF) {
883 if (post_reg_alloc) {
884 for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
885 add_dep(n, last_grf_write[inst->src[i].reg + r]);
886 } else {
887 add_dep(n, last_grf_write[inst->src[i].reg]);
888 }
889 } else if (inst->src[i].file == HW_REG &&
890 (inst->src[i].fixed_hw_reg.file ==
891 BRW_GENERAL_REGISTER_FILE)) {
892 if (post_reg_alloc) {
893 int size = reg_width;
894 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
895 size = 1;
896 for (int r = 0; r < size; r++)
897 add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r]);
898 } else {
899 add_dep(n, last_fixed_grf_write);
900 }
901 } else if (inst->src[i].file != BAD_FILE &&
902 inst->src[i].file != IMM &&
903 inst->src[i].file != UNIFORM) {
904 assert(inst->src[i].file != MRF);
905 add_barrier_deps(n);
906 }
907 }
908
909 if (inst->base_mrf != -1) {
910 for (int i = 0; i < inst->mlen; i++) {
911 /* It looks like the MRF regs are released in the send
912 * instruction once it's sent, not when the result comes
913 * back.
914 */
915 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
916 }
917 }
918
919 if (inst->reads_flag()) {
920 add_dep(n, last_conditional_mod[inst->flag_subreg]);
921 }
922
923 /* Update the things this instruction wrote, so earlier reads
924        * can mark this as a WAR dependency.
925 */
926 if (inst->dst.file == GRF) {
927 if (post_reg_alloc) {
928 for (int r = 0; r < inst->regs_written * reg_width; r++)
929 last_grf_write[inst->dst.reg + r] = n;
930 } else {
931 last_grf_write[inst->dst.reg] = n;
932 }
933 } else if (inst->dst.file == MRF) {
934 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
935
936 last_mrf_write[reg] = n;
937
938 if (is_compressed(inst)) {
939 if (inst->dst.reg & BRW_MRF_COMPR4)
940 reg += 4;
941 else
942 reg++;
943
944 last_mrf_write[reg] = n;
945 }
946 } else if (inst->dst.file == HW_REG &&
947 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
948 if (post_reg_alloc) {
949 for (int r = 0; r < reg_width; r++)
950 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
951 } else {
952 last_fixed_grf_write = n;
953 }
954 } else if (inst->dst.file != BAD_FILE) {
955 add_barrier_deps(n);
956 }
957
958 if (inst->mlen > 0 && inst->base_mrf != -1) {
959 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
960 last_mrf_write[inst->base_mrf + i] = n;
961 }
962 }
963
964 if (inst->writes_flag()) {
965 last_conditional_mod[inst->flag_subreg] = n;
966 }
967 }
968 }
969
970 void
971 vec4_instruction_scheduler::calculate_deps()
972 {
973 schedule_node *last_grf_write[grf_count];
974 schedule_node *last_mrf_write[BRW_MAX_MRF];
975 schedule_node *last_conditional_mod = NULL;
976 /* Fixed HW registers are assumed to be separate from the virtual
977 * GRFs, so they can be tracked separately. We don't really write
978 * to fixed GRFs much, so don't bother tracking them on a more
979 * granular level.
980 */
981 schedule_node *last_fixed_grf_write = NULL;
982
983 /* The last instruction always needs to still be the last instruction.
984 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
985 * other things after it would disturb the basic block, or it's the EOT
986 * URB_WRITE and we should do a better job at dead code eliminating
987 * anything that could have been scheduled after it.
988 */
989 schedule_node *last = (schedule_node *)instructions.get_tail();
990 add_barrier_deps(last);
991
992 memset(last_grf_write, 0, sizeof(last_grf_write));
993 memset(last_mrf_write, 0, sizeof(last_mrf_write));
994
995 /* top-to-bottom dependencies: RAW and WAW. */
996 foreach_list(node, &instructions) {
997 schedule_node *n = (schedule_node *)node;
998 vec4_instruction *inst = (vec4_instruction *)n->inst;
999
1000 if (inst->has_side_effects())
1001 add_barrier_deps(n);
1002
1003 /* read-after-write deps. */
1004 for (int i = 0; i < 3; i++) {
1005 if (inst->src[i].file == GRF) {
1006 add_dep(last_grf_write[inst->src[i].reg], n);
1007 } else if (inst->src[i].file == HW_REG &&
1008 (inst->src[i].fixed_hw_reg.file ==
1009 BRW_GENERAL_REGISTER_FILE)) {
1010 add_dep(last_fixed_grf_write, n);
1011 } else if (inst->src[i].file != BAD_FILE &&
1012 inst->src[i].file != IMM &&
1013 inst->src[i].file != UNIFORM) {
1014 /* No reads from MRF, and ATTR is already translated away */
1015 assert(inst->src[i].file != MRF &&
1016 inst->src[i].file != ATTR);
1017 add_barrier_deps(n);
1018 }
1019 }
1020
1021 for (int i = 0; i < inst->mlen; i++) {
1022 /* It looks like the MRF regs are released in the send
1023 * instruction once it's sent, not when the result comes
1024 * back.
1025 */
1026 add_dep(last_mrf_write[inst->base_mrf + i], n);
1027 }
1028
1029 if (inst->depends_on_flags()) {
1030 assert(last_conditional_mod);
1031 add_dep(last_conditional_mod, n);
1032 }
1033
1034 /* write-after-write deps. */
1035 if (inst->dst.file == GRF) {
1036 add_dep(last_grf_write[inst->dst.reg], n);
1037 last_grf_write[inst->dst.reg] = n;
1038 } else if (inst->dst.file == MRF) {
1039 add_dep(last_mrf_write[inst->dst.reg], n);
1040 last_mrf_write[inst->dst.reg] = n;
1041 } else if (inst->dst.file == HW_REG &&
1042 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1043 last_fixed_grf_write = n;
1044 } else if (inst->dst.file != BAD_FILE) {
1045 add_barrier_deps(n);
1046 }
1047
1048 if (inst->mlen > 0) {
1049 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1050 add_dep(last_mrf_write[inst->base_mrf + i], n);
1051 last_mrf_write[inst->base_mrf + i] = n;
1052 }
1053 }
1054
1055 if (inst->conditional_mod) {
1056 add_dep(last_conditional_mod, n, 0);
1057 last_conditional_mod = n;
1058 }
1059 }
1060
1061 /* bottom-to-top dependencies: WAR */
1062 memset(last_grf_write, 0, sizeof(last_grf_write));
1063 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1064 last_conditional_mod = NULL;
1065 last_fixed_grf_write = NULL;
1066
1067 exec_node *node;
1068 exec_node *prev;
1069 for (node = instructions.get_tail(), prev = node->prev;
1070 !node->is_head_sentinel();
1071 node = prev, prev = node->prev) {
1072 schedule_node *n = (schedule_node *)node;
1073 vec4_instruction *inst = (vec4_instruction *)n->inst;
1074
1075 /* write-after-read deps. */
1076 for (int i = 0; i < 3; i++) {
1077 if (inst->src[i].file == GRF) {
1078 add_dep(n, last_grf_write[inst->src[i].reg]);
1079 } else if (inst->src[i].file == HW_REG &&
1080 (inst->src[i].fixed_hw_reg.file ==
1081 BRW_GENERAL_REGISTER_FILE)) {
1082 add_dep(n, last_fixed_grf_write);
1083 } else if (inst->src[i].file != BAD_FILE &&
1084 inst->src[i].file != IMM &&
1085 inst->src[i].file != UNIFORM) {
1086 assert(inst->src[i].file != MRF &&
1087 inst->src[i].file != ATTR);
1088 add_barrier_deps(n);
1089 }
1090 }
1091
1092 for (int i = 0; i < inst->mlen; i++) {
1093 /* It looks like the MRF regs are released in the send
1094 * instruction once it's sent, not when the result comes
1095 * back.
1096 */
1097 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1098 }
1099
1100 if (inst->depends_on_flags()) {
1101 add_dep(n, last_conditional_mod);
1102 }
1103
1104 /* Update the things this instruction wrote, so earlier reads
1105        * can mark this as a WAR dependency.
1106 */
1107 if (inst->dst.file == GRF) {
1108 last_grf_write[inst->dst.reg] = n;
1109 } else if (inst->dst.file == MRF) {
1110 last_mrf_write[inst->dst.reg] = n;
1111 } else if (inst->dst.file == HW_REG &&
1112 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1113 last_fixed_grf_write = n;
1114 } else if (inst->dst.file != BAD_FILE) {
1115 add_barrier_deps(n);
1116 }
1117
1118 if (inst->mlen > 0) {
1119 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1120 last_mrf_write[inst->base_mrf + i] = n;
1121 }
1122 }
1123
1124 if (inst->conditional_mod) {
1125 last_conditional_mod = n;
1126 }
1127 }
1128 }
1129
1130 schedule_node *
1131 fs_instruction_scheduler::choose_instruction_to_schedule()
1132 {
1133 schedule_node *chosen = NULL;
1134
1135 if (post_reg_alloc) {
1136 int chosen_time = 0;
1137
1138       /* Of the instructions ready to execute or the closest to
1139 * being ready, choose the oldest one.
1140 */
1141 foreach_list(node, &instructions) {
1142 schedule_node *n = (schedule_node *)node;
1143
1144 if (!chosen || n->unblocked_time < chosen_time) {
1145 chosen = n;
1146 chosen_time = n->unblocked_time;
1147 }
1148 }
1149 } else {
1150 /* Before register allocation, we don't care about the latencies of
1151 * instructions. All we care about is reducing live intervals of
1152 * variables so that we can avoid register spilling, or get 16-wide
1153 * shaders which naturally do a better job of hiding instruction
1154 * latency.
1155 */
1156 foreach_list(node, &instructions) {
1157 schedule_node *n = (schedule_node *)node;
1158 fs_inst *inst = (fs_inst *)n->inst;
1159
1160 if (!chosen) {
1161 chosen = n;
1162 continue;
1163 }
1164
1165 /* Most important: If we can definitely reduce register pressure, do
1166 * so immediately.
1167 */
1168 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1169 int chosen_register_pressure_benefit =
1170 get_register_pressure_benefit(chosen->inst);
1171
1172 if (register_pressure_benefit > 0 &&
1173 register_pressure_benefit > chosen_register_pressure_benefit) {
1174 chosen = n;
1175 continue;
1176 } else if (chosen_register_pressure_benefit > 0 &&
1177 (register_pressure_benefit <
1178 chosen_register_pressure_benefit)) {
1179 continue;
1180 }
1181
1182 /* Prefer instructions that recently became available for scheduling.
1183 * These are the things that are most likely to (eventually) make a
1184 * variable dead and reduce register pressure. Typical register
1185 * pressure estimates don't work for us because most of our pressure
1186 * comes from texturing, where no single instruction to schedule will
1187 * make a vec4 value dead.
1188 */
1189 if (n->cand_generation > chosen->cand_generation) {
1190 chosen = n;
1191 continue;
1192 } else if (n->cand_generation < chosen->cand_generation) {
1193 continue;
1194 }
1195
1196 /* On MRF-using chips, prefer non-SEND instructions. If we don't do
1197 * this, then because we prefer instructions that just became
1198 * candidates, we'll end up in a pattern of scheduling a SEND, then
1199 * the MRFs for the next SEND, then the next SEND, then the MRFs,
1200 * etc., without ever consuming the results of a send.
1201 */
1202 if (v->brw->gen < 7) {
1203 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1204
1205 /* We use regs_written > 1 as our test for the kind of send
1206 * instruction to avoid -- only sends generate many regs, and a
1207 * single-result send is probably actually reducing register
1208 * pressure.
1209 */
1210 if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) {
1211 chosen = n;
1212 continue;
1213 } else if (inst->regs_written > chosen_inst->regs_written) {
1214 continue;
1215 }
1216 }
1217
1218 /* For instructions pushed on the cands list at the same time, prefer
1219 * the one with the highest delay to the end of the program. This is
1220 * most likely to have its values able to be consumed first (such as
1221 * for a large tree of lowered ubo loads, which appear reversed in
1222 * the instruction stream with respect to when they can be consumed).
1223 */
1224 if (n->delay > chosen->delay) {
1225 chosen = n;
1226 continue;
1227 } else if (n->delay < chosen->delay) {
1228 continue;
1229 }
1230
1231 /* If all other metrics are equal, we prefer the first instruction in
1232        * the list (original program order).
1233 */
1234 }
1235 }
1236
1237 return chosen;
1238 }
1239
1240 schedule_node *
1241 vec4_instruction_scheduler::choose_instruction_to_schedule()
1242 {
1243 schedule_node *chosen = NULL;
1244 int chosen_time = 0;
1245
1246 /* Of the instructions ready to execute or the closest to being ready,
1247 * choose the oldest one.
1248 */
1249 foreach_list(node, &instructions) {
1250 schedule_node *n = (schedule_node *)node;
1251
1252 if (!chosen || n->unblocked_time < chosen_time) {
1253 chosen = n;
1254 chosen_time = n->unblocked_time;
1255 }
1256 }
1257
1258 return chosen;
1259 }
1260
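/* A compressed (SIMD16) instruction dispatches twice as many SIMD4 vectors
 * as an uncompressed one, so it takes twice as long to issue.
 */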
1261 int
1262 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1263 {
1264 if (is_compressed((fs_inst *)inst))
1265 return 4;
1266 else
1267 return 2;
1268 }
1269
1270 int
1271 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1272 {
1273 /* We always execute as two vec4s in parallel. */
1274 return 2;
1275 }
1276
1277 void
1278 instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
1279 {
1280 time = 0;
1281
1282 /* Remove non-DAG heads from the list. */
1283 foreach_list_safe(node, &instructions) {
1284 schedule_node *n = (schedule_node *)node;
1285 if (n->parent_count != 0)
1286 n->remove();
1287 }
1288
1289 unsigned cand_generation = 1;
1290 while (!instructions.is_empty()) {
1291 schedule_node *chosen = choose_instruction_to_schedule();
1292
1293 /* Schedule this instruction. */
1294 assert(chosen);
1295 chosen->remove();
1296 next_block_header->insert_before(chosen->inst);
1297 instructions_to_schedule--;
1298 update_register_pressure(chosen->inst);
1299
1300 /* Update the clock for how soon an instruction could start after the
1301 * chosen one.
1302 */
1303 time += issue_time(chosen->inst);
1304
1305 /* If we expected a delay for scheduling, then bump the clock to reflect
1306 * that as well. In reality, the hardware will switch to another
1307 * hyperthread and may not return to dispatching our thread for a while
1308 * even after we're unblocked.
1309 */
1310 time = MAX2(time, chosen->unblocked_time);
1311
1312 if (debug) {
1313 printf("clock %4d, scheduled: ", time);
1314 bv->dump_instruction(chosen->inst);
1315 }
1316
1317 /* Now that we've scheduled a new instruction, some of its
1318 * children can be promoted to the list of instructions ready to
1319 * be scheduled. Update the children's unblocked time for this
1320 * DAG edge as we do so.
1321 */
1322 for (int i = chosen->child_count - 1; i >= 0; i--) {
1323 schedule_node *child = chosen->children[i];
1324
1325 child->unblocked_time = MAX2(child->unblocked_time,
1326 time + chosen->child_latency[i]);
1327
1328 if (debug) {
1329 printf("\tchild %d, %d parents: ", i, child->parent_count);
1330 bv->dump_instruction(child->inst);
1331 }
1332
1333 child->cand_generation = cand_generation;
1334 child->parent_count--;
1335 if (child->parent_count == 0) {
1336 if (debug) {
1337 printf("\t\tnow available\n");
1338 }
1339 instructions.push_head(child);
1340 }
1341 }
1342 cand_generation++;
1343
1344 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1345 * but it's more limited pre-gen6, so if we send something off to it then
1346 * the next math instruction isn't going to make progress until the first
1347 * is done.
1348 */
1349 if (chosen->inst->is_math()) {
1350 foreach_list(node, &instructions) {
1351 schedule_node *n = (schedule_node *)node;
1352
1353 if (n->inst->is_math())
1354 n->unblocked_time = MAX2(n->unblocked_time,
1355 time + chosen->latency);
1356 }
1357 }
1358 }
1359
1360 assert(instructions_to_schedule == 0);
1361 }
1362
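/* Top-level entry point: splits the instruction stream into basic blocks
 * (a control-flow instruction ends a block), builds the dependency DAG for
 * each block, and then list-schedules the block in place before the next
 * block's header.
 */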
1363 void
1364 instruction_scheduler::run(exec_list *all_instructions)
1365 {
1366 backend_instruction *next_block_header =
1367 (backend_instruction *)all_instructions->head;
1368
1369 if (debug) {
1370 printf("\nInstructions before scheduling (reg_alloc %d)\n", post_reg_alloc);
1371 bv->dump_instructions();
1372 }
1373
1374 /* Populate the remaining GRF uses array to improve the pre-regalloc
1375 * scheduling.
1376 */
1377 if (remaining_grf_uses) {
1378 foreach_list(node, all_instructions) {
1379 count_remaining_grf_uses((backend_instruction *)node);
1380 }
1381 }
1382
1383 while (!next_block_header->is_tail_sentinel()) {
1384 /* Add things to be scheduled until we get to a new BB. */
1385 while (!next_block_header->is_tail_sentinel()) {
1386 backend_instruction *inst = next_block_header;
1387 next_block_header = (backend_instruction *)next_block_header->next;
1388
1389 add_inst(inst);
1390 if (inst->is_control_flow())
1391 break;
1392 }
1393 calculate_deps();
1394
1395 foreach_list(node, &instructions) {
1396 schedule_node *n = (schedule_node *)node;
1397 compute_delay(n);
1398 }
1399
1400 schedule_instructions(next_block_header);
1401 }
1402
1403 if (debug) {
1404 printf("\nInstructions after scheduling (reg_alloc %d)\n", post_reg_alloc);
1405 bv->dump_instructions();
1406 }
1407 }
1408
1409 void
1410 fs_visitor::schedule_instructions(bool post_reg_alloc)
1411 {
1412 int grf_count;
1413 if (post_reg_alloc)
1414 grf_count = grf_used;
1415 else
1416 grf_count = virtual_grf_count;
1417
1418 fs_instruction_scheduler sched(this, grf_count, post_reg_alloc);
1419 sched.run(&instructions);
1420
1421 if (unlikely(INTEL_DEBUG & DEBUG_WM) && post_reg_alloc) {
1422 printf("fs%d estimated execution time: %d cycles\n",
1423 dispatch_width, sched.time);
1424 }
1425
1426 invalidate_live_intervals();
1427 }
1428
1429 void
1430 vec4_visitor::opt_schedule_instructions()
1431 {
1432 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1433 sched.run(&instructions);
1434
1435 if (unlikely(debug_flag)) {
1436 printf("vec4 estimated execution time: %d cycles\n", sched.time);
1437 }
1438
1439 this->live_intervals_valid = false;
1440 }