i965/vec4: Fix the scheduler to take into account reads and writes of multiple registers.
mesa.git: src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_vec4.h"
30 #include "brw_cfg.h"
31 #include "brw_shader.h"
32 #include "glsl/glsl_types.h"
33 #include "glsl/ir_optimization.h"
34
35 using namespace brw;
36
 37 /** @file brw_schedule_instructions.cpp
38 *
 39  * List scheduling of FS and vec4 instructions.
40 *
41 * The basic model of the list scheduler is to take a basic block,
42 * compute a DAG of the dependencies (RAW ordering with latency, WAW
43 * ordering with latency, WAR ordering), and make a list of the DAG heads.
44 * Heuristically pick a DAG head, then put all the children that are
45 * now DAG heads into the list of things to schedule.
46 *
47 * The heuristic is the important part. We're trying to be cheap,
 48  * since actually computing the optimal scheduling is NP-complete.
49 * What we do is track a "current clock". When we schedule a node, we
50 * update the earliest-unblocked clock time of its children, and
51 * increment the clock. Then, when trying to schedule, we just pick
52 * the earliest-unblocked instruction to schedule.
53 *
54 * Note that often there will be many things which could execute
 55  * immediately, and there is a range of heuristic options to choose
56 * from in picking among those.
57 */
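
/* Roughly, instruction_scheduler::run() below does the following for each
 * basic block (a sketch of the flow, not literal code):
 *
 *    add_insts_from_block(block);       // wrap each inst in a schedule_node
 *    calculate_deps();                  // build the RAW/WAW/WAR DAG
 *    foreach node n: compute_delay(n);  // delay to the end of the block
 *    schedule_instructions(block);      // greedily re-emit ready DAG heads
 */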
58
59 static bool debug = false;
60
61 class instruction_scheduler;
62
63 class schedule_node : public exec_node
64 {
65 public:
66 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
67 void set_latency_gen4();
68 void set_latency_gen7(bool is_haswell);
69
70 backend_instruction *inst;
71 schedule_node **children;
72 int *child_latency;
73 int child_count;
74 int parent_count;
75 int child_array_size;
76 int unblocked_time;
77 int latency;
78
79 /**
80 * Which iteration of pushing groups of children onto the candidates list
81 * this node was a part of.
82 */
83 unsigned cand_generation;
84
85 /**
 86     * This is the instruction's latency plus the maximum delay of its
 87     * children, or just the issue_time if it's a leaf node.
88 */
89 int delay;
90 };
91
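/* Gen4 math latency is estimated here as rounds * channel count *
 * math_latency; the per-opcode round counts are noted in the switch below.
 * These are coarse estimates.
 */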
92 void
93 schedule_node::set_latency_gen4()
94 {
95 int chans = 8;
96 int math_latency = 22;
97
98 switch (inst->opcode) {
99 case SHADER_OPCODE_RCP:
100 this->latency = 1 * chans * math_latency;
101 break;
102 case SHADER_OPCODE_RSQ:
103 this->latency = 2 * chans * math_latency;
104 break;
105 case SHADER_OPCODE_INT_QUOTIENT:
106 case SHADER_OPCODE_SQRT:
107 case SHADER_OPCODE_LOG2:
108 /* full precision log. partial is 2. */
109 this->latency = 3 * chans * math_latency;
110 break;
111 case SHADER_OPCODE_INT_REMAINDER:
112 case SHADER_OPCODE_EXP2:
113 /* full precision. partial is 3, same throughput. */
114 this->latency = 4 * chans * math_latency;
115 break;
116 case SHADER_OPCODE_POW:
117 this->latency = 8 * chans * math_latency;
118 break;
119 case SHADER_OPCODE_SIN:
120 case SHADER_OPCODE_COS:
121 /* minimum latency, max is 12 rounds. */
122 this->latency = 5 * chans * math_latency;
123 break;
124 default:
125 this->latency = 2;
126 break;
127 }
128 }
129
130 void
131 schedule_node::set_latency_gen7(bool is_haswell)
132 {
133 switch (inst->opcode) {
134 case BRW_OPCODE_MAD:
135 /* 2 cycles
136 * (since the last two src operands are in different register banks):
137 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
138 *
139 * 3 cycles on IVB, 4 on HSW
140 * (since the last two src operands are in the same register bank):
141 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
142 *
143 * 18 cycles on IVB, 16 on HSW
144 * (since the last two src operands are in different register banks):
145 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
146 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
147 *
148 * 20 cycles on IVB, 18 on HSW
149 * (since the last two src operands are in the same register bank):
150 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
151 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
152 */
153
154 /* Our register allocator doesn't know about register banks, so use the
155 * higher latency.
156 */
157 latency = is_haswell ? 16 : 18;
158 break;
159
160 case BRW_OPCODE_LRP:
161 /* 2 cycles
162 * (since the last two src operands are in different register banks):
163 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
164 *
165 * 3 cycles on IVB, 4 on HSW
166 * (since the last two src operands are in the same register bank):
167 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
168 *
169 * 16 cycles on IVB, 14 on HSW
170 * (since the last two src operands are in different register banks):
171 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
172 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
173 *
174 * 16 cycles
175 * (since the last two src operands are in the same register bank):
176 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
177 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
178 */
179
180 /* Our register allocator doesn't know about register banks, so use the
181 * higher latency.
182 */
183 latency = 14;
184 break;
185
186 case SHADER_OPCODE_RCP:
187 case SHADER_OPCODE_RSQ:
188 case SHADER_OPCODE_SQRT:
189 case SHADER_OPCODE_LOG2:
190 case SHADER_OPCODE_EXP2:
191 case SHADER_OPCODE_SIN:
192 case SHADER_OPCODE_COS:
193 /* 2 cycles:
194 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
195 *
196 * 18 cycles:
197 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
198 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
199 *
200 * Same for exp2, log2, rsq, sqrt, sin, cos.
201 */
202 latency = is_haswell ? 14 : 16;
203 break;
204
205 case SHADER_OPCODE_POW:
206 /* 2 cycles:
207 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
208 *
209 * 26 cycles:
210 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
211 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
212 */
213 latency = is_haswell ? 22 : 24;
214 break;
215
216 case SHADER_OPCODE_TEX:
217 case SHADER_OPCODE_TXD:
218 case SHADER_OPCODE_TXF:
219 case SHADER_OPCODE_TXL:
220 /* 18 cycles:
221 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
222 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
223 * send(8) g4<1>UW g114<8,8,1>F
224 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
225 *
226 * 697 +/-49 cycles (min 610, n=26):
227 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
228 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
229 * send(8) g4<1>UW g114<8,8,1>F
230 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
231 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
232 *
 233     * So our first texture load of the batchbuffer takes ~700 cycles,
 234     * since the caches are cold at that point.
235 *
236 * 840 +/- 92 cycles (min 720, n=25):
237 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
238 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
239 * send(8) g4<1>UW g114<8,8,1>F
240 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
241 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
242 * send(8) g4<1>UW g114<8,8,1>F
243 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
244 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
245 *
246 * On the second load, it takes just an extra ~140 cycles, and after
247 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
248 *
249 * 683 +/- 49 cycles (min = 602, n=47):
250 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
251 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
252 * send(8) g4<1>UW g114<8,8,1>F
253 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
254 * send(8) g50<1>UW g114<8,8,1>F
255 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
256 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
257 *
258 * The unit appears to be pipelined, since this matches up with the
259 * cache-cold case, despite there being two loads here. If you replace
260 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
261 *
262 * So, take some number between the cache-hot 140 cycles and the
263 * cache-cold 700 cycles. No particular tuning was done on this.
264 *
265 * I haven't done significant testing of the non-TEX opcodes. TXL at
266 * least looked about the same as TEX.
267 */
268 latency = 200;
269 break;
270
271 case SHADER_OPCODE_TXS:
272 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
273 * cycles (n=15):
274 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
275 * send(8) g6<1>UW g114<8,8,1>F
276 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
277 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
278 *
279 *
280 * Two loads was 535 +/- 30 cycles (n=19):
281 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
282 * send(16) g6<1>UW g114<8,8,1>F
283 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
284 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
285 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
286 * send(16) g8<1>UW g114<8,8,1>F
287 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
288 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
289 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
290 *
 291     * Since the only cache that should matter is the instruction/state
 292     * cache containing the surface state, assume that we always have hot
 293     * caches.
294 */
295 latency = 100;
296 break;
297
298 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
299 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
300 case VS_OPCODE_PULL_CONSTANT_LOAD:
 301     /* Testing with varying-index pull constants:
302 *
303 * 16 cycles:
304 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
305 * send(8) g4<1>F g4<8,8,1>D
306 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
307 *
308 * ~480 cycles:
309 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
310 * send(8) g4<1>F g4<8,8,1>D
311 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
312 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
313 *
314 * ~620 cycles:
315 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
316 * send(8) g4<1>F g4<8,8,1>D
317 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
318 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
319 * send(8) g4<1>F g4<8,8,1>D
320 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
321 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
322 *
 323     * So, if it's cache-hot, it's about 140. If it's cache-cold, it's
 324     * about 460. We expect to mostly be cache-hot, so pick something
 325     * biased in that direction.
326 */
327 latency = 200;
328 break;
329
330 case SHADER_OPCODE_GEN7_SCRATCH_READ:
331 /* Testing a load from offset 0, that had been previously written:
332 *
333 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
334 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
335 *
336 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
337 * then around 140. Presumably this is cache hit vs miss.
338 */
339 latency = 50;
340 break;
341
342 case SHADER_OPCODE_UNTYPED_ATOMIC:
343 /* Test code:
344 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
345 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
346 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
347 * send(8) g4<1>ud g112<8,8,1>ud
348 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
349 *
 350     * Running it 100 times as a fragment shader on a 128x128 quad
351 * gives an average latency of 13867 cycles per atomic op,
352 * standard deviation 3%. Note that this is a rather
 353     * pessimistic estimate; the actual latency in cases with few
354 * collisions between threads and favorable pipelining has been
355 * seen to be reduced by a factor of 100.
356 */
357 latency = 14000;
358 break;
359
360 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
361 /* Test code:
362 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
363 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
364 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
365 * send(8) g4<1>UD g112<8,8,1>UD
366 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
367 * .
368 * . [repeats 8 times]
369 * .
370 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
371 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
372 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
373 * send(8) g4<1>UD g112<8,8,1>UD
374 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
375 *
 376     * Running it 100 times as a fragment shader on a 128x128 quad
377 * gives an average latency of 583 cycles per surface read,
378 * standard deviation 0.9%.
379 */
380 latency = is_haswell ? 300 : 600;
381 break;
382
383 default:
384 /* 2 cycles:
385 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
386 *
387 * 16 cycles:
388 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
389 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
390 */
391 latency = 14;
392 break;
393 }
394 }
395
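/* State and driver logic shared by both backends; fs_instruction_scheduler
 * and vec4_instruction_scheduler below provide the backend-specific
 * dependency calculation, issue times and scheduling heuristics.
 */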
396 class instruction_scheduler {
397 public:
398 instruction_scheduler(backend_visitor *v, int grf_count,
399 instruction_scheduler_mode mode)
400 {
401 this->bv = v;
402 this->mem_ctx = ralloc_context(NULL);
403 this->grf_count = grf_count;
404 this->instructions.make_empty();
405 this->instructions_to_schedule = 0;
406 this->post_reg_alloc = (mode == SCHEDULE_POST);
407 this->mode = mode;
408 this->time = 0;
409 if (!post_reg_alloc) {
410 this->remaining_grf_uses = rzalloc_array(mem_ctx, int, grf_count);
411 this->grf_active = rzalloc_array(mem_ctx, bool, grf_count);
412 } else {
413 this->remaining_grf_uses = NULL;
414 this->grf_active = NULL;
415 }
416 }
417
418 ~instruction_scheduler()
419 {
420 ralloc_free(this->mem_ctx);
421 }
422 void add_barrier_deps(schedule_node *n);
423 void add_dep(schedule_node *before, schedule_node *after, int latency);
424 void add_dep(schedule_node *before, schedule_node *after);
425
426 void run(cfg_t *cfg);
427 void add_insts_from_block(bblock_t *block);
428 void compute_delay(schedule_node *node);
429 virtual void calculate_deps() = 0;
430 virtual schedule_node *choose_instruction_to_schedule() = 0;
431
432 /**
433 * Returns how many cycles it takes the instruction to issue.
434 *
435 * Instructions in gen hardware are handled one simd4 vector at a time,
436 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
437 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
438 */
439 virtual int issue_time(backend_instruction *inst) = 0;
440
441 virtual void count_remaining_grf_uses(backend_instruction *inst) = 0;
442 virtual void update_register_pressure(backend_instruction *inst) = 0;
443 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
444
445 void schedule_instructions(bblock_t *block);
446
447 void *mem_ctx;
448
449 bool post_reg_alloc;
450 int instructions_to_schedule;
451 int grf_count;
452 int time;
453 exec_list instructions;
454 backend_visitor *bv;
455
456 instruction_scheduler_mode mode;
457
458 /**
459 * Number of instructions left to schedule that reference each vgrf.
460 *
461 * Used so that we can prefer scheduling instructions that will end the
462 * live intervals of multiple variables, to reduce register pressure.
463 */
464 int *remaining_grf_uses;
465
466 /**
467 * Tracks whether each VGRF has had an instruction scheduled that uses it.
468 *
469 * This is used to estimate whether scheduling a new instruction will
470 * increase register pressure.
471 */
472 bool *grf_active;
473 };
474
475 class fs_instruction_scheduler : public instruction_scheduler
476 {
477 public:
478 fs_instruction_scheduler(fs_visitor *v, int grf_count,
479 instruction_scheduler_mode mode);
480 void calculate_deps();
481 bool is_compressed(fs_inst *inst);
482 schedule_node *choose_instruction_to_schedule();
483 int issue_time(backend_instruction *inst);
484 fs_visitor *v;
485
486 void count_remaining_grf_uses(backend_instruction *inst);
487 void update_register_pressure(backend_instruction *inst);
488 int get_register_pressure_benefit(backend_instruction *inst);
489 };
490
491 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
492 int grf_count,
493 instruction_scheduler_mode mode)
494 : instruction_scheduler(v, grf_count, mode),
495 v(v)
496 {
497 }
498
499 void
500 fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
501 {
502 fs_inst *inst = (fs_inst *)be;
503
504 if (!remaining_grf_uses)
505 return;
506
507 if (inst->dst.file == GRF)
508 remaining_grf_uses[inst->dst.reg]++;
509
510 for (int i = 0; i < inst->sources; i++) {
511 if (inst->src[i].file != GRF)
512 continue;
513
514 remaining_grf_uses[inst->src[i].reg]++;
515 }
516 }
517
518 void
519 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
520 {
521 fs_inst *inst = (fs_inst *)be;
522
523 if (!remaining_grf_uses)
524 return;
525
526 if (inst->dst.file == GRF) {
527 remaining_grf_uses[inst->dst.reg]--;
528 grf_active[inst->dst.reg] = true;
529 }
530
531 for (int i = 0; i < inst->sources; i++) {
532 if (inst->src[i].file == GRF) {
533 remaining_grf_uses[inst->src[i].reg]--;
534 grf_active[inst->src[i].reg] = true;
535 }
536 }
537 }
538
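/* Estimates how scheduling this instruction would affect register pressure:
 * each VGRF for which this is the last remaining use counts in favor (its
 * live range would end), while each VGRF that no scheduled instruction has
 * touched yet counts against (its live range would start here).
 */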
539 int
540 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
541 {
542 fs_inst *inst = (fs_inst *)be;
543 int benefit = 0;
544
545 if (inst->dst.file == GRF) {
546 if (remaining_grf_uses[inst->dst.reg] == 1)
547 benefit += v->alloc.sizes[inst->dst.reg];
548 if (!grf_active[inst->dst.reg])
549 benefit -= v->alloc.sizes[inst->dst.reg];
550 }
551
552 for (int i = 0; i < inst->sources; i++) {
553 if (inst->src[i].file != GRF)
554 continue;
555
556 if (remaining_grf_uses[inst->src[i].reg] == 1)
557 benefit += v->alloc.sizes[inst->src[i].reg];
558 if (!grf_active[inst->src[i].reg])
559 benefit -= v->alloc.sizes[inst->src[i].reg];
560 }
561
562 return benefit;
563 }
564
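/* The vec4 backend currently only schedules after register allocation (the
 * constructor below forces SCHEDULE_POST), so its register-pressure hooks
 * are no-ops.
 */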
565 class vec4_instruction_scheduler : public instruction_scheduler
566 {
567 public:
568 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
569 void calculate_deps();
570 schedule_node *choose_instruction_to_schedule();
571 int issue_time(backend_instruction *inst);
572 vec4_visitor *v;
573
574 void count_remaining_grf_uses(backend_instruction *inst);
575 void update_register_pressure(backend_instruction *inst);
576 int get_register_pressure_benefit(backend_instruction *inst);
577 };
578
579 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
580 int grf_count)
581 : instruction_scheduler(v, grf_count, SCHEDULE_POST),
582 v(v)
583 {
584 }
585
586 void
587 vec4_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
588 {
589 }
590
591 void
592 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
593 {
594 }
595
596 int
597 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
598 {
599 return 0;
600 }
601
602 schedule_node::schedule_node(backend_instruction *inst,
603 instruction_scheduler *sched)
604 {
605 struct brw_context *brw = sched->bv->brw;
606
607 this->inst = inst;
608 this->child_array_size = 0;
609 this->children = NULL;
610 this->child_latency = NULL;
611 this->child_count = 0;
612 this->parent_count = 0;
613 this->unblocked_time = 0;
614 this->cand_generation = 0;
615 this->delay = 0;
616
617 /* We can't measure Gen6 timings directly but expect them to be much
618 * closer to Gen7 than Gen4.
619 */
620 if (!sched->post_reg_alloc)
621 this->latency = 1;
622 else if (brw->gen >= 6)
623 set_latency_gen7(brw->is_haswell);
624 else
625 set_latency_gen4();
626 }
627
628 void
629 instruction_scheduler::add_insts_from_block(bblock_t *block)
630 {
631 /* Removing the last instruction from a basic block removes the block as
632 * well, so put a NOP at the end to keep it alive.
633 */
634 if (!block->end()->is_control_flow()) {
635 backend_instruction *nop = new(mem_ctx) backend_instruction();
636 nop->opcode = BRW_OPCODE_NOP;
637 block->end()->insert_after(block, nop);
638 }
639
640 foreach_inst_in_block_safe(backend_instruction, inst, block) {
641 if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
642 continue;
643
644 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
645
646 this->instructions_to_schedule++;
647
648 inst->remove(block);
649 instructions.push_tail(n);
650 }
651 }
652
653 /** Recursive computation of the delay member of a node. */
654 void
655 instruction_scheduler::compute_delay(schedule_node *n)
656 {
657 if (!n->child_count) {
658 n->delay = issue_time(n->inst);
659 } else {
660 for (int i = 0; i < n->child_count; i++) {
661 if (!n->children[i]->delay)
662 compute_delay(n->children[i]);
663 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
664 }
665 }
666 }
667
668 /**
669 * Add a dependency between two instruction nodes.
670 *
671 * The @after node will be scheduled after @before. We will try to
672 * schedule it @latency cycles after @before, but no guarantees there.
673 */
674 void
675 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
676 int latency)
677 {
678 if (!before || !after)
679 return;
680
681 assert(before != after);
682
683 for (int i = 0; i < before->child_count; i++) {
684 if (before->children[i] == after) {
685 before->child_latency[i] = MAX2(before->child_latency[i], latency);
686 return;
687 }
688 }
689
690 if (before->child_array_size <= before->child_count) {
691 if (before->child_array_size < 16)
692 before->child_array_size = 16;
693 else
694 before->child_array_size *= 2;
695
696 before->children = reralloc(mem_ctx, before->children,
697 schedule_node *,
698 before->child_array_size);
699 before->child_latency = reralloc(mem_ctx, before->child_latency,
700 int, before->child_array_size);
701 }
702
703 before->children[before->child_count] = after;
704 before->child_latency[before->child_count] = latency;
705 before->child_count++;
706 after->parent_count++;
707 }
708
709 void
710 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
711 {
712 if (!before)
713 return;
714
715 add_dep(before, after, before->latency);
716 }
717
718 /**
719 * Sometimes we really want this node to execute after everything that
720 * was before it and before everything that followed it. This adds
721 * the deps to do so.
722 */
723 void
724 instruction_scheduler::add_barrier_deps(schedule_node *n)
725 {
726 schedule_node *prev = (schedule_node *)n->prev;
727 schedule_node *next = (schedule_node *)n->next;
728
729 if (prev) {
730 while (!prev->is_head_sentinel()) {
731 add_dep(prev, n, 0);
732 prev = (schedule_node *)prev->prev;
733 }
734 }
735
736 if (next) {
737 while (!next->is_tail_sentinel()) {
738 add_dep(n, next, 0);
739 next = (schedule_node *)next->next;
740 }
741 }
742 }
743
 744 /* Instruction scheduling needs to be aware of when a compressed (SIMD16)
 745  * MRF write actually writes 2 MRFs.
746 */
747 bool
748 fs_instruction_scheduler::is_compressed(fs_inst *inst)
749 {
750 return inst->exec_size == 16;
751 }
752
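/* Builds the dependency DAG for one block: a forward walk adds
 * read-after-write and write-after-write edges (with latencies), and a
 * backward walk adds write-after-read edges.  Accesses we can't track
 * precisely fall back to conservative barrier deps.
 */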
753 void
754 fs_instruction_scheduler::calculate_deps()
755 {
756 /* Pre-register-allocation, this tracks the last write per VGRF offset.
757 * After register allocation, reg_offsets are gone and we track individual
758 * GRF registers.
759 */
760 schedule_node *last_grf_write[grf_count * 16];
761 schedule_node *last_mrf_write[BRW_MAX_MRF];
762 schedule_node *last_conditional_mod[2] = { NULL, NULL };
763 schedule_node *last_accumulator_write = NULL;
764 /* Fixed HW registers are assumed to be separate from the virtual
765 * GRFs, so they can be tracked separately. We don't really write
766 * to fixed GRFs much, so don't bother tracking them on a more
767 * granular level.
768 */
769 schedule_node *last_fixed_grf_write = NULL;
770 int reg_width = v->dispatch_width / 8;
771
772 /* The last instruction always needs to still be the last
773 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
774 * WHILE) and scheduling other things after it would disturb the
775 * basic block, or it's FB_WRITE and we should do a better job at
776 * dead code elimination anyway.
777 */
778 schedule_node *last = (schedule_node *)instructions.get_tail();
779 add_barrier_deps(last);
780
781 memset(last_grf_write, 0, sizeof(last_grf_write));
782 memset(last_mrf_write, 0, sizeof(last_mrf_write));
783
784 /* top-to-bottom dependencies: RAW and WAW. */
785 foreach_in_list(schedule_node, n, &instructions) {
786 fs_inst *inst = (fs_inst *)n->inst;
787
788 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
789 inst->has_side_effects())
790 add_barrier_deps(n);
791
792 /* read-after-write deps. */
793 for (int i = 0; i < inst->sources; i++) {
794 if (inst->src[i].file == GRF) {
795 if (post_reg_alloc) {
796 for (int r = 0; r < inst->regs_read(i); r++)
797 add_dep(last_grf_write[inst->src[i].reg + r], n);
798 } else {
799 for (int r = 0; r < inst->regs_read(i); r++) {
800 add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
801 }
802 }
803 } else if (inst->src[i].file == HW_REG &&
804 (inst->src[i].fixed_hw_reg.file ==
805 BRW_GENERAL_REGISTER_FILE)) {
806 if (post_reg_alloc) {
807 int size = reg_width;
808 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
809 size = 1;
810 for (int r = 0; r < size; r++)
811 add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
812 } else {
813 add_dep(last_fixed_grf_write, n);
814 }
815 } else if (inst->src[i].is_accumulator()) {
816 add_dep(last_accumulator_write, n);
817 } else if (inst->src[i].file != BAD_FILE &&
818 inst->src[i].file != IMM &&
819 inst->src[i].file != UNIFORM &&
820 (inst->src[i].file != HW_REG ||
821 inst->src[i].fixed_hw_reg.file != IMM)) {
822 assert(inst->src[i].file != MRF);
823 add_barrier_deps(n);
824 }
825 }
826
827 if (inst->base_mrf != -1) {
828 for (int i = 0; i < inst->mlen; i++) {
829 /* It looks like the MRF regs are released in the send
830 * instruction once it's sent, not when the result comes
831 * back.
832 */
833 add_dep(last_mrf_write[inst->base_mrf + i], n);
834 }
835 }
836
837 if (inst->reads_flag()) {
838 add_dep(last_conditional_mod[inst->flag_subreg], n);
839 }
840
841 if (inst->reads_accumulator_implicitly()) {
842 add_dep(last_accumulator_write, n);
843 }
844
845 /* write-after-write deps. */
846 if (inst->dst.file == GRF) {
847 if (post_reg_alloc) {
848 for (int r = 0; r < inst->regs_written; r++) {
849 add_dep(last_grf_write[inst->dst.reg + r], n);
850 last_grf_write[inst->dst.reg + r] = n;
851 }
852 } else {
853 for (int r = 0; r < inst->regs_written; r++) {
854 add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
855 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
856 }
857 }
858 } else if (inst->dst.file == MRF) {
859 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
860
861 add_dep(last_mrf_write[reg], n);
862 last_mrf_write[reg] = n;
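         /* A compressed instruction writes two MRF registers: reg and reg+4
          * in COMPR4 mode, reg and reg+1 otherwise.
          */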
863 if (is_compressed(inst)) {
864 if (inst->dst.reg & BRW_MRF_COMPR4)
865 reg += 4;
866 else
867 reg++;
868 add_dep(last_mrf_write[reg], n);
869 last_mrf_write[reg] = n;
870 }
871 } else if (inst->dst.file == HW_REG &&
872 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
873 if (post_reg_alloc) {
874 for (int r = 0; r < reg_width; r++)
875 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
876 } else {
877 last_fixed_grf_write = n;
878 }
879 } else if (inst->dst.is_accumulator()) {
880 add_dep(last_accumulator_write, n);
881 last_accumulator_write = n;
882 } else if (inst->dst.file != BAD_FILE &&
883 !inst->dst.is_null()) {
884 add_barrier_deps(n);
885 }
886
887 if (inst->mlen > 0 && inst->base_mrf != -1) {
888 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
889 add_dep(last_mrf_write[inst->base_mrf + i], n);
890 last_mrf_write[inst->base_mrf + i] = n;
891 }
892 }
893
894 if (inst->writes_flag()) {
895 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
896 last_conditional_mod[inst->flag_subreg] = n;
897 }
898
899 if (inst->writes_accumulator_implicitly(v->brw) &&
900 !inst->dst.is_accumulator()) {
901 add_dep(last_accumulator_write, n);
902 last_accumulator_write = n;
903 }
904 }
905
906 /* bottom-to-top dependencies: WAR */
907 memset(last_grf_write, 0, sizeof(last_grf_write));
908 memset(last_mrf_write, 0, sizeof(last_mrf_write));
909 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
910 last_accumulator_write = NULL;
911 last_fixed_grf_write = NULL;
912
913 exec_node *node;
914 exec_node *prev;
915 for (node = instructions.get_tail(), prev = node->prev;
916 !node->is_head_sentinel();
917 node = prev, prev = node->prev) {
918 schedule_node *n = (schedule_node *)node;
919 fs_inst *inst = (fs_inst *)n->inst;
920
921 /* write-after-read deps. */
922 for (int i = 0; i < inst->sources; i++) {
923 if (inst->src[i].file == GRF) {
924 if (post_reg_alloc) {
925 for (int r = 0; r < inst->regs_read(i); r++)
926 add_dep(n, last_grf_write[inst->src[i].reg + r]);
927 } else {
928 for (int r = 0; r < inst->regs_read(i); r++) {
929 add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]);
930 }
931 }
932 } else if (inst->src[i].file == HW_REG &&
933 (inst->src[i].fixed_hw_reg.file ==
934 BRW_GENERAL_REGISTER_FILE)) {
935 if (post_reg_alloc) {
936 int size = reg_width;
937 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
938 size = 1;
939 for (int r = 0; r < size; r++)
940 add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r]);
941 } else {
942 add_dep(n, last_fixed_grf_write);
943 }
944 } else if (inst->src[i].is_accumulator()) {
945 add_dep(n, last_accumulator_write);
946 } else if (inst->src[i].file != BAD_FILE &&
947 inst->src[i].file != IMM &&
948 inst->src[i].file != UNIFORM &&
949 (inst->src[i].file != HW_REG ||
950 inst->src[i].fixed_hw_reg.file != IMM)) {
951 assert(inst->src[i].file != MRF);
952 add_barrier_deps(n);
953 }
954 }
955
956 if (inst->base_mrf != -1) {
957 for (int i = 0; i < inst->mlen; i++) {
958 /* It looks like the MRF regs are released in the send
959 * instruction once it's sent, not when the result comes
960 * back.
961 */
962 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
963 }
964 }
965
966 if (inst->reads_flag()) {
967 add_dep(n, last_conditional_mod[inst->flag_subreg]);
968 }
969
970 if (inst->reads_accumulator_implicitly()) {
971 add_dep(n, last_accumulator_write);
972 }
973
974 /* Update the things this instruction wrote, so earlier reads
975 * can mark this as WAR dependency.
976 */
977 if (inst->dst.file == GRF) {
978 if (post_reg_alloc) {
979 for (int r = 0; r < inst->regs_written; r++)
980 last_grf_write[inst->dst.reg + r] = n;
981 } else {
982 for (int r = 0; r < inst->regs_written; r++) {
983 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
984 }
985 }
986 } else if (inst->dst.file == MRF) {
987 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
988
989 last_mrf_write[reg] = n;
990
991 if (is_compressed(inst)) {
992 if (inst->dst.reg & BRW_MRF_COMPR4)
993 reg += 4;
994 else
995 reg++;
996
997 last_mrf_write[reg] = n;
998 }
999 } else if (inst->dst.file == HW_REG &&
1000 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1001 if (post_reg_alloc) {
1002 for (int r = 0; r < reg_width; r++)
1003 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
1004 } else {
1005 last_fixed_grf_write = n;
1006 }
1007 } else if (inst->dst.is_accumulator()) {
1008 last_accumulator_write = n;
1009 } else if (inst->dst.file != BAD_FILE &&
1010 !inst->dst.is_null()) {
1011 add_barrier_deps(n);
1012 }
1013
1014 if (inst->mlen > 0 && inst->base_mrf != -1) {
1015 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1016 last_mrf_write[inst->base_mrf + i] = n;
1017 }
1018 }
1019
1020 if (inst->writes_flag()) {
1021 last_conditional_mod[inst->flag_subreg] = n;
1022 }
1023
1024 if (inst->writes_accumulator_implicitly(v->brw)) {
1025 last_accumulator_write = n;
1026 }
1027 }
1028 }
1029
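/* Same two-pass structure as the FS version above, but registers are
 * tracked at whole-GRF granularity, and reads and writes that span multiple
 * registers add deps for each register they touch.
 */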
1030 void
1031 vec4_instruction_scheduler::calculate_deps()
1032 {
1033 schedule_node *last_grf_write[grf_count];
1034 schedule_node *last_mrf_write[BRW_MAX_MRF];
1035 schedule_node *last_conditional_mod = NULL;
1036 schedule_node *last_accumulator_write = NULL;
1037 /* Fixed HW registers are assumed to be separate from the virtual
1038 * GRFs, so they can be tracked separately. We don't really write
1039 * to fixed GRFs much, so don't bother tracking them on a more
1040 * granular level.
1041 */
1042 schedule_node *last_fixed_grf_write = NULL;
1043
1044 /* The last instruction always needs to still be the last instruction.
1045 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1046 * other things after it would disturb the basic block, or it's the EOT
1047 * URB_WRITE and we should do a better job at dead code eliminating
1048 * anything that could have been scheduled after it.
1049 */
1050 schedule_node *last = (schedule_node *)instructions.get_tail();
1051 add_barrier_deps(last);
1052
1053 memset(last_grf_write, 0, sizeof(last_grf_write));
1054 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1055
1056 /* top-to-bottom dependencies: RAW and WAW. */
1057 foreach_in_list(schedule_node, n, &instructions) {
1058 vec4_instruction *inst = (vec4_instruction *)n->inst;
1059
1060 if (inst->has_side_effects())
1061 add_barrier_deps(n);
1062
1063 /* read-after-write deps. */
1064 for (int i = 0; i < 3; i++) {
1065 if (inst->src[i].file == GRF) {
1066 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1067 add_dep(last_grf_write[inst->src[i].reg + j], n);
1068 } else if (inst->src[i].file == HW_REG &&
1069 (inst->src[i].fixed_hw_reg.file ==
1070 BRW_GENERAL_REGISTER_FILE)) {
1071 add_dep(last_fixed_grf_write, n);
1072 } else if (inst->src[i].is_accumulator()) {
1073 assert(last_accumulator_write);
1074 add_dep(last_accumulator_write, n);
1075 } else if (inst->src[i].file != BAD_FILE &&
1076 inst->src[i].file != IMM &&
1077 inst->src[i].file != UNIFORM &&
1078 (inst->src[i].file != HW_REG ||
1079 inst->src[i].fixed_hw_reg.file != IMM)) {
1080 /* No reads from MRF, and ATTR is already translated away */
1081 assert(inst->src[i].file != MRF &&
1082 inst->src[i].file != ATTR);
1083 add_barrier_deps(n);
1084 }
1085 }
1086
1087 for (int i = 0; i < inst->mlen; i++) {
1088 /* It looks like the MRF regs are released in the send
1089 * instruction once it's sent, not when the result comes
1090 * back.
1091 */
1092 add_dep(last_mrf_write[inst->base_mrf + i], n);
1093 }
1094
1095 if (inst->reads_flag()) {
1096 assert(last_conditional_mod);
1097 add_dep(last_conditional_mod, n);
1098 }
1099
1100 if (inst->reads_accumulator_implicitly()) {
1101 assert(last_accumulator_write);
1102 add_dep(last_accumulator_write, n);
1103 }
1104
1105 /* write-after-write deps. */
1106 if (inst->dst.file == GRF) {
1107 for (unsigned j = 0; j < inst->regs_written; ++j) {
1108 add_dep(last_grf_write[inst->dst.reg + j], n);
1109 last_grf_write[inst->dst.reg + j] = n;
1110 }
1111 } else if (inst->dst.file == MRF) {
1112 add_dep(last_mrf_write[inst->dst.reg], n);
1113 last_mrf_write[inst->dst.reg] = n;
1114 } else if (inst->dst.file == HW_REG &&
1115 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1116 last_fixed_grf_write = n;
1117 } else if (inst->dst.is_accumulator()) {
1118 add_dep(last_accumulator_write, n);
1119 last_accumulator_write = n;
1120 } else if (inst->dst.file != BAD_FILE &&
1121 !inst->dst.is_null()) {
1122 add_barrier_deps(n);
1123 }
1124
1125 if (inst->mlen > 0) {
1126 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1127 add_dep(last_mrf_write[inst->base_mrf + i], n);
1128 last_mrf_write[inst->base_mrf + i] = n;
1129 }
1130 }
1131
1132 if (inst->writes_flag()) {
1133 add_dep(last_conditional_mod, n, 0);
1134 last_conditional_mod = n;
1135 }
1136
1137 if (inst->writes_accumulator_implicitly(v->brw) &&
1138 !inst->dst.is_accumulator()) {
1139 add_dep(last_accumulator_write, n);
1140 last_accumulator_write = n;
1141 }
1142 }
1143
1144 /* bottom-to-top dependencies: WAR */
1145 memset(last_grf_write, 0, sizeof(last_grf_write));
1146 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1147 last_conditional_mod = NULL;
1148 last_accumulator_write = NULL;
1149 last_fixed_grf_write = NULL;
1150
1151 exec_node *node;
1152 exec_node *prev;
1153 for (node = instructions.get_tail(), prev = node->prev;
1154 !node->is_head_sentinel();
1155 node = prev, prev = node->prev) {
1156 schedule_node *n = (schedule_node *)node;
1157 vec4_instruction *inst = (vec4_instruction *)n->inst;
1158
1159 /* write-after-read deps. */
1160 for (int i = 0; i < 3; i++) {
1161 if (inst->src[i].file == GRF) {
1162 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1163 add_dep(n, last_grf_write[inst->src[i].reg + j]);
1164 } else if (inst->src[i].file == HW_REG &&
1165 (inst->src[i].fixed_hw_reg.file ==
1166 BRW_GENERAL_REGISTER_FILE)) {
1167 add_dep(n, last_fixed_grf_write);
1168 } else if (inst->src[i].is_accumulator()) {
1169 add_dep(n, last_accumulator_write);
1170 } else if (inst->src[i].file != BAD_FILE &&
1171 inst->src[i].file != IMM &&
1172 inst->src[i].file != UNIFORM &&
1173 (inst->src[i].file != HW_REG ||
1174 inst->src[i].fixed_hw_reg.file != IMM)) {
1175 assert(inst->src[i].file != MRF &&
1176 inst->src[i].file != ATTR);
1177 add_barrier_deps(n);
1178 }
1179 }
1180
1181 for (int i = 0; i < inst->mlen; i++) {
1182 /* It looks like the MRF regs are released in the send
1183 * instruction once it's sent, not when the result comes
1184 * back.
1185 */
1186 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1187 }
1188
1189 if (inst->reads_flag()) {
1190 add_dep(n, last_conditional_mod);
1191 }
1192
1193 if (inst->reads_accumulator_implicitly()) {
1194 add_dep(n, last_accumulator_write);
1195 }
1196
1197 /* Update the things this instruction wrote, so earlier reads
1198 * can mark this as WAR dependency.
1199 */
1200 if (inst->dst.file == GRF) {
1201 for (unsigned j = 0; j < inst->regs_written; ++j)
1202 last_grf_write[inst->dst.reg + j] = n;
1203 } else if (inst->dst.file == MRF) {
1204 last_mrf_write[inst->dst.reg] = n;
1205 } else if (inst->dst.file == HW_REG &&
1206 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1207 last_fixed_grf_write = n;
1208 } else if (inst->dst.is_accumulator()) {
1209 last_accumulator_write = n;
1210 } else if (inst->dst.file != BAD_FILE &&
1211 !inst->dst.is_null()) {
1212 add_barrier_deps(n);
1213 }
1214
1215 if (inst->mlen > 0) {
1216 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1217 last_mrf_write[inst->base_mrf + i] = n;
1218 }
1219 }
1220
1221 if (inst->writes_flag()) {
1222 last_conditional_mod = n;
1223 }
1224
1225 if (inst->writes_accumulator_implicitly(v->brw)) {
1226 last_accumulator_write = n;
1227 }
1228 }
1229 }
1230
1231 schedule_node *
1232 fs_instruction_scheduler::choose_instruction_to_schedule()
1233 {
1234 struct brw_context *brw = v->brw;
1235 schedule_node *chosen = NULL;
1236
1237 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1238 int chosen_time = 0;
1239
1240 /* Of the instructions ready to execute or the closest to
1241 * being ready, choose the oldest one.
1242 */
1243 foreach_in_list(schedule_node, n, &instructions) {
1244 if (!chosen || n->unblocked_time < chosen_time) {
1245 chosen = n;
1246 chosen_time = n->unblocked_time;
1247 }
1248 }
1249 } else {
1250 /* Before register allocation, we don't care about the latencies of
1251 * instructions. All we care about is reducing live intervals of
1252 * variables so that we can avoid register spilling, or get SIMD16
1253 * shaders which naturally do a better job of hiding instruction
1254 * latency.
1255 */
1256 foreach_in_list(schedule_node, n, &instructions) {
1257 fs_inst *inst = (fs_inst *)n->inst;
1258
1259 if (!chosen) {
1260 chosen = n;
1261 continue;
1262 }
1263
1264 /* Most important: If we can definitely reduce register pressure, do
1265 * so immediately.
1266 */
1267 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1268 int chosen_register_pressure_benefit =
1269 get_register_pressure_benefit(chosen->inst);
1270
1271 if (register_pressure_benefit > 0 &&
1272 register_pressure_benefit > chosen_register_pressure_benefit) {
1273 chosen = n;
1274 continue;
1275 } else if (chosen_register_pressure_benefit > 0 &&
1276 (register_pressure_benefit <
1277 chosen_register_pressure_benefit)) {
1278 continue;
1279 }
1280
1281 if (mode == SCHEDULE_PRE_LIFO) {
1282 /* Prefer instructions that recently became available for
1283 * scheduling. These are the things that are most likely to
1284 * (eventually) make a variable dead and reduce register pressure.
1285 * Typical register pressure estimates don't work for us because
1286 * most of our pressure comes from texturing, where no single
1287 * instruction to schedule will make a vec4 value dead.
1288 */
1289 if (n->cand_generation > chosen->cand_generation) {
1290 chosen = n;
1291 continue;
1292 } else if (n->cand_generation < chosen->cand_generation) {
1293 continue;
1294 }
1295
1296 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1297 * do this, then because we prefer instructions that just became
1298 * candidates, we'll end up in a pattern of scheduling a SEND,
1299 * then the MRFs for the next SEND, then the next SEND, then the
1300 * MRFs, etc., without ever consuming the results of a send.
1301 */
1302 if (brw->gen < 7) {
1303 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1304
1305           /* We use regs_written greater than one destination's worth of
1306            * registers (dst.width / 8) as our test for the kind of send
1307            * instruction to avoid -- only sends generate many regs, and a
1308            * single-result send is probably actually reducing register pressure.
1309 */
1310 if (inst->regs_written <= inst->dst.width / 8 &&
1311 chosen_inst->regs_written > chosen_inst->dst.width / 8) {
1312 chosen = n;
1313 continue;
1314 } else if (inst->regs_written > chosen_inst->regs_written) {
1315 continue;
1316 }
1317 }
1318 }
1319
1320 /* For instructions pushed on the cands list at the same time, prefer
1321 * the one with the highest delay to the end of the program. This is
1322 * most likely to have its values able to be consumed first (such as
1323 * for a large tree of lowered ubo loads, which appear reversed in
1324 * the instruction stream with respect to when they can be consumed).
1325 */
1326 if (n->delay > chosen->delay) {
1327 chosen = n;
1328 continue;
1329 } else if (n->delay < chosen->delay) {
1330 continue;
1331 }
1332
1333 /* If all other metrics are equal, we prefer the first instruction in
1334            * the list (program execution order).
1335 */
1336 }
1337 }
1338
1339 return chosen;
1340 }
1341
1342 schedule_node *
1343 vec4_instruction_scheduler::choose_instruction_to_schedule()
1344 {
1345 schedule_node *chosen = NULL;
1346 int chosen_time = 0;
1347
1348 /* Of the instructions ready to execute or the closest to being ready,
1349 * choose the oldest one.
1350 */
1351 foreach_in_list(schedule_node, n, &instructions) {
1352 if (!chosen || n->unblocked_time < chosen_time) {
1353 chosen = n;
1354 chosen_time = n->unblocked_time;
1355 }
1356 }
1357
1358 return chosen;
1359 }
1360
1361 int
1362 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1363 {
1364 if (is_compressed((fs_inst *)inst))
1365 return 4;
1366 else
1367 return 2;
1368 }
1369
1370 int
1371 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1372 {
1373 /* We always execute as two vec4s in parallel. */
1374 return 2;
1375 }
1376
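/* The main list-scheduling loop: repeatedly pick a ready DAG head, emit it
 * back into the block, advance the clock by its issue time, and update the
 * unblocked times of its children (promoting any that become ready).
 */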
1377 void
1378 instruction_scheduler::schedule_instructions(bblock_t *block)
1379 {
1380 struct brw_context *brw = bv->brw;
1381 backend_instruction *inst = block->end();
1382 time = 0;
1383
1384 /* Remove non-DAG heads from the list. */
1385 foreach_in_list_safe(schedule_node, n, &instructions) {
1386 if (n->parent_count != 0)
1387 n->remove();
1388 }
1389
1390 unsigned cand_generation = 1;
1391 while (!instructions.is_empty()) {
1392 schedule_node *chosen = choose_instruction_to_schedule();
1393
1394 /* Schedule this instruction. */
1395 assert(chosen);
1396 chosen->remove();
1397 inst->insert_before(block, chosen->inst);
1398 instructions_to_schedule--;
1399 update_register_pressure(chosen->inst);
1400
1401 /* Update the clock for how soon an instruction could start after the
1402 * chosen one.
1403 */
1404 time += issue_time(chosen->inst);
1405
1406 /* If we expected a delay for scheduling, then bump the clock to reflect
1407 * that as well. In reality, the hardware will switch to another
1408 * hyperthread and may not return to dispatching our thread for a while
1409 * even after we're unblocked.
1410 */
1411 time = MAX2(time, chosen->unblocked_time);
1412
1413 if (debug) {
1414 fprintf(stderr, "clock %4d, scheduled: ", time);
1415 bv->dump_instruction(chosen->inst);
1416 }
1417
1418 /* Now that we've scheduled a new instruction, some of its
1419 * children can be promoted to the list of instructions ready to
1420 * be scheduled. Update the children's unblocked time for this
1421 * DAG edge as we do so.
1422 */
1423 for (int i = chosen->child_count - 1; i >= 0; i--) {
1424 schedule_node *child = chosen->children[i];
1425
1426 child->unblocked_time = MAX2(child->unblocked_time,
1427 time + chosen->child_latency[i]);
1428
1429 if (debug) {
1430 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1431 bv->dump_instruction(child->inst);
1432 }
1433
1434 child->cand_generation = cand_generation;
1435 child->parent_count--;
1436 if (child->parent_count == 0) {
1437 if (debug) {
1438 fprintf(stderr, "\t\tnow available\n");
1439 }
1440 instructions.push_head(child);
1441 }
1442 }
1443 cand_generation++;
1444
1445 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1446 * but it's more limited pre-gen6, so if we send something off to it then
1447 * the next math instruction isn't going to make progress until the first
1448 * is done.
1449 */
1450 if (brw->gen < 6 && chosen->inst->is_math()) {
1451 foreach_in_list(schedule_node, n, &instructions) {
1452 if (n->inst->is_math())
1453 n->unblocked_time = MAX2(n->unblocked_time,
1454 time + chosen->latency);
1455 }
1456 }
1457 }
1458
1459 if (block->end()->opcode == BRW_OPCODE_NOP)
1460 block->end()->remove(block);
1461 assert(instructions_to_schedule == 0);
1462 }
1463
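/* Schedules each basic block of the CFG independently; trivially small
 * blocks are skipped.
 */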
1464 void
1465 instruction_scheduler::run(cfg_t *cfg)
1466 {
1467 if (debug) {
1468 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1469 post_reg_alloc);
1470 bv->dump_instructions();
1471 }
1472
1473 /* Populate the remaining GRF uses array to improve the pre-regalloc
1474 * scheduling.
1475 */
1476 if (remaining_grf_uses) {
1477 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
1478 count_remaining_grf_uses(inst);
1479 }
1480 }
1481
1482 foreach_block(block, cfg) {
1483 if (block->end_ip - block->start_ip <= 1)
1484 continue;
1485
1486 add_insts_from_block(block);
1487
1488 calculate_deps();
1489
1490 foreach_in_list(schedule_node, n, &instructions) {
1491 compute_delay(n);
1492 }
1493
1494 schedule_instructions(block);
1495 }
1496
1497 if (debug) {
1498 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1499 post_reg_alloc);
1500 bv->dump_instructions();
1501 }
1502 }
1503
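/* FS entry point.  Pre-register-allocation scheduling works in terms of
 * virtual GRFs (alloc.count); post-register-allocation scheduling works on
 * the assigned hardware registers (grf_used).
 */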
1504 void
1505 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1506 {
1507 int grf_count;
1508 if (mode == SCHEDULE_POST)
1509 grf_count = grf_used;
1510 else
1511 grf_count = alloc.count;
1512
1513 fs_instruction_scheduler sched(this, grf_count, mode);
1514 sched.run(cfg);
1515
1516 if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) {
1517 fprintf(stderr, "fs%d estimated execution time: %d cycles\n",
1518 dispatch_width, sched.time);
1519 }
1520
1521 invalidate_live_intervals();
1522 }
1523
1524 void
1525 vec4_visitor::opt_schedule_instructions()
1526 {
1527 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1528 sched.run(cfg);
1529
1530 if (unlikely(debug_flag)) {
1531 fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time);
1532 }
1533
1534 invalidate_live_intervals();
1535 }