[mesa.git] / src / mesa / drivers / dri / i965 / brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_vec4.h"
30 #include "glsl/glsl_types.h"
31 #include "glsl/ir_optimization.h"
32
33 using namespace brw;
34
35 /** @file brw_schedule_instructions.cpp
36  *
37  * List scheduling of FS and vec4 instructions.
38 *
39 * The basic model of the list scheduler is to take a basic block,
40 * compute a DAG of the dependencies (RAW ordering with latency, WAW
41 * ordering with latency, WAR ordering), and make a list of the DAG heads.
42 * Heuristically pick a DAG head, then put all the children that are
43 * now DAG heads into the list of things to schedule.
44 *
45 * The heuristic is the important part. We're trying to be cheap,
46  * since actually computing the optimal scheduling is NP-complete.
47 * What we do is track a "current clock". When we schedule a node, we
48 * update the earliest-unblocked clock time of its children, and
49 * increment the clock. Then, when trying to schedule, we just pick
50 * the earliest-unblocked instruction to schedule.
51 *
52 * Note that often there will be many things which could execute
53 * immediately, and there are a range of heuristic options to choose
54 * from in picking among those.
55 */
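/* A rough pseudocode sketch of the loop described above; the real
 * implementation is instruction_scheduler::schedule_instructions() further
 * down in this file:
 *
 *    ready = the DAG heads (nodes with no unscheduled parents)
 *    while (!ready.empty()) {
 *       n = choose_instruction_to_schedule();  // earliest unblocked_time
 *                                              // post-RA; register-pressure
 *                                              // and delay heuristics pre-RA
 *       emit n into the program;
 *       time += issue_time(n);
 *       time = MAX2(time, n->unblocked_time);
 *       for (each child c of n) {
 *          c->unblocked_time = MAX2(c->unblocked_time, time + edge latency);
 *          if (--c->parent_count == 0)
 *             ready.push(c);
 *       }
 *    }
 */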
56
57 static bool debug = false;
58
59 class instruction_scheduler;
60
61 class schedule_node : public exec_node
62 {
63 public:
64 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
65 void set_latency_gen4();
66 void set_latency_gen7(bool is_haswell);
67
68 backend_instruction *inst;
69 schedule_node **children;
70 int *child_latency;
71 int child_count;
72 int parent_count;
73 int child_array_size;
74 int unblocked_time;
75 int latency;
76
77 /**
78 * Which iteration of pushing groups of children onto the candidates list
79 * this node was a part of.
80 */
81 unsigned cand_generation;
82
83 /**
84  * This is the instruction's latency plus the maximum delay of its
85  * children, or just the issue_time if it's a leaf node.
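 * In other words, the length in cycles of the longest dependency chain from
 * this node to the end of its basic block; the pre-register-allocation
 * heuristic uses it as a tie-breaker.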
86 */
87 int delay;
88 };
89
90 void
91 schedule_node::set_latency_gen4()
92 {
93 int chans = 8;
94 int math_latency = 22;
95
96 switch (inst->opcode) {
97 case SHADER_OPCODE_RCP:
98 this->latency = 1 * chans * math_latency;
99 break;
100 case SHADER_OPCODE_RSQ:
101 this->latency = 2 * chans * math_latency;
102 break;
103 case SHADER_OPCODE_INT_QUOTIENT:
104 case SHADER_OPCODE_SQRT:
105 case SHADER_OPCODE_LOG2:
106 /* full precision log. partial is 2. */
107 this->latency = 3 * chans * math_latency;
108 break;
109 case SHADER_OPCODE_INT_REMAINDER:
110 case SHADER_OPCODE_EXP2:
111 /* full precision. partial is 3, same throughput. */
112 this->latency = 4 * chans * math_latency;
113 break;
114 case SHADER_OPCODE_POW:
115 this->latency = 8 * chans * math_latency;
116 break;
117 case SHADER_OPCODE_SIN:
118 case SHADER_OPCODE_COS:
119 /* minimum latency, max is 12 rounds. */
120 this->latency = 5 * chans * math_latency;
121 break;
122 default:
123 this->latency = 2;
124 break;
125 }
126 }
127
128 void
129 schedule_node::set_latency_gen7(bool is_haswell)
130 {
131 switch (inst->opcode) {
132 case BRW_OPCODE_MAD:
133 /* 2 cycles
134 * (since the last two src operands are in different register banks):
135 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
136 *
137 * 3 cycles on IVB, 4 on HSW
138 * (since the last two src operands are in the same register bank):
139 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
140 *
141 * 18 cycles on IVB, 16 on HSW
142 * (since the last two src operands are in different register banks):
143 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
144 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
145 *
146 * 20 cycles on IVB, 18 on HSW
147 * (since the last two src operands are in the same register bank):
148 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
149 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
150 */
151
152 /* Our register allocator doesn't know about register banks, so use the
153 * higher latency.
154 */
155 latency = is_haswell ? 16 : 18;
156 break;
157
158 case BRW_OPCODE_LRP:
159 /* 2 cycles
160 * (since the last two src operands are in different register banks):
161 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
162 *
163 * 3 cycles on IVB, 4 on HSW
164 * (since the last two src operands are in the same register bank):
165 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
166 *
167 * 16 cycles on IVB, 14 on HSW
168 * (since the last two src operands are in different register banks):
169 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
170 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
171 *
172 * 16 cycles
173 * (since the last two src operands are in the same register bank):
174 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
175 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
176 */
177
178 /* Our register allocator doesn't know about register banks, so use the
179 * higher latency.
180 */
181 latency = 14;
182 break;
183
184 case SHADER_OPCODE_RCP:
185 case SHADER_OPCODE_RSQ:
186 case SHADER_OPCODE_SQRT:
187 case SHADER_OPCODE_LOG2:
188 case SHADER_OPCODE_EXP2:
189 case SHADER_OPCODE_SIN:
190 case SHADER_OPCODE_COS:
191 /* 2 cycles:
192 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
193 *
194 * 18 cycles:
195 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
196 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
197 *
198 * Same for exp2, log2, rsq, sqrt, sin, cos.
199 */
200 latency = is_haswell ? 14 : 16;
201 break;
202
203 case SHADER_OPCODE_POW:
204 /* 2 cycles:
205 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
206 *
207 * 26 cycles:
208 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
209 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
210 */
211 latency = is_haswell ? 22 : 24;
212 break;
213
214 case SHADER_OPCODE_TEX:
215 case SHADER_OPCODE_TXD:
216 case SHADER_OPCODE_TXF:
217 case SHADER_OPCODE_TXL:
218 /* 18 cycles:
219 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
220 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
221 * send(8) g4<1>UW g114<8,8,1>F
222 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
223 *
224  * 697 +/- 49 cycles (min 610, n=26):
225 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
226 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
227 * send(8) g4<1>UW g114<8,8,1>F
228 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
229 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
230 *
231  * So our first texture load of the batchbuffer takes ~700 cycles, since
232  * the caches are cold at that point.
233 *
234 * 840 +/- 92 cycles (min 720, n=25):
235 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
236 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
237 * send(8) g4<1>UW g114<8,8,1>F
238 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
239 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
240 * send(8) g4<1>UW g114<8,8,1>F
241 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
242 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
243 *
244 * On the second load, it takes just an extra ~140 cycles, and after
245 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
246 *
247 * 683 +/- 49 cycles (min = 602, n=47):
248 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
249 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
250 * send(8) g4<1>UW g114<8,8,1>F
251 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
252 * send(8) g50<1>UW g114<8,8,1>F
253 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
254 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
255 *
256 * The unit appears to be pipelined, since this matches up with the
257 * cache-cold case, despite there being two loads here. If you replace
258 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
259 *
260 * So, take some number between the cache-hot 140 cycles and the
261 * cache-cold 700 cycles. No particular tuning was done on this.
262 *
263 * I haven't done significant testing of the non-TEX opcodes. TXL at
264 * least looked about the same as TEX.
265 */
266 latency = 200;
267 break;
268
269 case SHADER_OPCODE_TXS:
270 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
271 * cycles (n=15):
272 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
273 * send(8) g6<1>UW g114<8,8,1>F
274 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
275 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
276 *
277 *
278 * Two loads was 535 +/- 30 cycles (n=19):
279 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
280 * send(16) g6<1>UW g114<8,8,1>F
281 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
282 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
283 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
284 * send(16) g8<1>UW g114<8,8,1>F
285 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
286 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
287 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
288 *
289  * Since the only cache that should matter is the instruction/state
290  * cache containing the surface state, assume that we
291 * always have hot caches.
292 */
293 latency = 100;
294 break;
295
296 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
297 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
298 case VS_OPCODE_PULL_CONSTANT_LOAD:
299  /* Testing with varying-index pull constants:
300 *
301 * 16 cycles:
302 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
303 * send(8) g4<1>F g4<8,8,1>D
304 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
305 *
306 * ~480 cycles:
307 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
308 * send(8) g4<1>F g4<8,8,1>D
309 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
310 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
311 *
312 * ~620 cycles:
313 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
314 * send(8) g4<1>F g4<8,8,1>D
315 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
316 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
317 * send(8) g4<1>F g4<8,8,1>D
318 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
319 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
320 *
321 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
322 * about 460. We expect to mostly be cache hot, so pick something more
323 * in that direction.
324 */
325 latency = 200;
326 break;
327
328 case SHADER_OPCODE_GEN7_SCRATCH_READ:
329 /* Testing a load from offset 0, that had been previously written:
330 *
331 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
332 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
333 *
334 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
335 * then around 140. Presumably this is cache hit vs miss.
336 */
337 latency = 50;
338 break;
339
340 case SHADER_OPCODE_UNTYPED_ATOMIC:
341 /* Test code:
342 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
343 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
344 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
345 * send(8) g4<1>ud g112<8,8,1>ud
346 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
347 *
348 * Running it 100 times as fragment shader on a 128x128 quad
349 * gives an average latency of 13867 cycles per atomic op,
350 * standard deviation 3%. Note that this is a rather
351 * pessimistic estimate, the actual latency in cases with few
352 * collisions between threads and favorable pipelining has been
353 * seen to be reduced by a factor of 100.
354 */
355 latency = 14000;
356 break;
357
358 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
359 /* Test code:
360 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
361 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
362 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
363 * send(8) g4<1>UD g112<8,8,1>UD
364 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
365 * .
366 * . [repeats 8 times]
367 * .
368 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
369 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
370 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
371 * send(8) g4<1>UD g112<8,8,1>UD
372 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
373 *
374 * Running it 100 times as fragment shader on a 128x128 quad
375 * gives an average latency of 583 cycles per surface read,
376 * standard deviation 0.9%.
377 */
378 latency = is_haswell ? 300 : 600;
379 break;
380
381 default:
382 /* 2 cycles:
383 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
384 *
385 * 16 cycles:
386 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
387 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
388 */
389 latency = 14;
390 break;
391 }
392 }
393
394 class instruction_scheduler {
395 public:
396 instruction_scheduler(backend_visitor *v, int grf_count,
397 instruction_scheduler_mode mode)
398 {
399 this->bv = v;
400 this->mem_ctx = ralloc_context(NULL);
401 this->grf_count = grf_count;
402 this->instructions.make_empty();
403 this->instructions_to_schedule = 0;
404 this->post_reg_alloc = (mode == SCHEDULE_POST);
405 this->mode = mode;
406 this->time = 0;
407 if (!post_reg_alloc) {
408 this->remaining_grf_uses = rzalloc_array(mem_ctx, int, grf_count);
409 this->grf_active = rzalloc_array(mem_ctx, bool, grf_count);
410 } else {
411 this->remaining_grf_uses = NULL;
412 this->grf_active = NULL;
413 }
414 }
415
416 ~instruction_scheduler()
417 {
418 ralloc_free(this->mem_ctx);
419 }
420 void add_barrier_deps(schedule_node *n);
421 void add_dep(schedule_node *before, schedule_node *after, int latency);
422 void add_dep(schedule_node *before, schedule_node *after);
423
424 void run(exec_list *instructions);
425 void add_inst(backend_instruction *inst);
426 void compute_delay(schedule_node *node);
427 virtual void calculate_deps() = 0;
428 virtual schedule_node *choose_instruction_to_schedule() = 0;
429
430 /**
431 * Returns how many cycles it takes the instruction to issue.
432 *
433  * Instructions in Gen hardware are handled one SIMD4 vector at a time,
434  * with 1 cycle per vector dispatched. Thus SIMD8 pixel-shader instructions
435  * take 2 cycles to dispatch and SIMD16 (compressed) instructions take 4.
436 */
437 virtual int issue_time(backend_instruction *inst) = 0;
438
439 virtual void count_remaining_grf_uses(backend_instruction *inst) = 0;
440 virtual void update_register_pressure(backend_instruction *inst) = 0;
441 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
442
443 void schedule_instructions(backend_instruction *next_block_header);
444
445 void *mem_ctx;
446
447 bool post_reg_alloc;
448 int instructions_to_schedule;
449 int grf_count;
450 int time;
451 exec_list instructions;
452 backend_visitor *bv;
453
454 instruction_scheduler_mode mode;
455
456 /**
457 * Number of instructions left to schedule that reference each vgrf.
458 *
459 * Used so that we can prefer scheduling instructions that will end the
460 * live intervals of multiple variables, to reduce register pressure.
461 */
462 int *remaining_grf_uses;
463
464 /**
465 * Tracks whether each VGRF has had an instruction scheduled that uses it.
466 *
467 * This is used to estimate whether scheduling a new instruction will
468 * increase register pressure.
469 */
470 bool *grf_active;
471 };
472
473 class fs_instruction_scheduler : public instruction_scheduler
474 {
475 public:
476 fs_instruction_scheduler(fs_visitor *v, int grf_count,
477 instruction_scheduler_mode mode);
478 void calculate_deps();
479 bool is_compressed(fs_inst *inst);
480 schedule_node *choose_instruction_to_schedule();
481 int issue_time(backend_instruction *inst);
482 fs_visitor *v;
483
484 void count_remaining_grf_uses(backend_instruction *inst);
485 void update_register_pressure(backend_instruction *inst);
486 int get_register_pressure_benefit(backend_instruction *inst);
487 };
488
489 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
490 int grf_count,
491 instruction_scheduler_mode mode)
492 : instruction_scheduler(v, grf_count, mode),
493 v(v)
494 {
495 }
496
497 void
498 fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
499 {
500 fs_inst *inst = (fs_inst *)be;
501
502 if (!remaining_grf_uses)
503 return;
504
505 if (inst->dst.file == GRF)
506 remaining_grf_uses[inst->dst.reg]++;
507
508 for (int i = 0; i < inst->sources; i++) {
509 if (inst->src[i].file != GRF)
510 continue;
511
512 remaining_grf_uses[inst->src[i].reg]++;
513 }
514 }
515
516 void
517 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
518 {
519 fs_inst *inst = (fs_inst *)be;
520
521 if (!remaining_grf_uses)
522 return;
523
524 if (inst->dst.file == GRF) {
525 remaining_grf_uses[inst->dst.reg]--;
526 grf_active[inst->dst.reg] = true;
527 }
528
529 for (int i = 0; i < inst->sources; i++) {
530 if (inst->src[i].file == GRF) {
531 remaining_grf_uses[inst->src[i].reg]--;
532 grf_active[inst->src[i].reg] = true;
533 }
534 }
535 }
536
537 int
538 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
539 {
540 fs_inst *inst = (fs_inst *)be;
541 int benefit = 0;
542
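   /* Estimate the change in register pressure from scheduling this
    * instruction: a VGRF for which this is the only remaining reference ends
    * its live interval here (a win of its size in registers), while a VGRF
    * that nothing scheduled so far has referenced starts being tracked as
    * live (a cost of its size).
    */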
543 if (inst->dst.file == GRF) {
544 if (remaining_grf_uses[inst->dst.reg] == 1)
545 benefit += v->virtual_grf_sizes[inst->dst.reg];
546 if (!grf_active[inst->dst.reg])
547 benefit -= v->virtual_grf_sizes[inst->dst.reg];
548 }
549
550 for (int i = 0; i < inst->sources; i++) {
551 if (inst->src[i].file != GRF)
552 continue;
553
554 if (remaining_grf_uses[inst->src[i].reg] == 1)
555 benefit += v->virtual_grf_sizes[inst->src[i].reg];
556 if (!grf_active[inst->src[i].reg])
557 benefit -= v->virtual_grf_sizes[inst->src[i].reg];
558 }
559
560 return benefit;
561 }
562
563 class vec4_instruction_scheduler : public instruction_scheduler
564 {
565 public:
566 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
567 void calculate_deps();
568 schedule_node *choose_instruction_to_schedule();
569 int issue_time(backend_instruction *inst);
570 vec4_visitor *v;
571
572 void count_remaining_grf_uses(backend_instruction *inst);
573 void update_register_pressure(backend_instruction *inst);
574 int get_register_pressure_benefit(backend_instruction *inst);
575 };
576
577 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
578 int grf_count)
579 : instruction_scheduler(v, grf_count, SCHEDULE_POST),
580 v(v)
581 {
582 }
583
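/* The vec4 scheduler currently runs only post-register-allocation (its
 * constructor always passes SCHEDULE_POST), so the register-pressure hooks
 * below are no-ops.
 */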
584 void
585 vec4_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
586 {
587 }
588
589 void
590 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
591 {
592 }
593
594 int
595 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
596 {
597 return 0;
598 }
599
600 schedule_node::schedule_node(backend_instruction *inst,
601 instruction_scheduler *sched)
602 {
603 struct brw_context *brw = sched->bv->brw;
604
605 this->inst = inst;
606 this->child_array_size = 0;
607 this->children = NULL;
608 this->child_latency = NULL;
609 this->child_count = 0;
610 this->parent_count = 0;
611 this->unblocked_time = 0;
612 this->cand_generation = 0;
613 this->delay = 0;
614
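   /* Pre-register-allocation scheduling doesn't care about instruction
    * latencies (see choose_instruction_to_schedule), so a nominal latency of
    * 1 is enough there; the measured per-Gen latencies only matter post-RA.
    */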
615    /* We can't measure Gen6 timings directly but expect them to be much
616     * closer to Gen7's than to Gen4's.
617 */
618 if (!sched->post_reg_alloc)
619 this->latency = 1;
620 else if (brw->gen >= 6)
621 set_latency_gen7(brw->is_haswell);
622 else
623 set_latency_gen4();
624 }
625
626 void
627 instruction_scheduler::add_inst(backend_instruction *inst)
628 {
629 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
630
631 assert(!inst->is_head_sentinel());
632 assert(!inst->is_tail_sentinel());
633
634 this->instructions_to_schedule++;
635
636 inst->remove();
637 instructions.push_tail(n);
638 }
639
640 /** Recursive computation of the delay member of a node. */
641 void
642 instruction_scheduler::compute_delay(schedule_node *n)
643 {
644 if (!n->child_count) {
645 n->delay = issue_time(n->inst);
646 } else {
647 for (int i = 0; i < n->child_count; i++) {
648 if (!n->children[i]->delay)
649 compute_delay(n->children[i]);
650 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
651 }
652 }
653 }
654
655 /**
656 * Add a dependency between two instruction nodes.
657 *
658  * The @after node will be scheduled after @before.  We will try to
659  * schedule it @latency cycles after @before, but no guarantee is made.
660 */
661 void
662 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
663 int latency)
664 {
665 if (!before || !after)
666 return;
667
668 assert(before != after);
669
670 for (int i = 0; i < before->child_count; i++) {
671 if (before->children[i] == after) {
672 before->child_latency[i] = MAX2(before->child_latency[i], latency);
673 return;
674 }
675 }
676
677 if (before->child_array_size <= before->child_count) {
678 if (before->child_array_size < 16)
679 before->child_array_size = 16;
680 else
681 before->child_array_size *= 2;
682
683 before->children = reralloc(mem_ctx, before->children,
684 schedule_node *,
685 before->child_array_size);
686 before->child_latency = reralloc(mem_ctx, before->child_latency,
687 int, before->child_array_size);
688 }
689
690 before->children[before->child_count] = after;
691 before->child_latency[before->child_count] = latency;
692 before->child_count++;
693 after->parent_count++;
694 }
695
696 void
697 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
698 {
699 if (!before)
700 return;
701
702 add_dep(before, after, before->latency);
703 }
704
705 /**
706  * Sometimes we really want this node to execute after everything that
707  * came before it and before everything that follows it.  This adds
708  * the dependencies to do so.
709 */
710 void
711 instruction_scheduler::add_barrier_deps(schedule_node *n)
712 {
713 schedule_node *prev = (schedule_node *)n->prev;
714 schedule_node *next = (schedule_node *)n->next;
715
716 if (prev) {
717 while (!prev->is_head_sentinel()) {
718 add_dep(prev, n, 0);
719 prev = (schedule_node *)prev->prev;
720 }
721 }
722
723 if (next) {
724 while (!next->is_tail_sentinel()) {
725 add_dep(n, next, 0);
726 next = (schedule_node *)next->next;
727 }
728 }
729 }
730
731 /* Instruction scheduling needs to be aware of when an MRF write
732  * actually writes 2 MRFs, i.e. when the write is compressed (SIMD16).
733 */
734 bool
735 fs_instruction_scheduler::is_compressed(fs_inst *inst)
736 {
737 return (v->dispatch_width == 16 &&
738 !inst->force_uncompressed &&
739 !inst->force_sechalf);
740 }
741
742 void
743 fs_instruction_scheduler::calculate_deps()
744 {
745 /* Pre-register-allocation, this tracks the last write per VGRF offset.
746 * After register allocation, reg_offsets are gone and we track individual
747 * GRF registers.
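    * Pre-RA the array is indexed as reg * 16 + reg_offset (which assumes a
    * virtual GRF never needs more than 16 slots); post-RA it is indexed by
    * hardware register number.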
748 */
749 schedule_node *last_grf_write[grf_count * 16];
750 schedule_node *last_mrf_write[BRW_MAX_MRF];
751 schedule_node *last_conditional_mod[2] = { NULL, NULL };
752 schedule_node *last_accumulator_write = NULL;
753 /* Fixed HW registers are assumed to be separate from the virtual
754 * GRFs, so they can be tracked separately. We don't really write
755 * to fixed GRFs much, so don't bother tracking them on a more
756 * granular level.
757 */
758 schedule_node *last_fixed_grf_write = NULL;
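   /* In SIMD16 a full-width virtual register spans two hardware GRFs, so the
    * post-RA bookkeeping below scales regs_read/regs_written by reg_width.
    */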
759 int reg_width = v->dispatch_width / 8;
760
761 /* The last instruction always needs to still be the last
762 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
763 * WHILE) and scheduling other things after it would disturb the
764 * basic block, or it's FB_WRITE and we should do a better job at
765 * dead code elimination anyway.
766 */
767 schedule_node *last = (schedule_node *)instructions.get_tail();
768 add_barrier_deps(last);
769
770 memset(last_grf_write, 0, sizeof(last_grf_write));
771 memset(last_mrf_write, 0, sizeof(last_mrf_write));
772
773 /* top-to-bottom dependencies: RAW and WAW. */
774 foreach_in_list(schedule_node, n, &instructions) {
775 fs_inst *inst = (fs_inst *)n->inst;
776
777 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
778 inst->has_side_effects())
779 add_barrier_deps(n);
780
781 /* read-after-write deps. */
782 for (int i = 0; i < inst->sources; i++) {
783 if (inst->src[i].file == GRF) {
784 if (post_reg_alloc) {
785 for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
786 add_dep(last_grf_write[inst->src[i].reg + r], n);
787 } else {
788 for (int r = 0; r < inst->regs_read(v, i); r++) {
789 add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
790 }
791 }
792 } else if (inst->src[i].file == HW_REG &&
793 (inst->src[i].fixed_hw_reg.file ==
794 BRW_GENERAL_REGISTER_FILE)) {
795 if (post_reg_alloc) {
796 int size = reg_width;
797 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
798 size = 1;
799 for (int r = 0; r < size; r++)
800 add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
801 } else {
802 add_dep(last_fixed_grf_write, n);
803 }
804 } else if (inst->src[i].is_accumulator()) {
805 add_dep(last_accumulator_write, n);
806 } else if (inst->src[i].file != BAD_FILE &&
807 inst->src[i].file != IMM &&
808 inst->src[i].file != UNIFORM &&
809 (inst->src[i].file != HW_REG ||
810 inst->src[i].fixed_hw_reg.file != IMM)) {
811 assert(inst->src[i].file != MRF);
812 add_barrier_deps(n);
813 }
814 }
815
816 if (inst->base_mrf != -1) {
817 for (int i = 0; i < inst->mlen; i++) {
818 /* It looks like the MRF regs are released in the send
819 * instruction once it's sent, not when the result comes
820 * back.
821 */
822 add_dep(last_mrf_write[inst->base_mrf + i], n);
823 }
824 }
825
826 if (inst->reads_flag()) {
827 add_dep(last_conditional_mod[inst->flag_subreg], n);
828 }
829
830 if (inst->reads_accumulator_implicitly()) {
831 add_dep(last_accumulator_write, n);
832 }
833
834 /* write-after-write deps. */
835 if (inst->dst.file == GRF) {
836 if (post_reg_alloc) {
837 for (int r = 0; r < inst->regs_written * reg_width; r++) {
838 add_dep(last_grf_write[inst->dst.reg + r], n);
839 last_grf_write[inst->dst.reg + r] = n;
840 }
841 } else {
842 for (int r = 0; r < inst->regs_written; r++) {
843 add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
844 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
845 }
846 }
847 } else if (inst->dst.file == MRF) {
848 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
849
850 add_dep(last_mrf_write[reg], n);
851 last_mrf_write[reg] = n;
852 if (is_compressed(inst)) {
853 if (inst->dst.reg & BRW_MRF_COMPR4)
854 reg += 4;
855 else
856 reg++;
857 add_dep(last_mrf_write[reg], n);
858 last_mrf_write[reg] = n;
859 }
860 } else if (inst->dst.file == HW_REG &&
861 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
862 if (post_reg_alloc) {
863 for (int r = 0; r < reg_width; r++)
864 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
865 } else {
866 last_fixed_grf_write = n;
867 }
868 } else if (inst->dst.is_accumulator()) {
869 add_dep(last_accumulator_write, n);
870 last_accumulator_write = n;
871 } else if (inst->dst.file != BAD_FILE) {
872 add_barrier_deps(n);
873 }
874
875 if (inst->mlen > 0 && inst->base_mrf != -1) {
876 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
877 add_dep(last_mrf_write[inst->base_mrf + i], n);
878 last_mrf_write[inst->base_mrf + i] = n;
879 }
880 }
881
882 if (inst->writes_flag()) {
883 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
884 last_conditional_mod[inst->flag_subreg] = n;
885 }
886
887 if (inst->writes_accumulator_implicitly(v->brw) &&
888 !inst->dst.is_accumulator()) {
889 add_dep(last_accumulator_write, n);
890 last_accumulator_write = n;
891 }
892 }
893
894 /* bottom-to-top dependencies: WAR */
895 memset(last_grf_write, 0, sizeof(last_grf_write));
896 memset(last_mrf_write, 0, sizeof(last_mrf_write));
897 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
898 last_accumulator_write = NULL;
899 last_fixed_grf_write = NULL;
900
901 exec_node *node;
902 exec_node *prev;
903 for (node = instructions.get_tail(), prev = node->prev;
904 !node->is_head_sentinel();
905 node = prev, prev = node->prev) {
906 schedule_node *n = (schedule_node *)node;
907 fs_inst *inst = (fs_inst *)n->inst;
908
909 /* write-after-read deps. */
910 for (int i = 0; i < inst->sources; i++) {
911 if (inst->src[i].file == GRF) {
912 if (post_reg_alloc) {
913 for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
914 add_dep(n, last_grf_write[inst->src[i].reg + r]);
915 } else {
916 for (int r = 0; r < inst->regs_read(v, i); r++) {
917 add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]);
918 }
919 }
920 } else if (inst->src[i].file == HW_REG &&
921 (inst->src[i].fixed_hw_reg.file ==
922 BRW_GENERAL_REGISTER_FILE)) {
923 if (post_reg_alloc) {
924 int size = reg_width;
925 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
926 size = 1;
927 for (int r = 0; r < size; r++)
928 add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r]);
929 } else {
930 add_dep(n, last_fixed_grf_write);
931 }
932 } else if (inst->src[i].is_accumulator()) {
933 add_dep(n, last_accumulator_write);
934 } else if (inst->src[i].file != BAD_FILE &&
935 inst->src[i].file != IMM &&
936 inst->src[i].file != UNIFORM &&
937 (inst->src[i].file != HW_REG ||
938 inst->src[i].fixed_hw_reg.file != IMM)) {
939 assert(inst->src[i].file != MRF);
940 add_barrier_deps(n);
941 }
942 }
943
944 if (inst->base_mrf != -1) {
945 for (int i = 0; i < inst->mlen; i++) {
946 /* It looks like the MRF regs are released in the send
947 * instruction once it's sent, not when the result comes
948 * back.
949 */
950 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
951 }
952 }
953
954 if (inst->reads_flag()) {
955 add_dep(n, last_conditional_mod[inst->flag_subreg]);
956 }
957
958 if (inst->reads_accumulator_implicitly()) {
959 add_dep(n, last_accumulator_write);
960 }
961
962 /* Update the things this instruction wrote, so earlier reads
963        * can mark this as a WAR dependency.
964 */
965 if (inst->dst.file == GRF) {
966 if (post_reg_alloc) {
967 for (int r = 0; r < inst->regs_written * reg_width; r++)
968 last_grf_write[inst->dst.reg + r] = n;
969 } else {
970 for (int r = 0; r < inst->regs_written; r++) {
971 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
972 }
973 }
974 } else if (inst->dst.file == MRF) {
975 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
976
977 last_mrf_write[reg] = n;
978
979 if (is_compressed(inst)) {
980 if (inst->dst.reg & BRW_MRF_COMPR4)
981 reg += 4;
982 else
983 reg++;
984
985 last_mrf_write[reg] = n;
986 }
987 } else if (inst->dst.file == HW_REG &&
988 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
989 if (post_reg_alloc) {
990 for (int r = 0; r < reg_width; r++)
991 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
992 } else {
993 last_fixed_grf_write = n;
994 }
995 } else if (inst->dst.is_accumulator()) {
996 last_accumulator_write = n;
997 } else if (inst->dst.file != BAD_FILE) {
998 add_barrier_deps(n);
999 }
1000
1001 if (inst->mlen > 0 && inst->base_mrf != -1) {
1002 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1003 last_mrf_write[inst->base_mrf + i] = n;
1004 }
1005 }
1006
1007 if (inst->writes_flag()) {
1008 last_conditional_mod[inst->flag_subreg] = n;
1009 }
1010
1011 if (inst->writes_accumulator_implicitly(v->brw)) {
1012 last_accumulator_write = n;
1013 }
1014 }
1015 }
1016
1017 void
1018 vec4_instruction_scheduler::calculate_deps()
1019 {
1020 schedule_node *last_grf_write[grf_count];
1021 schedule_node *last_mrf_write[BRW_MAX_MRF];
1022 schedule_node *last_conditional_mod = NULL;
1023 schedule_node *last_accumulator_write = NULL;
1024 /* Fixed HW registers are assumed to be separate from the virtual
1025 * GRFs, so they can be tracked separately. We don't really write
1026 * to fixed GRFs much, so don't bother tracking them on a more
1027 * granular level.
1028 */
1029 schedule_node *last_fixed_grf_write = NULL;
1030
1031 /* The last instruction always needs to still be the last instruction.
1032 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1033 * other things after it would disturb the basic block, or it's the EOT
1034 * URB_WRITE and we should do a better job at dead code eliminating
1035 * anything that could have been scheduled after it.
1036 */
1037 schedule_node *last = (schedule_node *)instructions.get_tail();
1038 add_barrier_deps(last);
1039
1040 memset(last_grf_write, 0, sizeof(last_grf_write));
1041 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1042
1043 /* top-to-bottom dependencies: RAW and WAW. */
1044 foreach_in_list(schedule_node, n, &instructions) {
1045 vec4_instruction *inst = (vec4_instruction *)n->inst;
1046
1047 if (inst->has_side_effects())
1048 add_barrier_deps(n);
1049
1050 /* read-after-write deps. */
1051 for (int i = 0; i < 3; i++) {
1052 if (inst->src[i].file == GRF) {
1053 add_dep(last_grf_write[inst->src[i].reg], n);
1054 } else if (inst->src[i].file == HW_REG &&
1055 (inst->src[i].fixed_hw_reg.file ==
1056 BRW_GENERAL_REGISTER_FILE)) {
1057 add_dep(last_fixed_grf_write, n);
1058 } else if (inst->src[i].is_accumulator()) {
1059 assert(last_accumulator_write);
1060 add_dep(last_accumulator_write, n);
1061 } else if (inst->src[i].file != BAD_FILE &&
1062 inst->src[i].file != IMM &&
1063 inst->src[i].file != UNIFORM &&
1064 (inst->src[i].file != HW_REG ||
1065 inst->src[i].fixed_hw_reg.file != IMM)) {
1066 /* No reads from MRF, and ATTR is already translated away */
1067 assert(inst->src[i].file != MRF &&
1068 inst->src[i].file != ATTR);
1069 add_barrier_deps(n);
1070 }
1071 }
1072
1073 for (int i = 0; i < inst->mlen; i++) {
1074 /* It looks like the MRF regs are released in the send
1075 * instruction once it's sent, not when the result comes
1076 * back.
1077 */
1078 add_dep(last_mrf_write[inst->base_mrf + i], n);
1079 }
1080
1081 if (inst->reads_flag()) {
1082 assert(last_conditional_mod);
1083 add_dep(last_conditional_mod, n);
1084 }
1085
1086 if (inst->reads_accumulator_implicitly()) {
1087 assert(last_accumulator_write);
1088 add_dep(last_accumulator_write, n);
1089 }
1090
1091 /* write-after-write deps. */
1092 if (inst->dst.file == GRF) {
1093 add_dep(last_grf_write[inst->dst.reg], n);
1094 last_grf_write[inst->dst.reg] = n;
1095 } else if (inst->dst.file == MRF) {
1096 add_dep(last_mrf_write[inst->dst.reg], n);
1097 last_mrf_write[inst->dst.reg] = n;
1098 } else if (inst->dst.file == HW_REG &&
1099 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1100 last_fixed_grf_write = n;
1101 } else if (inst->dst.is_accumulator()) {
1102 add_dep(last_accumulator_write, n);
1103 last_accumulator_write = n;
1104 } else if (inst->dst.file != BAD_FILE) {
1105 add_barrier_deps(n);
1106 }
1107
1108 if (inst->mlen > 0) {
1109 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1110 add_dep(last_mrf_write[inst->base_mrf + i], n);
1111 last_mrf_write[inst->base_mrf + i] = n;
1112 }
1113 }
1114
1115 if (inst->writes_flag()) {
1116 add_dep(last_conditional_mod, n, 0);
1117 last_conditional_mod = n;
1118 }
1119
1120 if (inst->writes_accumulator_implicitly(v->brw) &&
1121 !inst->dst.is_accumulator()) {
1122 add_dep(last_accumulator_write, n);
1123 last_accumulator_write = n;
1124 }
1125 }
1126
1127 /* bottom-to-top dependencies: WAR */
1128 memset(last_grf_write, 0, sizeof(last_grf_write));
1129 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1130 last_conditional_mod = NULL;
1131 last_accumulator_write = NULL;
1132 last_fixed_grf_write = NULL;
1133
1134 exec_node *node;
1135 exec_node *prev;
1136 for (node = instructions.get_tail(), prev = node->prev;
1137 !node->is_head_sentinel();
1138 node = prev, prev = node->prev) {
1139 schedule_node *n = (schedule_node *)node;
1140 vec4_instruction *inst = (vec4_instruction *)n->inst;
1141
1142 /* write-after-read deps. */
1143 for (int i = 0; i < 3; i++) {
1144 if (inst->src[i].file == GRF) {
1145 add_dep(n, last_grf_write[inst->src[i].reg]);
1146 } else if (inst->src[i].file == HW_REG &&
1147 (inst->src[i].fixed_hw_reg.file ==
1148 BRW_GENERAL_REGISTER_FILE)) {
1149 add_dep(n, last_fixed_grf_write);
1150 } else if (inst->src[i].is_accumulator()) {
1151 add_dep(n, last_accumulator_write);
1152 } else if (inst->src[i].file != BAD_FILE &&
1153 inst->src[i].file != IMM &&
1154 inst->src[i].file != UNIFORM &&
1155 (inst->src[i].file != HW_REG ||
1156 inst->src[i].fixed_hw_reg.file != IMM)) {
1157 assert(inst->src[i].file != MRF &&
1158 inst->src[i].file != ATTR);
1159 add_barrier_deps(n);
1160 }
1161 }
1162
1163 for (int i = 0; i < inst->mlen; i++) {
1164 /* It looks like the MRF regs are released in the send
1165 * instruction once it's sent, not when the result comes
1166 * back.
1167 */
1168 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1169 }
1170
1171 if (inst->reads_flag()) {
1172 add_dep(n, last_conditional_mod);
1173 }
1174
1175 if (inst->reads_accumulator_implicitly()) {
1176 add_dep(n, last_accumulator_write);
1177 }
1178
1179 /* Update the things this instruction wrote, so earlier reads
1180        * can mark this as a WAR dependency.
1181 */
1182 if (inst->dst.file == GRF) {
1183 last_grf_write[inst->dst.reg] = n;
1184 } else if (inst->dst.file == MRF) {
1185 last_mrf_write[inst->dst.reg] = n;
1186 } else if (inst->dst.file == HW_REG &&
1187 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1188 last_fixed_grf_write = n;
1189 } else if (inst->dst.is_accumulator()) {
1190 last_accumulator_write = n;
1191 } else if (inst->dst.file != BAD_FILE) {
1192 add_barrier_deps(n);
1193 }
1194
1195 if (inst->mlen > 0) {
1196 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1197 last_mrf_write[inst->base_mrf + i] = n;
1198 }
1199 }
1200
1201 if (inst->writes_flag()) {
1202 last_conditional_mod = n;
1203 }
1204
1205 if (inst->writes_accumulator_implicitly(v->brw)) {
1206 last_accumulator_write = n;
1207 }
1208 }
1209 }
1210
1211 schedule_node *
1212 fs_instruction_scheduler::choose_instruction_to_schedule()
1213 {
1214 struct brw_context *brw = v->brw;
1215 schedule_node *chosen = NULL;
1216
1217 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1218 int chosen_time = 0;
1219
1220 /* Of the instructions ready to execute or the closest to
1221 * being ready, choose the oldest one.
1222 */
1223 foreach_in_list(schedule_node, n, &instructions) {
1224 if (!chosen || n->unblocked_time < chosen_time) {
1225 chosen = n;
1226 chosen_time = n->unblocked_time;
1227 }
1228 }
1229 } else {
1230 /* Before register allocation, we don't care about the latencies of
1231 * instructions. All we care about is reducing live intervals of
1232 * variables so that we can avoid register spilling, or get SIMD16
1233 * shaders which naturally do a better job of hiding instruction
1234 * latency.
1235 */
1236 foreach_in_list(schedule_node, n, &instructions) {
1237 fs_inst *inst = (fs_inst *)n->inst;
1238
1239 if (!chosen) {
1240 chosen = n;
1241 continue;
1242 }
1243
1244 /* Most important: If we can definitely reduce register pressure, do
1245 * so immediately.
1246 */
1247 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1248 int chosen_register_pressure_benefit =
1249 get_register_pressure_benefit(chosen->inst);
1250
1251 if (register_pressure_benefit > 0 &&
1252 register_pressure_benefit > chosen_register_pressure_benefit) {
1253 chosen = n;
1254 continue;
1255 } else if (chosen_register_pressure_benefit > 0 &&
1256 (register_pressure_benefit <
1257 chosen_register_pressure_benefit)) {
1258 continue;
1259 }
1260
1261 if (mode == SCHEDULE_PRE_LIFO) {
1262 /* Prefer instructions that recently became available for
1263 * scheduling. These are the things that are most likely to
1264 * (eventually) make a variable dead and reduce register pressure.
1265 * Typical register pressure estimates don't work for us because
1266 * most of our pressure comes from texturing, where no single
1267 * instruction to schedule will make a vec4 value dead.
1268 */
1269 if (n->cand_generation > chosen->cand_generation) {
1270 chosen = n;
1271 continue;
1272 } else if (n->cand_generation < chosen->cand_generation) {
1273 continue;
1274 }
1275
1276 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1277 * do this, then because we prefer instructions that just became
1278 * candidates, we'll end up in a pattern of scheduling a SEND,
1279 * then the MRFs for the next SEND, then the next SEND, then the
1280 * MRFs, etc., without ever consuming the results of a send.
1281 */
1282 if (brw->gen < 7) {
1283 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1284
1285 /* We use regs_written > 1 as our test for the kind of send
1286 * instruction to avoid -- only sends generate many regs, and a
1287 * single-result send is probably actually reducing register
1288 * pressure.
1289 */
1290 if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) {
1291 chosen = n;
1292 continue;
1293 } else if (inst->regs_written > chosen_inst->regs_written) {
1294 continue;
1295 }
1296 }
1297 }
1298
1299          /* For instructions pushed onto the candidates list at the same time,
1300           * prefer the one with the highest delay to the end of the program.  That
1301           * is the one whose values are most likely to be consumable first (such as
1302           * for a large tree of lowered UBO loads, which appear reversed in
1303           * the instruction stream with respect to when they can be consumed).
1304 */
1305 if (n->delay > chosen->delay) {
1306 chosen = n;
1307 continue;
1308 } else if (n->delay < chosen->delay) {
1309 continue;
1310 }
1311
1312 /* If all other metrics are equal, we prefer the first instruction in
1313           * the list (program execution order).
1314 */
1315 }
1316 }
1317
1318 return chosen;
1319 }
1320
1321 schedule_node *
1322 vec4_instruction_scheduler::choose_instruction_to_schedule()
1323 {
1324 schedule_node *chosen = NULL;
1325 int chosen_time = 0;
1326
1327 /* Of the instructions ready to execute or the closest to being ready,
1328 * choose the oldest one.
1329 */
1330 foreach_in_list(schedule_node, n, &instructions) {
1331 if (!chosen || n->unblocked_time < chosen_time) {
1332 chosen = n;
1333 chosen_time = n->unblocked_time;
1334 }
1335 }
1336
1337 return chosen;
1338 }
1339
1340 int
1341 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1342 {
1343 if (is_compressed((fs_inst *)inst))
1344 return 4;
1345 else
1346 return 2;
1347 }
1348
1349 int
1350 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1351 {
1352 /* We always execute as two vec4s in parallel. */
1353 return 2;
1354 }
1355
1356 void
1357 instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
1358 {
1359 struct brw_context *brw = bv->brw;
1360 time = 0;
1361
1362 /* Remove non-DAG heads from the list. */
1363 foreach_in_list_safe(schedule_node, n, &instructions) {
1364 if (n->parent_count != 0)
1365 n->remove();
1366 }
1367
1368 unsigned cand_generation = 1;
1369 while (!instructions.is_empty()) {
1370 schedule_node *chosen = choose_instruction_to_schedule();
1371
1372 /* Schedule this instruction. */
1373 assert(chosen);
1374 chosen->remove();
1375 next_block_header->insert_before(chosen->inst);
1376 instructions_to_schedule--;
1377 update_register_pressure(chosen->inst);
1378
1379 /* Update the clock for how soon an instruction could start after the
1380 * chosen one.
1381 */
1382 time += issue_time(chosen->inst);
1383
1384 /* If we expected a delay for scheduling, then bump the clock to reflect
1385 * that as well. In reality, the hardware will switch to another
1386 * hyperthread and may not return to dispatching our thread for a while
1387 * even after we're unblocked.
1388 */
1389 time = MAX2(time, chosen->unblocked_time);
1390
1391 if (debug) {
1392 fprintf(stderr, "clock %4d, scheduled: ", time);
1393 bv->dump_instruction(chosen->inst);
1394 }
1395
1396 /* Now that we've scheduled a new instruction, some of its
1397 * children can be promoted to the list of instructions ready to
1398 * be scheduled. Update the children's unblocked time for this
1399 * DAG edge as we do so.
1400 */
1401 for (int i = chosen->child_count - 1; i >= 0; i--) {
1402 schedule_node *child = chosen->children[i];
1403
1404 child->unblocked_time = MAX2(child->unblocked_time,
1405 time + chosen->child_latency[i]);
1406
1407 if (debug) {
1408 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1409 bv->dump_instruction(child->inst);
1410 }
1411
1412 child->cand_generation = cand_generation;
1413 child->parent_count--;
1414 if (child->parent_count == 0) {
1415 if (debug) {
1416 fprintf(stderr, "\t\tnow available\n");
1417 }
1418 instructions.push_head(child);
1419 }
1420 }
1421 cand_generation++;
1422
1423 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
1424        * but it's more limited pre-Gen6, so if we send something off to it then
1425 * the next math instruction isn't going to make progress until the first
1426 * is done.
1427 */
1428 if (brw->gen < 6 && chosen->inst->is_math()) {
1429 foreach_in_list(schedule_node, n, &instructions) {
1430 if (n->inst->is_math())
1431 n->unblocked_time = MAX2(n->unblocked_time,
1432 time + chosen->latency);
1433 }
1434 }
1435 }
1436
1437 assert(instructions_to_schedule == 0);
1438 }
1439
1440 void
1441 instruction_scheduler::run(exec_list *all_instructions)
1442 {
1443 backend_instruction *next_block_header =
1444 (backend_instruction *)all_instructions->head;
1445
1446 if (debug) {
1447 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1448 post_reg_alloc);
1449 bv->dump_instructions();
1450 }
1451
1452 /* Populate the remaining GRF uses array to improve the pre-regalloc
1453 * scheduling.
1454 */
1455 if (remaining_grf_uses) {
1456 foreach_in_list(schedule_node, node, all_instructions) {
1457 count_remaining_grf_uses((backend_instruction *)node);
1458 }
1459 }
1460
1461 while (!next_block_header->is_tail_sentinel()) {
1462 /* Add things to be scheduled until we get to a new BB. */
1463 while (!next_block_header->is_tail_sentinel()) {
1464 backend_instruction *inst = next_block_header;
1465 next_block_header = (backend_instruction *)next_block_header->next;
1466
1467 add_inst(inst);
1468 if (inst->is_control_flow())
1469 break;
1470 }
1471 calculate_deps();
1472
1473 foreach_in_list(schedule_node, n, &instructions) {
1474 compute_delay(n);
1475 }
1476
1477 schedule_instructions(next_block_header);
1478 }
1479
1480 if (debug) {
1481 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1482 post_reg_alloc);
1483 bv->dump_instructions();
1484 }
1485 }
1486
1487 void
1488 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1489 {
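   /* Post-register-allocation we schedule against the hardware registers
    * that were actually used; pre-RA we schedule against the virtual GRFs so
    * that the pressure-tracking heuristics can index them.
    */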
1490 int grf_count;
1491 if (mode == SCHEDULE_POST)
1492 grf_count = grf_used;
1493 else
1494 grf_count = virtual_grf_count;
1495
1496 fs_instruction_scheduler sched(this, grf_count, mode);
1497 sched.run(&instructions);
1498
1499 if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) {
1500 fprintf(stderr, "fs%d estimated execution time: %d cycles\n",
1501 dispatch_width, sched.time);
1502 }
1503
1504 invalidate_live_intervals();
1505 }
1506
1507 void
1508 vec4_visitor::opt_schedule_instructions()
1509 {
1510 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1511 sched.run(&instructions);
1512
1513 if (unlikely(debug_flag)) {
1514 fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time);
1515 }
1516
1517 invalidate_live_intervals();
1518 }