i965/sched: use liveness analysis for computing register pressure
[mesa.git] / src / mesa / drivers / dri / i965 / brw_schedule_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_fs.h"
29 #include "brw_fs_live_variables.h"
30 #include "brw_vec4.h"
31 #include "brw_cfg.h"
32 #include "brw_shader.h"
33 #include "glsl/nir/glsl_types.h"
34 #include "glsl/ir_optimization.h"
35
36 using namespace brw;
37
38 /** @file brw_schedule_instructions.cpp
39 *
40 * List scheduling of FS and vec4 instructions.
41 *
42 * The basic model of the list scheduler is to take a basic block,
43 * compute a DAG of the dependencies (RAW ordering with latency, WAW
44 * ordering with latency, WAR ordering), and make a list of the DAG heads.
45 * Heuristically pick a DAG head, then put all the children that are
46 * now DAG heads into the list of things to schedule.
47 *
48 * The heuristic is the important part. We're trying to be cheap,
49 * since actually computing the optimal scheduling is NP complete.
50 * What we do is track a "current clock". When we schedule a node, we
51 * update the earliest-unblocked clock time of its children, and
52 * increment the clock. Then, when trying to schedule, we just pick
53 * the earliest-unblocked instruction to schedule.
54 *
55 * Note that often there will be many things which could execute
56 * immediately, and there is a range of heuristic options to choose
57 * from when picking among them.
58 */
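/* A minimal sketch (kept under "#if 0", never compiled) of the
 * earliest-unblocked pick described above.  The ready-list parameter and the
 * function name are hypothetical; the real selection logic, including the
 * pre-register-allocation pressure heuristics, is in
 * choose_instruction_to_schedule() further down.
 */
#if 0
static schedule_node *
pick_earliest_unblocked(exec_list *ready)
{
   schedule_node *best = NULL;
   foreach_in_list(schedule_node, n, ready) {
      /* Prefer the candidate whose dependencies are satisfied soonest. */
      if (!best || n->unblocked_time < best->unblocked_time)
         best = n;
   }
   return best;
}
#endif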
59
60 static bool debug = false;
61
62 class instruction_scheduler;
63
64 class schedule_node : public exec_node
65 {
66 public:
67 schedule_node(backend_instruction *inst, instruction_scheduler *sched);
68 void set_latency_gen4();
69 void set_latency_gen7(bool is_haswell);
70
71 backend_instruction *inst;
72 schedule_node **children;
73 int *child_latency;
74 int child_count;
75 int parent_count;
76 int child_array_size;
77 int unblocked_time;
78 int latency;
79
80 /**
81 * Which iteration of pushing groups of children onto the candidates list
82 * this node was a part of.
83 */
84 unsigned cand_generation;
85
86 /**
87 * This is the sum of the instruction's latency plus the maximum delay of
88 * its children, or just the issue_time if it's a leaf node.
89 */
90 int delay;
91 };
92
93 void
94 schedule_node::set_latency_gen4()
95 {
96 int chans = 8;
97 int math_latency = 22;
98
99 switch (inst->opcode) {
100 case SHADER_OPCODE_RCP:
101 this->latency = 1 * chans * math_latency;
102 break;
103 case SHADER_OPCODE_RSQ:
104 this->latency = 2 * chans * math_latency;
105 break;
106 case SHADER_OPCODE_INT_QUOTIENT:
107 case SHADER_OPCODE_SQRT:
108 case SHADER_OPCODE_LOG2:
109 /* full precision log. partial is 2. */
110 this->latency = 3 * chans * math_latency;
111 break;
112 case SHADER_OPCODE_INT_REMAINDER:
113 case SHADER_OPCODE_EXP2:
114 /* full precision. partial is 3, same throughput. */
115 this->latency = 4 * chans * math_latency;
116 break;
117 case SHADER_OPCODE_POW:
118 this->latency = 8 * chans * math_latency;
119 break;
120 case SHADER_OPCODE_SIN:
121 case SHADER_OPCODE_COS:
122 /* minimum latency, max is 12 rounds. */
123 this->latency = 5 * chans * math_latency;
124 break;
125 default:
126 this->latency = 2;
127 break;
128 }
129 }
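/* Worked example of the Gen4 model above: POW is 8 rounds * 8 channels *
 * 22 cycles of math latency = 1408 cycles, while anything that falls
 * through to the default case is modeled as a 2 cycle ALU instruction.
 */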
130
131 void
132 schedule_node::set_latency_gen7(bool is_haswell)
133 {
134 switch (inst->opcode) {
135 case BRW_OPCODE_MAD:
136 /* 2 cycles
137 * (since the last two src operands are in different register banks):
138 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
139 *
140 * 3 cycles on IVB, 4 on HSW
141 * (since the last two src operands are in the same register bank):
142 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
143 *
144 * 18 cycles on IVB, 16 on HSW
145 * (since the last two src operands are in different register banks):
146 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
147 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
148 *
149 * 20 cycles on IVB, 18 on HSW
150 * (since the last two src operands are in the same register bank):
151 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
152 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
153 */
154
155 /* Our register allocator doesn't know about register banks, so use the
156 * higher latency.
157 */
158 latency = is_haswell ? 16 : 18;
159 break;
160
161 case BRW_OPCODE_LRP:
162 /* 2 cycles
163 * (since the last two src operands are in different register banks):
164 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
165 *
166 * 3 cycles on IVB, 4 on HSW
167 * (since the last two src operands are in the same register bank):
168 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
169 *
170 * 16 cycles on IVB, 14 on HSW
171 * (since the last two src operands are in different register banks):
172 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
173 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
174 *
175 * 16 cycles
176 * (since the last two src operands are in the same register bank):
177 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
178 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
179 */
180
181 /* Our register allocator doesn't know about register banks, so use the
182 * higher latency.
183 */
184 latency = 14;
185 break;
186
187 case SHADER_OPCODE_RCP:
188 case SHADER_OPCODE_RSQ:
189 case SHADER_OPCODE_SQRT:
190 case SHADER_OPCODE_LOG2:
191 case SHADER_OPCODE_EXP2:
192 case SHADER_OPCODE_SIN:
193 case SHADER_OPCODE_COS:
194 /* 2 cycles:
195 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
196 *
197 * 18 cycles:
198 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
199 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
200 *
201 * Same for exp2, log2, rsq, sqrt, sin, cos.
202 */
203 latency = is_haswell ? 14 : 16;
204 break;
205
206 case SHADER_OPCODE_POW:
207 /* 2 cycles:
208 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
209 *
210 * 26 cycles:
211 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
212 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
213 */
214 latency = is_haswell ? 22 : 24;
215 break;
216
217 case SHADER_OPCODE_TEX:
218 case SHADER_OPCODE_TXD:
219 case SHADER_OPCODE_TXF:
220 case SHADER_OPCODE_TXL:
221 /* 18 cycles:
222 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
223 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
224 * send(8) g4<1>UW g114<8,8,1>F
225 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
226 *
227 * 697 +/- 49 cycles (min 610, n=26):
228 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
229 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
230 * send(8) g4<1>UW g114<8,8,1>F
231 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
232 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
233 *
234 * So the latency on our first texture load of the batchbuffer takes
235 * ~700 cycles, since the caches are cold at that point.
236 *
237 * 840 +/- 92 cycles (min 720, n=25):
238 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
239 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
240 * send(8) g4<1>UW g114<8,8,1>F
241 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
242 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
243 * send(8) g4<1>UW g114<8,8,1>F
244 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
245 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
246 *
247 * On the second load, it takes just an extra ~140 cycles, and after
248 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
249 *
250 * 683 +/- 49 cycles (min = 602, n=47):
251 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
252 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
253 * send(8) g4<1>UW g114<8,8,1>F
254 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
255 * send(8) g50<1>UW g114<8,8,1>F
256 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
257 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
258 *
259 * The unit appears to be pipelined, since this matches up with the
260 * cache-cold case, despite there being two loads here. If you replace
261 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
262 *
263 * So, take some number between the cache-hot 140 cycles and the
264 * cache-cold 700 cycles. No particular tuning was done on this.
265 *
266 * I haven't done significant testing of the non-TEX opcodes. TXL at
267 * least looked about the same as TEX.
268 */
269 latency = 200;
270 break;
271
272 case SHADER_OPCODE_TXS:
273 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
274 * cycles (n=15):
275 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
276 * send(8) g6<1>UW g114<8,8,1>F
277 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
278 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
279 *
280 *
281 * Two loads took 535 +/- 30 cycles (n=19):
282 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
283 * send(16) g6<1>UW g114<8,8,1>F
284 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
285 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
286 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
287 * send(16) g8<1>UW g114<8,8,1>F
288 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
289 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
290 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
291 *
292 * Since the only caches that should matter are just the
293 * instruction/state cache containing the surface state, assume that we
294 * always have hot caches.
295 */
296 latency = 100;
297 break;
298
299 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
300 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
301 case VS_OPCODE_PULL_CONSTANT_LOAD:
302 /* testing using varying-index pull constants:
303 *
304 * 16 cycles:
305 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
306 * send(8) g4<1>F g4<8,8,1>D
307 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
308 *
309 * ~480 cycles:
310 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
311 * send(8) g4<1>F g4<8,8,1>D
312 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
313 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
314 *
315 * ~620 cycles:
316 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
317 * send(8) g4<1>F g4<8,8,1>D
318 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
319 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
320 * send(8) g4<1>F g4<8,8,1>D
321 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
322 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
323 *
324 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
325 * about 460. We expect to mostly be cache hot, so pick something more
326 * in that direction.
327 */
328 latency = 200;
329 break;
330
331 case SHADER_OPCODE_GEN7_SCRATCH_READ:
332 /* Testing a load from offset 0, that had been previously written:
333 *
334 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
335 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
336 *
337 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
338 * then around 140. Presumably this is cache hit vs miss.
339 */
340 latency = 50;
341 break;
342
343 case SHADER_OPCODE_UNTYPED_ATOMIC:
344 case SHADER_OPCODE_TYPED_ATOMIC:
345 /* Test code:
346 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
347 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
348 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
349 * send(8) g4<1>ud g112<8,8,1>ud
350 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
351 *
352 * Running it 100 times as fragment shader on a 128x128 quad
353 * gives an average latency of 13867 cycles per atomic op,
354 * standard deviation 3%. Note that this is a rather
355 * pessimistic estimate, the actual latency in cases with few
356 * collisions between threads and favorable pipelining has been
357 * seen to be reduced by a factor of 100.
358 */
359 latency = 14000;
360 break;
361
362 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
363 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
364 case SHADER_OPCODE_TYPED_SURFACE_READ:
365 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
366 /* Test code:
367 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
368 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
369 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
370 * send(8) g4<1>UD g112<8,8,1>UD
371 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
372 * .
373 * . [repeats 8 times]
374 * .
375 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
376 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
377 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
378 * send(8) g4<1>UD g112<8,8,1>UD
379 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
380 *
381 * Running it 100 times as fragment shader on a 128x128 quad
382 * gives an average latency of 583 cycles per surface read,
383 * standard deviation 0.9%.
384 */
385 latency = is_haswell ? 300 : 600;
386 break;
387
388 default:
389 /* 2 cycles:
390 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
391 *
392 * 16 cycles:
393 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
394 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
395 */
396 latency = 14;
397 break;
398 }
399 }
400
401 class instruction_scheduler {
402 public:
403 instruction_scheduler(backend_shader *s, int grf_count,
404 int hw_reg_count, int block_count,
405 instruction_scheduler_mode mode)
406 {
407 this->bs = s;
408 this->mem_ctx = ralloc_context(NULL);
409 this->grf_count = grf_count;
410 this->hw_reg_count = hw_reg_count;
411 this->instructions.make_empty();
412 this->instructions_to_schedule = 0;
413 this->post_reg_alloc = (mode == SCHEDULE_POST);
414 this->mode = mode;
415 this->time = 0;
416 if (!post_reg_alloc) {
417 this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);
418
419 this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
420 for (int i = 0; i < block_count; i++)
421 this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
422 BITSET_WORDS(grf_count));
423
424 this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
425 for (int i = 0; i < block_count; i++)
426 this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
427 BITSET_WORDS(grf_count));
428
429 this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
430 for (int i = 0; i < block_count; i++)
431 this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
432 BITSET_WORDS(hw_reg_count));
433
434 this->written = rzalloc_array(mem_ctx, bool, grf_count);
435
436 this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);
437
438 this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
439 } else {
440 this->reg_pressure_in = NULL;
441 this->livein = NULL;
442 this->liveout = NULL;
443 this->hw_liveout = NULL;
444 this->written = NULL;
445 this->reads_remaining = NULL;
446 this->hw_reads_remaining = NULL;
447 }
448 }
449
450 ~instruction_scheduler()
451 {
452 ralloc_free(this->mem_ctx);
453 }
454 void add_barrier_deps(schedule_node *n);
455 void add_dep(schedule_node *before, schedule_node *after, int latency);
456 void add_dep(schedule_node *before, schedule_node *after);
457
458 void run(cfg_t *cfg);
459 void add_insts_from_block(bblock_t *block);
460 void compute_delay(schedule_node *node);
461 virtual void calculate_deps() = 0;
462 virtual schedule_node *choose_instruction_to_schedule() = 0;
463
464 /**
465 * Returns how many cycles it takes the instruction to issue.
466 *
467 * Instructions in Gen hardware are handled one SIMD4 vector at a time,
468 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
469 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
470 */
471 virtual int issue_time(backend_instruction *inst) = 0;
472
473 virtual void count_reads_remaining(backend_instruction *inst) = 0;
474 virtual void setup_liveness(cfg_t *cfg) = 0;
475 virtual void update_register_pressure(backend_instruction *inst) = 0;
476 virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
477
478 void schedule_instructions(bblock_t *block);
479
480 void *mem_ctx;
481
482 bool post_reg_alloc;
483 int instructions_to_schedule;
484 int grf_count;
485 int hw_reg_count;
486 int time;
487 int reg_pressure;
488 int block_idx;
489 exec_list instructions;
490 backend_shader *bs;
491
492 instruction_scheduler_mode mode;
493
494 /*
495 * The register pressure at the beginning of each basic block.
496 */
497
498 int *reg_pressure_in;
499
500 /*
501 * The virtual GRFs whose range overlaps the beginning of each basic block.
502 */
503
504 BITSET_WORD **livein;
505
506 /*
507 * The virtual GRFs whose range overlaps the end of each basic block.
508 */
509
510 BITSET_WORD **liveout;
511
512 /*
513 * The hardware GRFs whose range overlaps the end of each basic block.
514 */
515
516 BITSET_WORD **hw_liveout;
517
518 /*
519 * Whether we've scheduled a write for this virtual GRF yet.
520 */
521
522 bool *written;
523
524 /*
525 * How many reads we haven't scheduled for this virtual GRF yet.
526 */
527
528 int *reads_remaining;
529
530 /*
531 * How many reads we haven't scheduled for this hardware GRF yet.
532 */
533
534 int *hw_reads_remaining;
535 };
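/* Taken together, the per-block liveness sets above let the scheduler track
 * register pressure incrementally: reg_pressure starts each block at
 * reg_pressure_in[block], and every scheduled instruction subtracts its
 * get_register_pressure_benefit(), which credits GRFs whose last remaining
 * read was just scheduled and debits GRFs that are being written for the
 * first time (see schedule_instructions() and the fs implementation below).
 */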
536
537 class fs_instruction_scheduler : public instruction_scheduler
538 {
539 public:
540 fs_instruction_scheduler(fs_visitor *v, int grf_count, int hw_reg_count,
541 int block_count,
542 instruction_scheduler_mode mode);
543 void calculate_deps();
544 bool is_compressed(fs_inst *inst);
545 schedule_node *choose_instruction_to_schedule();
546 int issue_time(backend_instruction *inst);
547 fs_visitor *v;
548
549 void count_reads_remaining(backend_instruction *inst);
550 void setup_liveness(cfg_t *cfg);
551 void update_register_pressure(backend_instruction *inst);
552 int get_register_pressure_benefit(backend_instruction *inst);
553 };
554
555 fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
556 int grf_count, int hw_reg_count,
557 int block_count,
558 instruction_scheduler_mode mode)
559 : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
560 v(v)
561 {
562 }
563
564 static bool
565 is_src_duplicate(fs_inst *inst, int src)
566 {
567 for (int i = 0; i < src; i++)
568 if (inst->src[i].equals(inst->src[src]))
569 return true;
570
571 return false;
572 }
573
574 void
575 fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
576 {
577 fs_inst *inst = (fs_inst *)be;
578
579 if (!reads_remaining)
580 return;
581
582 for (int i = 0; i < inst->sources; i++) {
583 if (is_src_duplicate(inst, i))
584 continue;
585
586 if (inst->src[i].file == GRF) {
587 reads_remaining[inst->src[i].reg]++;
588 } else if (inst->src[i].file == HW_REG &&
589 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
590 if (inst->src[i].fixed_hw_reg.nr >= hw_reg_count)
591 continue;
592
593 for (int j = 0; j < inst->regs_read(i); j++)
594 hw_reads_remaining[inst->src[i].fixed_hw_reg.nr + j]++;
595 }
596 }
597 }
598
599 void
600 fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
601 {
602 /* First, compute liveness on a per-GRF level using the in/out sets from
603 * liveness calculation.
604 */
605 for (int block = 0; block < cfg->num_blocks; block++) {
606 for (int i = 0; i < v->live_intervals->num_vars; i++) {
607 if (BITSET_TEST(v->live_intervals->block_data[block].livein, i)) {
608 int vgrf = v->live_intervals->vgrf_from_var[i];
609 if (!BITSET_TEST(livein[block], vgrf)) {
610 reg_pressure_in[block] += v->alloc.sizes[vgrf];
611 BITSET_SET(livein[block], vgrf);
612 }
613 }
614
615 if (BITSET_TEST(v->live_intervals->block_data[block].liveout, i))
616 BITSET_SET(liveout[block], v->live_intervals->vgrf_from_var[i]);
617 }
618 }
619
620 /* Now, extend the live in/live out sets for when a range crosses a block
621 * boundary, which matches what our register allocator/interference code
622 * does to account for force_writemask_all and incompatible exec_mask's.
623 */
624 for (int block = 0; block < cfg->num_blocks - 1; block++) {
625 for (int i = 0; i < grf_count; i++) {
626 if (v->virtual_grf_start[i] <= cfg->blocks[block]->end_ip &&
627 v->virtual_grf_end[i] >= cfg->blocks[block + 1]->start_ip) {
628 if (!BITSET_TEST(livein[block + 1], i)) {
629 reg_pressure_in[block + 1] += v->alloc.sizes[i];
630 BITSET_SET(livein[block + 1], i);
631 }
632
633 BITSET_SET(liveout[block], i);
634 }
635 }
636 }
637
638 int payload_last_use_ip[hw_reg_count];
639 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
640
641 for (int i = 0; i < hw_reg_count; i++) {
642 if (payload_last_use_ip[i] == -1)
643 continue;
644
645 for (int block = 0; block < cfg->num_blocks; block++) {
646 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
647 reg_pressure_in[block]++;
648
649 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
650 BITSET_SET(hw_liveout[block], i);
651 }
652 }
653 }
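/* For example, assuming blocks 0-2 form straight-line control flow, a
 * virtual GRF written in block 0 and last read in block 2 should end up in
 * liveout[0], livein[1], liveout[1] and livein[2] after the range-extension
 * pass above, and it contributes alloc.sizes[vgrf] registers to
 * reg_pressure_in of blocks 1 and 2.
 */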
654
655 void
656 fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
657 {
658 fs_inst *inst = (fs_inst *)be;
659
660 if (!reads_remaining)
661 return;
662
663 if (inst->dst.file == GRF) {
664 written[inst->dst.reg] = true;
665 }
666
667 for (int i = 0; i < inst->sources; i++) {
668 if (is_src_duplicate(inst, i))
669 continue;
670
671 if (inst->src[i].file == GRF) {
672 reads_remaining[inst->src[i].reg]--;
673 } else if (inst->src[i].file == HW_REG &&
674 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE &&
675 inst->src[i].fixed_hw_reg.nr < hw_reg_count) {
676 for (int off = 0; off < inst->regs_read(i); off++)
677 hw_reads_remaining[inst->src[i].fixed_hw_reg.nr + off]--;
678 }
679 }
680 }
681
682 int
683 fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
684 {
685 fs_inst *inst = (fs_inst *)be;
686 int benefit = 0;
687
688 if (inst->dst.file == GRF) {
689 if (!BITSET_TEST(livein[block_idx], inst->dst.reg) &&
690 !written[inst->dst.reg])
691 benefit -= v->alloc.sizes[inst->dst.reg];
692 }
693
694 for (int i = 0; i < inst->sources; i++) {
695 if (is_src_duplicate(inst, i))
696 continue;
697
698 if (inst->src[i].file == GRF &&
699 !BITSET_TEST(liveout[block_idx], inst->src[i].reg) &&
700 reads_remaining[inst->src[i].reg] == 1)
701 benefit += v->alloc.sizes[inst->src[i].reg];
702
703 if (inst->src[i].file == HW_REG &&
704 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE &&
705 inst->src[i].fixed_hw_reg.nr < hw_reg_count) {
706 for (int off = 0; off < inst->regs_read(i); off++) {
707 int reg = inst->src[i].fixed_hw_reg.nr + off;
708 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
709 hw_reads_remaining[reg] == 1) {
710 benefit++;
711 }
712 }
713 }
714 }
715
716 return benefit;
717 }
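/* Example: an instruction that performs the last outstanding read of a
 * two-register VGRF not in liveout (+2) while writing a fresh one-register
 * VGRF that is neither live-in nor already written (-1) has a net benefit
 * of +1, i.e. scheduling it is expected to drop pressure by one GRF.
 */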
718
719 class vec4_instruction_scheduler : public instruction_scheduler
720 {
721 public:
722 vec4_instruction_scheduler(vec4_visitor *v, int grf_count);
723 void calculate_deps();
724 schedule_node *choose_instruction_to_schedule();
725 int issue_time(backend_instruction *inst);
726 vec4_visitor *v;
727
728 void count_reads_remaining(backend_instruction *inst);
729 void setup_liveness(cfg_t *cfg);
730 void update_register_pressure(backend_instruction *inst);
731 int get_register_pressure_benefit(backend_instruction *inst);
732 };
733
734 vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
735 int grf_count)
736 : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
737 v(v)
738 {
739 }
740
741 void
742 vec4_instruction_scheduler::count_reads_remaining(backend_instruction *be)
743 {
744 }
745
746 void
747 vec4_instruction_scheduler::setup_liveness(cfg_t *cfg)
748 {
749 }
750
751 void
752 vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
753 {
754 }
755
756 int
757 vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
758 {
759 return 0;
760 }
761
762 schedule_node::schedule_node(backend_instruction *inst,
763 instruction_scheduler *sched)
764 {
765 const struct brw_device_info *devinfo = sched->bs->devinfo;
766
767 this->inst = inst;
768 this->child_array_size = 0;
769 this->children = NULL;
770 this->child_latency = NULL;
771 this->child_count = 0;
772 this->parent_count = 0;
773 this->unblocked_time = 0;
774 this->cand_generation = 0;
775 this->delay = 0;
776
777 /* We can't measure Gen6 timings directly but expect them to be much
778 * closer to Gen7 than Gen4.
779 */
780 if (!sched->post_reg_alloc)
781 this->latency = 1;
782 else if (devinfo->gen >= 6)
783 set_latency_gen7(devinfo->is_haswell);
784 else
785 set_latency_gen4();
786 }
787
788 void
789 instruction_scheduler::add_insts_from_block(bblock_t *block)
790 {
791 /* Removing the last instruction from a basic block removes the block as
792 * well, so put a NOP at the end to keep it alive.
793 */
794 if (!block->end()->is_control_flow()) {
795 backend_instruction *nop = new(mem_ctx) backend_instruction();
796 nop->opcode = BRW_OPCODE_NOP;
797 block->end()->insert_after(block, nop);
798 }
799
800 foreach_inst_in_block_safe(backend_instruction, inst, block) {
801 if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
802 continue;
803
804 schedule_node *n = new(mem_ctx) schedule_node(inst, this);
805
806 this->instructions_to_schedule++;
807
808 inst->remove(block);
809 instructions.push_tail(n);
810 }
811 }
812
813 /** Recursive computation of the delay member of a node. */
814 void
815 instruction_scheduler::compute_delay(schedule_node *n)
816 {
817 if (!n->child_count) {
818 n->delay = issue_time(n->inst);
819 } else {
820 for (int i = 0; i < n->child_count; i++) {
821 if (!n->children[i]->delay)
822 compute_delay(n->children[i]);
823 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
824 }
825 }
826 }
827
828 /**
829 * Add a dependency between two instruction nodes.
830 *
831 * The @after node will be scheduled after @before. We will try to
832 * schedule it @latency cycles after @before, but no guarantees there.
833 */
834 void
835 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
836 int latency)
837 {
838 if (!before || !after)
839 return;
840
841 assert(before != after);
842
843 for (int i = 0; i < before->child_count; i++) {
844 if (before->children[i] == after) {
845 before->child_latency[i] = MAX2(before->child_latency[i], latency);
846 return;
847 }
848 }
849
850 if (before->child_array_size <= before->child_count) {
851 if (before->child_array_size < 16)
852 before->child_array_size = 16;
853 else
854 before->child_array_size *= 2;
855
856 before->children = reralloc(mem_ctx, before->children,
857 schedule_node *,
858 before->child_array_size);
859 before->child_latency = reralloc(mem_ctx, before->child_latency,
860 int, before->child_array_size);
861 }
862
863 before->children[before->child_count] = after;
864 before->child_latency[before->child_count] = latency;
865 before->child_count++;
866 after->parent_count++;
867 }
868
869 void
870 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
871 {
872 if (!before)
873 return;
874
875 add_dep(before, after, before->latency);
876 }
877
878 /**
879 * Sometimes we really want this node to execute after everything that
880 * was before it and before everything that followed it. This adds
881 * the deps to do so.
882 */
883 void
884 instruction_scheduler::add_barrier_deps(schedule_node *n)
885 {
886 schedule_node *prev = (schedule_node *)n->prev;
887 schedule_node *next = (schedule_node *)n->next;
888
889 if (prev) {
890 while (!prev->is_head_sentinel()) {
891 add_dep(prev, n, 0);
892 prev = (schedule_node *)prev->prev;
893 }
894 }
895
896 if (next) {
897 while (!next->is_tail_sentinel()) {
898 add_dep(n, next, 0);
899 next = (schedule_node *)next->next;
900 }
901 }
902 }
903
904 /* instruction scheduling needs to be aware of when an MRF write
905 * actually writes 2 MRFs.
906 */
907 bool
908 fs_instruction_scheduler::is_compressed(fs_inst *inst)
909 {
910 return inst->exec_size == 16;
911 }
912
913 void
914 fs_instruction_scheduler::calculate_deps()
915 {
916 /* Pre-register-allocation, this tracks the last write per VGRF offset.
917 * After register allocation, reg_offsets are gone and we track individual
918 * GRF registers.
919 */
920 schedule_node *last_grf_write[grf_count * 16];
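/* Pre-RA the write slot for (reg, reg_offset) is reg * 16 + reg_offset,
 * which assumes no virtual GRF spans more than 16 registers; post-RA the
 * slot is simply the hardware GRF number.
 */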
921 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
922 schedule_node *last_conditional_mod[2] = { NULL, NULL };
923 schedule_node *last_accumulator_write = NULL;
924 /* Fixed HW registers are assumed to be separate from the virtual
925 * GRFs, so they can be tracked separately. We don't really write
926 * to fixed GRFs much, so don't bother tracking them on a more
927 * granular level.
928 */
929 schedule_node *last_fixed_grf_write = NULL;
930 int reg_width = v->dispatch_width / 8;
931
932 /* The last instruction always needs to still be the last
933 * instruction. Either it's flow control (IF, ELSE, ENDIF, DO,
934 * WHILE) and scheduling other things after it would disturb the
935 * basic block, or it's FB_WRITE and we should do a better job at
936 * dead code elimination anyway.
937 */
938 schedule_node *last = (schedule_node *)instructions.get_tail();
939 add_barrier_deps(last);
940
941 memset(last_grf_write, 0, sizeof(last_grf_write));
942 memset(last_mrf_write, 0, sizeof(last_mrf_write));
943
944 /* top-to-bottom dependencies: RAW and WAW. */
945 foreach_in_list(schedule_node, n, &instructions) {
946 fs_inst *inst = (fs_inst *)n->inst;
947
948 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
949 inst->has_side_effects())
950 add_barrier_deps(n);
951
952 /* read-after-write deps. */
953 for (int i = 0; i < inst->sources; i++) {
954 if (inst->src[i].file == GRF) {
955 if (post_reg_alloc) {
956 for (int r = 0; r < inst->regs_read(i); r++)
957 add_dep(last_grf_write[inst->src[i].reg + r], n);
958 } else {
959 for (int r = 0; r < inst->regs_read(i); r++) {
960 add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
961 }
962 }
963 } else if (inst->src[i].file == HW_REG &&
964 (inst->src[i].fixed_hw_reg.file ==
965 BRW_GENERAL_REGISTER_FILE)) {
966 if (post_reg_alloc) {
967 int size = reg_width;
968 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
969 size = 1;
970 for (int r = 0; r < size; r++)
971 add_dep(last_grf_write[inst->src[i].fixed_hw_reg.nr + r], n);
972 } else {
973 add_dep(last_fixed_grf_write, n);
974 }
975 } else if (inst->src[i].is_accumulator()) {
976 add_dep(last_accumulator_write, n);
977 } else if (inst->src[i].file != BAD_FILE &&
978 inst->src[i].file != IMM &&
979 inst->src[i].file != UNIFORM &&
980 (inst->src[i].file != HW_REG ||
981 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
982 assert(inst->src[i].file != MRF);
983 add_barrier_deps(n);
984 }
985 }
986
987 if (inst->base_mrf != -1) {
988 for (int i = 0; i < inst->mlen; i++) {
989 /* It looks like the MRF regs are released in the send
990 * instruction once it's sent, not when the result comes
991 * back.
992 */
993 add_dep(last_mrf_write[inst->base_mrf + i], n);
994 }
995 }
996
997 if (inst->reads_flag()) {
998 add_dep(last_conditional_mod[inst->flag_subreg], n);
999 }
1000
1001 if (inst->reads_accumulator_implicitly()) {
1002 add_dep(last_accumulator_write, n);
1003 }
1004
1005 /* write-after-write deps. */
1006 if (inst->dst.file == GRF) {
1007 if (post_reg_alloc) {
1008 for (int r = 0; r < inst->regs_written; r++) {
1009 add_dep(last_grf_write[inst->dst.reg + r], n);
1010 last_grf_write[inst->dst.reg + r] = n;
1011 }
1012 } else {
1013 for (int r = 0; r < inst->regs_written; r++) {
1014 add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
1015 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
1016 }
1017 }
1018 } else if (inst->dst.file == MRF) {
1019 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
1020
1021 add_dep(last_mrf_write[reg], n);
1022 last_mrf_write[reg] = n;
1023 if (is_compressed(inst)) {
1024 if (inst->dst.reg & BRW_MRF_COMPR4)
1025 reg += 4;
1026 else
1027 reg++;
1028 add_dep(last_mrf_write[reg], n);
1029 last_mrf_write[reg] = n;
1030 }
1031 } else if (inst->dst.file == HW_REG &&
1032 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1033 if (post_reg_alloc) {
1034 for (int r = 0; r < reg_width; r++)
1035 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
1036 } else {
1037 last_fixed_grf_write = n;
1038 }
1039 } else if (inst->dst.is_accumulator()) {
1040 add_dep(last_accumulator_write, n);
1041 last_accumulator_write = n;
1042 } else if (inst->dst.file != BAD_FILE &&
1043 !inst->dst.is_null()) {
1044 add_barrier_deps(n);
1045 }
1046
1047 if (inst->mlen > 0 && inst->base_mrf != -1) {
1048 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1049 add_dep(last_mrf_write[inst->base_mrf + i], n);
1050 last_mrf_write[inst->base_mrf + i] = n;
1051 }
1052 }
1053
1054 if (inst->writes_flag()) {
1055 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
1056 last_conditional_mod[inst->flag_subreg] = n;
1057 }
1058
1059 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1060 !inst->dst.is_accumulator()) {
1061 add_dep(last_accumulator_write, n);
1062 last_accumulator_write = n;
1063 }
1064 }
1065
1066 /* bottom-to-top dependencies: WAR */
1067 memset(last_grf_write, 0, sizeof(last_grf_write));
1068 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1069 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1070 last_accumulator_write = NULL;
1071 last_fixed_grf_write = NULL;
1072
1073 exec_node *node;
1074 exec_node *prev;
1075 for (node = instructions.get_tail(), prev = node->prev;
1076 !node->is_head_sentinel();
1077 node = prev, prev = node->prev) {
1078 schedule_node *n = (schedule_node *)node;
1079 fs_inst *inst = (fs_inst *)n->inst;
1080
1081 /* write-after-read deps. */
1082 for (int i = 0; i < inst->sources; i++) {
1083 if (inst->src[i].file == GRF) {
1084 if (post_reg_alloc) {
1085 for (int r = 0; r < inst->regs_read(i); r++)
1086 add_dep(n, last_grf_write[inst->src[i].reg + r], 0);
1087 } else {
1088 for (int r = 0; r < inst->regs_read(i); r++) {
1089 add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], 0);
1090 }
1091 }
1092 } else if (inst->src[i].file == HW_REG &&
1093 (inst->src[i].fixed_hw_reg.file ==
1094 BRW_GENERAL_REGISTER_FILE)) {
1095 if (post_reg_alloc) {
1096 int size = reg_width;
1097 if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
1098 size = 1;
1099 for (int r = 0; r < size; r++)
1100 add_dep(n, last_grf_write[inst->src[i].fixed_hw_reg.nr + r], 0);
1101 } else {
1102 add_dep(n, last_fixed_grf_write, 0);
1103 }
1104 } else if (inst->src[i].is_accumulator()) {
1105 add_dep(n, last_accumulator_write, 0);
1106 } else if (inst->src[i].file != BAD_FILE &&
1107 inst->src[i].file != IMM &&
1108 inst->src[i].file != UNIFORM &&
1109 (inst->src[i].file != HW_REG ||
1110 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
1111 assert(inst->src[i].file != MRF);
1112 add_barrier_deps(n);
1113 }
1114 }
1115
1116 if (inst->base_mrf != -1) {
1117 for (int i = 0; i < inst->mlen; i++) {
1118 /* It looks like the MRF regs are released in the send
1119 * instruction once it's sent, not when the result comes
1120 * back.
1121 */
1122 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1123 }
1124 }
1125
1126 if (inst->reads_flag()) {
1127 add_dep(n, last_conditional_mod[inst->flag_subreg]);
1128 }
1129
1130 if (inst->reads_accumulator_implicitly()) {
1131 add_dep(n, last_accumulator_write);
1132 }
1133
1134 /* Update the things this instruction wrote, so earlier reads
1135 * can mark this as WAR dependency.
1136 */
1137 if (inst->dst.file == GRF) {
1138 if (post_reg_alloc) {
1139 for (int r = 0; r < inst->regs_written; r++)
1140 last_grf_write[inst->dst.reg + r] = n;
1141 } else {
1142 for (int r = 0; r < inst->regs_written; r++) {
1143 last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
1144 }
1145 }
1146 } else if (inst->dst.file == MRF) {
1147 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
1148
1149 last_mrf_write[reg] = n;
1150
1151 if (is_compressed(inst)) {
1152 if (inst->dst.reg & BRW_MRF_COMPR4)
1153 reg += 4;
1154 else
1155 reg++;
1156
1157 last_mrf_write[reg] = n;
1158 }
1159 } else if (inst->dst.file == HW_REG &&
1160 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1161 if (post_reg_alloc) {
1162 for (int r = 0; r < reg_width; r++)
1163 last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
1164 } else {
1165 last_fixed_grf_write = n;
1166 }
1167 } else if (inst->dst.is_accumulator()) {
1168 last_accumulator_write = n;
1169 } else if (inst->dst.file != BAD_FILE &&
1170 !inst->dst.is_null()) {
1171 add_barrier_deps(n);
1172 }
1173
1174 if (inst->mlen > 0 && inst->base_mrf != -1) {
1175 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1176 last_mrf_write[inst->base_mrf + i] = n;
1177 }
1178 }
1179
1180 if (inst->writes_flag()) {
1181 last_conditional_mod[inst->flag_subreg] = n;
1182 }
1183
1184 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1185 last_accumulator_write = n;
1186 }
1187 }
1188 }
1189
1190 void
1191 vec4_instruction_scheduler::calculate_deps()
1192 {
1193 schedule_node *last_grf_write[grf_count];
1194 schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
1195 schedule_node *last_conditional_mod = NULL;
1196 schedule_node *last_accumulator_write = NULL;
1197 /* Fixed HW registers are assumed to be separate from the virtual
1198 * GRFs, so they can be tracked separately. We don't really write
1199 * to fixed GRFs much, so don't bother tracking them on a more
1200 * granular level.
1201 */
1202 schedule_node *last_fixed_grf_write = NULL;
1203
1204 /* The last instruction always needs to still be the last instruction.
1205 * Either it's flow control (IF, ELSE, ENDIF, DO, WHILE) and scheduling
1206 * other things after it would disturb the basic block, or it's the EOT
1207 * URB_WRITE and we should do a better job at dead code eliminating
1208 * anything that could have been scheduled after it.
1209 */
1210 schedule_node *last = (schedule_node *)instructions.get_tail();
1211 add_barrier_deps(last);
1212
1213 memset(last_grf_write, 0, sizeof(last_grf_write));
1214 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1215
1216 /* top-to-bottom dependencies: RAW and WAW. */
1217 foreach_in_list(schedule_node, n, &instructions) {
1218 vec4_instruction *inst = (vec4_instruction *)n->inst;
1219
1220 if (inst->has_side_effects())
1221 add_barrier_deps(n);
1222
1223 /* read-after-write deps. */
1224 for (int i = 0; i < 3; i++) {
1225 if (inst->src[i].file == GRF) {
1226 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1227 add_dep(last_grf_write[inst->src[i].reg + j], n);
1228 } else if (inst->src[i].file == HW_REG &&
1229 (inst->src[i].fixed_hw_reg.file ==
1230 BRW_GENERAL_REGISTER_FILE)) {
1231 add_dep(last_fixed_grf_write, n);
1232 } else if (inst->src[i].is_accumulator()) {
1233 assert(last_accumulator_write);
1234 add_dep(last_accumulator_write, n);
1235 } else if (inst->src[i].file != BAD_FILE &&
1236 inst->src[i].file != IMM &&
1237 inst->src[i].file != UNIFORM &&
1238 (inst->src[i].file != HW_REG ||
1239 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
1240 /* No reads from MRF, and ATTR is already translated away */
1241 assert(inst->src[i].file != MRF &&
1242 inst->src[i].file != ATTR);
1243 add_barrier_deps(n);
1244 }
1245 }
1246
1247 if (!inst->is_send_from_grf()) {
1248 for (int i = 0; i < inst->mlen; i++) {
1249 /* It looks like the MRF regs are released in the send
1250 * instruction once it's sent, not when the result comes
1251 * back.
1252 */
1253 add_dep(last_mrf_write[inst->base_mrf + i], n);
1254 }
1255 }
1256
1257 if (inst->reads_flag()) {
1258 assert(last_conditional_mod);
1259 add_dep(last_conditional_mod, n);
1260 }
1261
1262 if (inst->reads_accumulator_implicitly()) {
1263 assert(last_accumulator_write);
1264 add_dep(last_accumulator_write, n);
1265 }
1266
1267 /* write-after-write deps. */
1268 if (inst->dst.file == GRF) {
1269 for (unsigned j = 0; j < inst->regs_written; ++j) {
1270 add_dep(last_grf_write[inst->dst.reg + j], n);
1271 last_grf_write[inst->dst.reg + j] = n;
1272 }
1273 } else if (inst->dst.file == MRF) {
1274 add_dep(last_mrf_write[inst->dst.reg], n);
1275 last_mrf_write[inst->dst.reg] = n;
1276 } else if (inst->dst.file == HW_REG &&
1277 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1278 last_fixed_grf_write = n;
1279 } else if (inst->dst.is_accumulator()) {
1280 add_dep(last_accumulator_write, n);
1281 last_accumulator_write = n;
1282 } else if (inst->dst.file != BAD_FILE &&
1283 !inst->dst.is_null()) {
1284 add_barrier_deps(n);
1285 }
1286
1287 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1288 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1289 add_dep(last_mrf_write[inst->base_mrf + i], n);
1290 last_mrf_write[inst->base_mrf + i] = n;
1291 }
1292 }
1293
1294 if (inst->writes_flag()) {
1295 add_dep(last_conditional_mod, n, 0);
1296 last_conditional_mod = n;
1297 }
1298
1299 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1300 !inst->dst.is_accumulator()) {
1301 add_dep(last_accumulator_write, n);
1302 last_accumulator_write = n;
1303 }
1304 }
1305
1306 /* bottom-to-top dependencies: WAR */
1307 memset(last_grf_write, 0, sizeof(last_grf_write));
1308 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1309 last_conditional_mod = NULL;
1310 last_accumulator_write = NULL;
1311 last_fixed_grf_write = NULL;
1312
1313 exec_node *node;
1314 exec_node *prev;
1315 for (node = instructions.get_tail(), prev = node->prev;
1316 !node->is_head_sentinel();
1317 node = prev, prev = node->prev) {
1318 schedule_node *n = (schedule_node *)node;
1319 vec4_instruction *inst = (vec4_instruction *)n->inst;
1320
1321 /* write-after-read deps. */
1322 for (int i = 0; i < 3; i++) {
1323 if (inst->src[i].file == GRF) {
1324 for (unsigned j = 0; j < inst->regs_read(i); ++j)
1325 add_dep(n, last_grf_write[inst->src[i].reg + j]);
1326 } else if (inst->src[i].file == HW_REG &&
1327 (inst->src[i].fixed_hw_reg.file ==
1328 BRW_GENERAL_REGISTER_FILE)) {
1329 add_dep(n, last_fixed_grf_write);
1330 } else if (inst->src[i].is_accumulator()) {
1331 add_dep(n, last_accumulator_write);
1332 } else if (inst->src[i].file != BAD_FILE &&
1333 inst->src[i].file != IMM &&
1334 inst->src[i].file != UNIFORM &&
1335 (inst->src[i].file != HW_REG ||
1336 inst->src[i].fixed_hw_reg.file != BRW_IMMEDIATE_VALUE)) {
1337 assert(inst->src[i].file != MRF &&
1338 inst->src[i].file != ATTR);
1339 add_barrier_deps(n);
1340 }
1341 }
1342
1343 if (!inst->is_send_from_grf()) {
1344 for (int i = 0; i < inst->mlen; i++) {
1345 /* It looks like the MRF regs are released in the send
1346 * instruction once it's sent, not when the result comes
1347 * back.
1348 */
1349 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1350 }
1351 }
1352
1353 if (inst->reads_flag()) {
1354 add_dep(n, last_conditional_mod);
1355 }
1356
1357 if (inst->reads_accumulator_implicitly()) {
1358 add_dep(n, last_accumulator_write);
1359 }
1360
1361 /* Update the things this instruction wrote, so earlier reads
1362 * can mark this as WAR dependency.
1363 */
1364 if (inst->dst.file == GRF) {
1365 for (unsigned j = 0; j < inst->regs_written; ++j)
1366 last_grf_write[inst->dst.reg + j] = n;
1367 } else if (inst->dst.file == MRF) {
1368 last_mrf_write[inst->dst.reg] = n;
1369 } else if (inst->dst.file == HW_REG &&
1370 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
1371 last_fixed_grf_write = n;
1372 } else if (inst->dst.is_accumulator()) {
1373 last_accumulator_write = n;
1374 } else if (inst->dst.file != BAD_FILE &&
1375 !inst->dst.is_null()) {
1376 add_barrier_deps(n);
1377 }
1378
1379 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1380 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
1381 last_mrf_write[inst->base_mrf + i] = n;
1382 }
1383 }
1384
1385 if (inst->writes_flag()) {
1386 last_conditional_mod = n;
1387 }
1388
1389 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1390 last_accumulator_write = n;
1391 }
1392 }
1393 }
1394
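/* Pre-RA heuristic order used below: (1) prefer the larger positive register
 * pressure benefit, (2) in SCHEDULE_PRE_LIFO mode prefer the candidate that
 * most recently became available and, on pre-Gen7 (MRF-using) hardware,
 * instructions that don't look like multi-register sends, (3) prefer the
 * larger delay to the end of the program, (4) otherwise keep program order.
 * Post-RA (and SCHEDULE_PRE) just picks the earliest-unblocked instruction.
 */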
1395 schedule_node *
1396 fs_instruction_scheduler::choose_instruction_to_schedule()
1397 {
1398 schedule_node *chosen = NULL;
1399
1400 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1401 int chosen_time = 0;
1402
1403 /* Of the instructions ready to execute or the closest to
1404 * being ready, choose the oldest one.
1405 */
1406 foreach_in_list(schedule_node, n, &instructions) {
1407 if (!chosen || n->unblocked_time < chosen_time) {
1408 chosen = n;
1409 chosen_time = n->unblocked_time;
1410 }
1411 }
1412 } else {
1413 /* Before register allocation, we don't care about the latencies of
1414 * instructions. All we care about is reducing live intervals of
1415 * variables so that we can avoid register spilling, or get SIMD16
1416 * shaders which naturally do a better job of hiding instruction
1417 * latency.
1418 */
1419 foreach_in_list(schedule_node, n, &instructions) {
1420 fs_inst *inst = (fs_inst *)n->inst;
1421
1422 if (!chosen) {
1423 chosen = n;
1424 continue;
1425 }
1426
1427 /* Most important: If we can definitely reduce register pressure, do
1428 * so immediately.
1429 */
1430 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1431 int chosen_register_pressure_benefit =
1432 get_register_pressure_benefit(chosen->inst);
1433
1434 if (register_pressure_benefit > 0 &&
1435 register_pressure_benefit > chosen_register_pressure_benefit) {
1436 chosen = n;
1437 continue;
1438 } else if (chosen_register_pressure_benefit > 0 &&
1439 (register_pressure_benefit <
1440 chosen_register_pressure_benefit)) {
1441 continue;
1442 }
1443
1444 if (mode == SCHEDULE_PRE_LIFO) {
1445 /* Prefer instructions that recently became available for
1446 * scheduling. These are the things that are most likely to
1447 * (eventually) make a variable dead and reduce register pressure.
1448 * Typical register pressure estimates don't work for us because
1449 * most of our pressure comes from texturing, where no single
1450 * instruction to schedule will make a vec4 value dead.
1451 */
1452 if (n->cand_generation > chosen->cand_generation) {
1453 chosen = n;
1454 continue;
1455 } else if (n->cand_generation < chosen->cand_generation) {
1456 continue;
1457 }
1458
1459 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1460 * do this, then because we prefer instructions that just became
1461 * candidates, we'll end up in a pattern of scheduling a SEND,
1462 * then the MRFs for the next SEND, then the next SEND, then the
1463 * MRFs, etc., without ever consuming the results of a send.
1464 */
1465 if (v->devinfo->gen < 7) {
1466 fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1467
1468 /* We use regs_written > 1 as our test for the kind of send
1469 * instruction to avoid -- only sends generate many regs, and a
1470 * single-result send is probably actually reducing register
1471 * pressure.
1472 */
1473 if (inst->regs_written <= inst->exec_size / 8 &&
1474 chosen_inst->regs_written > chosen_inst->exec_size / 8) {
1475 chosen = n;
1476 continue;
1477 } else if (inst->regs_written > chosen_inst->regs_written) {
1478 continue;
1479 }
1480 }
1481 }
1482
1483 /* For instructions pushed on the cands list at the same time, prefer
1484 * the one with the highest delay to the end of the program. This is
1485 * most likely to have its values able to be consumed first (such as
1486 * for a large tree of lowered ubo loads, which appear reversed in
1487 * the instruction stream with respect to when they can be consumed).
1488 */
1489 if (n->delay > chosen->delay) {
1490 chosen = n;
1491 continue;
1492 } else if (n->delay < chosen->delay) {
1493 continue;
1494 }
1495
1496 /* If all other metrics are equal, we prefer the first instruction in
1497 * the list (program execution order).
1498 */
1499 }
1500 }
1501
1502 return chosen;
1503 }
1504
1505 schedule_node *
1506 vec4_instruction_scheduler::choose_instruction_to_schedule()
1507 {
1508 schedule_node *chosen = NULL;
1509 int chosen_time = 0;
1510
1511 /* Of the instructions ready to execute or the closest to being ready,
1512 * choose the oldest one.
1513 */
1514 foreach_in_list(schedule_node, n, &instructions) {
1515 if (!chosen || n->unblocked_time < chosen_time) {
1516 chosen = n;
1517 chosen_time = n->unblocked_time;
1518 }
1519 }
1520
1521 return chosen;
1522 }
1523
1524 int
1525 fs_instruction_scheduler::issue_time(backend_instruction *inst)
1526 {
1527 if (is_compressed((fs_inst *)inst))
1528 return 4;
1529 else
1530 return 2;
1531 }
1532
1533 int
1534 vec4_instruction_scheduler::issue_time(backend_instruction *inst)
1535 {
1536 /* We always execute as two vec4s in parallel. */
1537 return 2;
1538 }
1539
1540 void
1541 instruction_scheduler::schedule_instructions(bblock_t *block)
1542 {
1543 const struct brw_device_info *devinfo = bs->devinfo;
1544 backend_instruction *inst = block->end();
1545 time = 0;
1546 if (!post_reg_alloc)
1547 reg_pressure = reg_pressure_in[block->num];
1548 block_idx = block->num;
1549
1550 /* Remove non-DAG heads from the list. */
1551 foreach_in_list_safe(schedule_node, n, &instructions) {
1552 if (n->parent_count != 0)
1553 n->remove();
1554 }
1555
1556 unsigned cand_generation = 1;
1557 while (!instructions.is_empty()) {
1558 schedule_node *chosen = choose_instruction_to_schedule();
1559
1560 /* Schedule this instruction. */
1561 assert(chosen);
1562 chosen->remove();
1563 inst->insert_before(block, chosen->inst);
1564 instructions_to_schedule--;
1565
1566 if (!post_reg_alloc) {
1567 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1568 update_register_pressure(chosen->inst);
1569 }
1570
1571 /* If we expected a delay for scheduling, then bump the clock to reflect
1572 * that. In reality, the hardware will switch to another hyperthread
1573 * and may not return to dispatching our thread for a while even after
1574 * we're unblocked. After this, we have the time when the chosen
1575 * instruction will start executing.
1576 */
1577 time = MAX2(time, chosen->unblocked_time);
1578
1579 /* Update the clock for how soon an instruction could start after the
1580 * chosen one.
1581 */
1582 time += issue_time(chosen->inst);
1583
1584 if (debug) {
1585 fprintf(stderr, "clock %4d, scheduled: ", time);
1586 bs->dump_instruction(chosen->inst);
1587 if (!post_reg_alloc)
1588 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1589 }
1590
1591 /* Now that we've scheduled a new instruction, some of its
1592 * children can be promoted to the list of instructions ready to
1593 * be scheduled. Update the children's unblocked time for this
1594 * DAG edge as we do so.
1595 */
1596 for (int i = chosen->child_count - 1; i >= 0; i--) {
1597 schedule_node *child = chosen->children[i];
1598
1599 child->unblocked_time = MAX2(child->unblocked_time,
1600 time + chosen->child_latency[i]);
1601
1602 if (debug) {
1603 fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1604 bs->dump_instruction(child->inst);
1605 }
1606
1607 child->cand_generation = cand_generation;
1608 child->parent_count--;
1609 if (child->parent_count == 0) {
1610 if (debug) {
1611 fprintf(stderr, "\t\tnow available\n");
1612 }
1613 instructions.push_head(child);
1614 }
1615 }
1616 cand_generation++;
1617
1618 /* Shared resource: the mathbox. There's one mathbox per EU on Gen6+,
1619 * but it's more limited pre-Gen6, so if we send something off to it then
1620 * the next math instruction isn't going to make progress until the first
1621 * is done.
1622 */
1623 if (devinfo->gen < 6 && chosen->inst->is_math()) {
1624 foreach_in_list(schedule_node, n, &instructions) {
1625 if (n->inst->is_math())
1626 n->unblocked_time = MAX2(n->unblocked_time,
1627 time + chosen->latency);
1628 }
1629 }
1630 }
1631
1632 if (block->end()->opcode == BRW_OPCODE_NOP)
1633 block->end()->remove(block);
1634 assert(instructions_to_schedule == 0);
1635
1636 block->cycle_count = time;
1637 }
1638
1639 static unsigned get_cycle_count(cfg_t *cfg)
1640 {
1641 unsigned count = 0, multiplier = 1;
1642 foreach_block(block, cfg) {
1643 if (block->start()->opcode == BRW_OPCODE_DO)
1644 multiplier *= 10; /* assume that loops execute ~10 times */
1645
1646 count += block->cycle_count * multiplier;
1647
1648 if (block->end()->opcode == BRW_OPCODE_WHILE)
1649 multiplier /= 10;
1650 }
1651
1652 return count;
1653 }
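/* Worked example of the estimate above: a block at loop nesting depth 1
 * contributes 10x its cycle_count, a block inside two nested loops
 * contributes 100x, and straight-line blocks contribute their cycle_count
 * exactly once.
 */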
1654
1655 void
1656 instruction_scheduler::run(cfg_t *cfg)
1657 {
1658 if (debug && !post_reg_alloc) {
1659 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1660 post_reg_alloc);
1661 bs->dump_instructions();
1662 }
1663
1664 if (!post_reg_alloc)
1665 setup_liveness(cfg);
1666
1667 foreach_block(block, cfg) {
1668 if (block->end_ip - block->start_ip <= 1)
1669 continue;
1670
1671 if (reads_remaining) {
1672 memset(reads_remaining, 0,
1673 grf_count * sizeof(*reads_remaining));
1674 memset(hw_reads_remaining, 0,
1675 hw_reg_count * sizeof(*hw_reads_remaining));
1676 memset(written, 0, grf_count * sizeof(*written));
1677
1678 foreach_inst_in_block(fs_inst, inst, block)
1679 count_reads_remaining(inst);
1680 }
1681
1682 add_insts_from_block(block);
1683
1684 calculate_deps();
1685
1686 foreach_in_list(schedule_node, n, &instructions) {
1687 compute_delay(n);
1688 }
1689
1690 schedule_instructions(block);
1691 }
1692
1693 if (debug && !post_reg_alloc) {
1694 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1695 post_reg_alloc);
1696 bs->dump_instructions();
1697 }
1698
1699 cfg->cycle_count = get_cycle_count(cfg);
1700 }
1701
1702 void
1703 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1704 {
1705 calculate_live_intervals();
1706
1707 int grf_count;
1708 if (mode == SCHEDULE_POST)
1709 grf_count = grf_used;
1710 else
1711 grf_count = alloc.count;
1712
1713 fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1714 cfg->num_blocks, mode);
1715 sched.run(cfg);
1716
1717 if (unlikely(debug_enabled) && mode == SCHEDULE_POST) {
1718 fprintf(stderr, "%s%d estimated execution time: %d cycles\n",
1719 stage_abbrev, dispatch_width, sched.time);
1720 }
1721
1722 invalidate_live_intervals();
1723 }
1724
1725 void
1726 vec4_visitor::opt_schedule_instructions()
1727 {
1728 vec4_instruction_scheduler sched(this, prog_data->total_grf);
1729 sched.run(cfg);
1730
1731 if (unlikely(debug_enabled)) {
1732 fprintf(stderr, "%s estimated execution time: %d cycles\n",
1733 stage_abbrev, sched.time);
1734 }
1735
1736 invalidate_live_intervals();
1737 }